def login(self):
    """Authenticate against Google Voice.

    Fetches the login form, submits the stored credentials, and on
    success caches the hidden "_rnr_se" session token used by later
    requests.

    Returns True on success, False otherwise.
    """
    # Fetch the login page and pull out the form's action URL and fields.
    login_page = self.__get_doc(ROOT_URL)
    action_url, fields = form_grabber.process_form(login_page, ROOT_URL)

    # Fill in the credential fields and submit the form.
    fields["Email"] = self.__username
    fields["Passwd"] = self.__password
    body = urllib.urlencode(fields)
    reply = self.__get_doc(urllib2.Request(action_url, body))

    # A "Sign out" link is only rendered for authenticated sessions.
    if "Sign out" not in reply:
        return False
    self.__logged_in = True

    # Scrape the hidden "_rnr_se" form value out of the page.
    token = reply.split('"_rnr_se"')[1]
    token = token.split('value="')[1]
    token = token.split('"')[0]
    self.__rnr_se = token
    return True
def login(self): """ Log in to IMOS Returns True if successful, or False if unsuccessful """ # Grab the login page, and then parse out the FORM data self.__show_status("Getting login page") page = self.__get_doc(LOGIN_URL) action_url, data = form_grabber.process_form(page, LOGIN_URL) data["j_username"] = self.__username data["j_password"] = self.__password data["j_submit"] = "Sign In" print "Done" # Prepare the login request and try logging in self.__show_status("Attempting login") data = urllib.urlencode(data) request = urllib2.Request(action_url, data) response = self.__get_doc(request) print "Done" # Process the server's response if "Sign Out" not in response: return False self.__logged_in = True # get view_state variable view_state = response.split("name=\"javax.faces.ViewState\"")[1] view_state = view_state.split("value=\"")[1].split("\"")[0] self.__view_state = view_state return True
def login(self):
    """Log in to Google Voice.

    Returns True when authentication succeeds (caching the "_rnr_se"
    session token for later API calls), or False when it fails.
    """
    # Grab the login page and parse out the form's target and fields.
    doc = self.__get_doc(ROOT_URL)
    post_url, form_data = form_grabber.process_form(doc, ROOT_URL)

    # Supply the account credentials.
    form_data["Email"] = self.__username
    form_data["Passwd"] = self.__password

    # Encode and submit the login form.
    encoded = urllib.urlencode(form_data)
    result = self.__get_doc(urllib2.Request(post_url, encoded))

    # The "Sign out" link only appears for authenticated users.
    if "Sign out" not in result:
        return False
    self.__logged_in = True

    # Extract the hidden "_rnr_se" value needed by later calls.
    self.__rnr_se = result.split('"_rnr_se"')[1].split('value="')[1].split('"')[0]
    return True
def prepare_download(opener, page_url): # Get web page sys.stdout.write("[+] Grabbing web page, ") sys.stdout.flush() page = opener.open(page_url) print "Done" # "click" the free download button sys.stdout.write("[+] Requesting free download, ") sys.stdout.flush() form_action = page.geturl() data = {"gateway_result": "1"} data = urllib.urlencode(data) request = urllib2.Request(form_action, data) response = opener.open(request).read() print "Done" # Parse for file download URL file_download_url = response.split("('#download_container').load('")[1] file_download_url = file_download_url.split("'")[0] file_download_url = urlparse.urljoin("http://depositfiles.com", file_download_url) # Wait the requisite 60 seconds for free downloads print "[+] Waiting the required 60 seconds" last_line_length = 0 for i in range(60, -1, -1): line = "[+] Time remaining: %d seconds" % i while len(line) < last_line_length: line += " " last_line_length = len(line) sys.stdout.write("\r%s" % line) sys.stdout.flush() time.sleep(1) print # get the ACTUAL file download link sys.stdout.write("[+] Building download request, ") sys.stdout.flush() page = opener.open(file_download_url).read() soup = BeautifulSoup.BeautifulSoup(page) form_action, data = form_grabber.process_form(soup, file_download_url) data["submit"] = "Download the file" data = urllib.urlencode(data) request = urllib2.Request(form_action, data) print "Done" return request, form_action
def send_message(self, to_addr, message_subject, message_body):
    """Send an email through the web interface.

    Logs in first if there is no active session, then navigates to
    the compose view, fills in the compose form, and posts it.

    Returns True when the server confirms the message was sent.
    """
    # Make sure we have an authenticated session.
    if not self.__logged_in:
        self.login()

    def last_stylesheet_base(soup):
        # The session-scoped base URL is the href of the last
        # stylesheet <link> on the page, minus its query string.
        base = ""
        for link in soup.findAll("link"):
            if not link.has_key("rel") or link["rel"] != "stylesheet":
                continue
            base = link["href"].split("?")[0]
        return base

    # Derive the compose URL from the inbox page's session URL.
    inbox_page = self.__get_doc(HTML_INBOX_URL)
    inbox_soup = BeautifulSoup.BeautifulSoup(inbox_page)
    session_url = last_stylesheet_base(inbox_soup)
    compose_url = urllib2.urlparse.urljoin(session_url, "?v=b&pv=tl&cs=b")

    # Load the compose page and refresh the session URL from it.
    compose_page = self.__get_doc(compose_url)
    compose_soup = BeautifulSoup.BeautifulSoup(compose_page)
    session_url = last_stylesheet_base(compose_soup)

    # Fill in the compose form (the second form on the page).
    action_url, fields = form_grabber.process_form(compose_page, session_url,
                                                   form_index=1)
    fields["to"] = to_addr
    fields["subject"] = message_subject
    fields["body"] = message_body
    fields["nvp_bu_send"] = "Send"

    # Post the message and look for the confirmation banner.
    reply = self.__get_doc(urllib2.Request(action_url), fields)
    return "Your message has been sent." in reply
def login(self):
    """Authenticate against GMail.

    Returns True if the credentials were accepted, False otherwise.
    """
    # Fetch the sign-in page and extract the form's target and fields.
    signin_page = self.__get_doc(GMAIL_URL)
    target, fields = form_grabber.process_form(signin_page, GMAIL_URL)

    # Inject the stored credentials and post the form.
    fields["Email"] = self.__username
    fields["Passwd"] = self.__password
    reply = self.__get_doc(urllib2.Request(target, urllib.urlencode(fields)))

    # Only an authenticated page shows a "Sign out" link.
    if "Sign out" not in reply:
        return False
    self.__logged_in = True
    return True
def prepare_download(opener, page_url): # Get web page sys.stdout.write("[+] Grabbing web page, ") sys.stdout.flush() page = opener.open(page_url).read() print "Done" # Parse for download request form sys.stdout.write("[+] Parsing for download request form, ") sys.stdout.flush() soup = BeautifulSoup.BeautifulSoup(page) form_action, data = form_grabber.process_form(soup, page_url, form_index=-1) data["method_free"] = " Regular Download " print "Done" # make a request to download the file sys.stdout.write("[+] Requesting to download file, ") sys.stdout.flush() data = urllib.urlencode(data) request = urllib2.Request(form_action, data) response = opener.open(request).read() soup = BeautifulSoup.BeautifulSoup(response) a = open("page11.html", "w") a.write(response) a.close() print "Done" # Wait the required 60 seconds for free downloads #print "[+] Waiting the required 60 seconds" #last_line_length = 0 #for i in range(60, -1, -1): # line = "[+] Time remaining: %d seconds" % i # while len(line) < last_line_length: # line += " " # last_line_length = len(line) # sys.stdout.write("\r%s" % line) # sys.stdout.flush() # time.sleep(1) #print # check for CAPTCHA sys.stdout.write("[+] Processing CAPTCHA data, ") sys.stdout.flush() print print #form_action, data = form_grabber.process_form(soup, page_url, form_index=1) captcha_iframe_url = soup.findAll("iframe")[0]["src"] captcha_iframe_page = opener.open(captcha_iframe_url).read() captcha_iframe_soup = BeautifulSoup.BeautifulSoup(captcha_iframe_page) form_action, data = form_grabber.process_form(captcha_iframe_soup, page_url, debug=True) sys.exit() captcha_image_url = captcha_iframe_soup.findAll("img")[0]["src"] captcha_image_url = urlparse.urljoin(captcha_iframe_url, captcha_image_url) captcha_image_data = opener.open(captcha_image_url).read() a = open("image.jpg", "wb") a.write(captcha_image_data) a.close() print "Done" # get CAPTCHA answer print "[+] Open image.jpg, and type in the words below" captcha_answer = raw_input("[?] 
Answer: ") data["recaptcha_response_field"] = captcha_answer # post response sys.stdout.write("[+] Sending CAPTCHA response to server, ") sys.stdout.flush() data = urllib.urlencode(data) request = urllib2.Request(form_action, data) response = opener.open(request).read() a = open("page.html", "w") a.write(response) a.close() if "Wrong Code. Please try again." in response: print "Failed" return print "Done" # parse out download URL file_download_url = "" soup = BeautifulSoup.BeautifulSoup(response) for tag in soup.findAll("a"): if not tag.has_key("class"): continue if tag["class"] == "click_download": file_download_url = tag["href"] return file_download_url
def prepare_download(opener, page_url): # Get web page sys.stdout.write("[+] Grabbing web page, ") sys.stdout.flush() page = opener.open(page_url) print "Done" # Parse for download request form sys.stdout.write("[+] Parsing for download request form, ") sys.stdout.flush() soup = BeautifulSoup.BeautifulSoup(page) form_action, data = form_grabber.process_form(soup, page_url, form_index=1) wait_time = int(data["wait"]) print "Done" # Wait the requisite nummber of seconds for free downloads print "[+] Waiting the required %d seconds" % wait_time last_line_length = 0 for i in range(wait_time, -1, -1): line = "[+] Time remaining: %d seconds" % i while len(line) < last_line_length: line += " " last_line_length = len(line) sys.stdout.write("\r%s" % line) sys.stdout.flush() time.sleep(1) print # make a request to download the file sys.stdout.write("[+] Requesting to download file, ") sys.stdout.flush() data = urllib.urlencode(data) request = urllib2.Request(form_action, data) response = opener.open(request).read() soup = BeautifulSoup.BeautifulSoup(response) print "Done" if "You reached your hourly traffic limit." in response: print "[!] Hourly traffic limit reached!" return # check for CAPTCHA sys.stdout.write("[+] Processing CAPTCHA data, ") sys.stdout.flush() form_action, data = form_grabber.process_form(soup, page_url, form_index=1) captcha_iframe_url = soup.findAll("iframe")[0]["src"] captcha_iframe_page = opener.open(captcha_iframe_url) captcha_iframe_soup = BeautifulSoup.BeautifulSoup(captcha_iframe_page) form_action, data = form_grabber.process_form(captcha_iframe_soup, page_url) captcha_image_url = captcha_iframe_soup.findAll("img")[0]["src"] captcha_image_url = urlparse.urljoin(captcha_iframe_url, captcha_image_url) captcha_image_data = opener.open(captcha_image_url).read() a = open("image.jpg", "wb") a.write(captcha_image_data) a.close() print "Done" # get CAPTCHA answer print "[+] Open image.jpg, and type in the words below" captcha_answer = raw_input("[?] 
Answer: ") data["recaptcha_response_field"] = captcha_answer data["action"] = "checkcaptcha" # post response sys.stdout.write("[+] Sending CAPTCHA response to server, ") sys.stdout.flush() data = urllib.urlencode(data) request = urllib2.Request(form_action, data) response = opener.open(request).read() if "Wrong Code. Please try again." in response: print "Failed" return print "Done" # parse out download URL file_download_url = "" soup = BeautifulSoup.BeautifulSoup(response) for tag in soup.findAll("a"): if not tag.has_key("class"): continue if tag["class"] == "click_download": file_download_url = tag["href"] return file_download_url