def post_data(key, value): max_chunk_size = 1500 indx = 0 iterations = 0 value_length = len(value) while (indx < value_length): value_suffix = "" key_suffix = "" if iterations > 0: #on all iterations after the first, append _# to key name key_suffix = "_"+str(iterations) if (indx+max_chunk_size) < value_length: #if there are more chunks, append -m to the value value_suffix = "-m" headers = { 'User-Agent': get_random_useragent() } postdata = { "url": "http://stackoverflow.com/?"+value[indx:indx+max_chunk_size]+value_suffix, "format": "json", "shorturl": key+key_suffix } print "CREATED", "http://is.gd/"+postdata['shorturl'] requests.post('http://is.gd/create.php', params=postdata, headers=headers) indx += max_chunk_size iterations += 1
def get_data(key): headers = {'User-Agent': get_random_useragent()} url = "http://pastebin.com/raw.php?i=" + key print "hitting ", url r = requests.get(url, headers=headers) if "Pastebin.com Unknown Paste ID" in r.text: print "Unable to pull from pastebin." return None print "Pulled from pastebin" return r.text
def get_data(key): headers = { 'User-Agent': get_random_useragent() } url = "http://pastebin.com/raw.php?i="+key print "hitting ", url r = requests.get(url, headers=headers) if "Pastebin.com Unknown Paste ID" in r.text: print "Unable to pull from pastebin." return None print "Pulled from pastebin" return r.text
def get_data(key):
    """Reassemble data that post_data() spread across tinyurl aliases.

    Chunk 0 lives at http://tinyurl.com/<key>; chunk N at
    http://tinyurl.com/<key>-N.  A chunk payload ending in "-m" signals
    that more chunks follow; the final chunk has no marker.  Returns the
    concatenated payload, or None on failure or after 11 chunks.
    """
    final_data = ""
    iterations = 0
    while True:
        key_suffix = ""
        if iterations > 10:
            return None  # stop 'dem infinite loops
        if iterations > 0:
            # on all iterations after the first, append -# to key name
            key_suffix = "-"+str(iterations)
        current_url = ""
        headers = { 'User-Agent': get_random_useragent() }
        url = 'http://tinyurl.com/'+key+key_suffix
        print "hitting", url
        try:
            r = requests.get(url, headers=headers)
            # The payload was smuggled into the query string of the
            # redirect target; recover it from the post-redirect URL.
            if "http://stackoverflow.com/?" in r.url:  # what is the url after following redirects?
                print "Retrieved by following redirects"
                current_url = r.url.replace("http://stackoverflow.com/?", "")
        except requests.exceptions.ConnectionError:
            # Network failure here falls through to the preview fallback.
            pass
        if not current_url:
            # Alternate retrieval method that doesn't ping stackoverflow:
            # scrape the destination URL out of tinyurl's preview page.
            url = 'http://preview.tinyurl.com/'+key+key_suffix
            r = requests.get(url, headers=headers)
            try:
                current_url = r.text.split('http://stackoverflow.com/?')[1].split('</b>')[0].replace("<br />", "")
            except IndexError:
                # Preview page had no payload link — chunk is unrecoverable.
                #print "Error while checking preview", r.text
                return None
            print "Retrieved by using preview"
        if current_url[-2:] == "-m":
            # first chunk of several — strip the marker and keep going
            #print "Retrieved chunk", current_url[:-2]
            final_data += current_url[:-2]
            iterations += 1
        else:
            #print "Retrieved final piece", current_url
            return final_data + current_url
def get_data(key): final_data = "" iterations = 0 while True: key_suffix = "" if iterations > 10: return None # stop 'dem infinite loops if iterations > 0: # on all iterations after the first, append -# to key name key_suffix = "-" + str(iterations) current_url = "" headers = {"User-Agent": get_random_useragent()} url = "http://tinyurl.com/" + key + key_suffix print "hitting", url try: r = requests.get(url, headers=headers) if "http://stackoverflow.com/?" in r.url: # what is the url after following redirects? print "Retrieved by following redirects" current_url = r.url.replace("http://stackoverflow.com/?", "") except requests.exceptions.ConnectionError: pass if not current_url: # Alternate retrieval method that doesn't ping stackoverflow: url = "http://preview.tinyurl.com/" + key + key_suffix r = requests.get(url, headers=headers) try: current_url = r.text.split("http://stackoverflow.com/?")[1].split("</b>")[0].replace("<br />", "") except IndexError: # print "Error while checking preview", r.text return None print "Retrieved by using preview" if current_url[-2:] == "-m": # first chunk of several # print "Retrieved chunk", current_url[:-2] final_data += current_url[:-2] iterations += 1 else: # print "Retrieved final piece", current_url return final_data + current_url
def get_data(key): final_data = "" iterations = 0 while True: print "Retrieving chunk..." key_suffix = "" if iterations > 10: return None #stop 'dem infinite loops if iterations > 0: #on all iterations after the first, append _# to key name key_suffix = "_"+str(iterations) current_url = "" headers = { 'User-Agent': get_random_useragent() } url = 'http://is.gd/'+key+key_suffix print "hitting", url r = requests.get(url, headers=headers, cookies={'preview': '0'}) if "http://stackoverflow.com/?" in r.url: #what is the url after following redirects? print "Retrieved by following redirects" current_url = r.url.replace("http://stackoverflow.com/?", "") if not current_url: #Alternate retrieval method that doesn't ping stackoverflow: r = requests.get(url, headers=headers, cookies={'preview': '1'}) try: current_url = r.text.split('http://stackoverflow.com/?')[1].split('" class="biglink')[0] except IndexError: #print "Error while checking preview", r.text return None print "Retrieved by using preview" if current_url[-2:] == "-m": #first chunk of several print "Retrieved chunk", current_url[:-2] final_data += current_url[:-2] iterations += 1 else: print "Retrieved final piece", current_url return final_data + current_url
def post_data(value): headers = { 'User-Agent': get_random_useragent() } with requests.session() as c: t = c.get('http://pastebin.com/', headers=headers) post_key = t.text.split('name="post_key"')[1].split("hidden")[0].split('value="')[1].split('"')[0] r = c.post('http://pastebin.com/post.php', data={ 'paste_name': "", 'paste_private': 1, 'paste_expire_date': "N", "paste_format": 1, "paste_code": value, "submit_hidden": "submit_hidden", "post_key": post_key }, headers=headers) try: key = r.text.split("/raw.php?i=")[1].split('"')[0].lower() print "CREATED http://pastebin.com/raw.php?i=" + key return key.encode("hex") except: #Woah, you have reached your paste limit of 10 pastes per 24 hours. [IP-based]. return None
def post_data(value): headers = {'User-Agent': get_random_useragent()} with requests.session() as c: t = c.get('http://pastebin.com/', headers=headers) post_key = t.text.split('name="post_key"')[1].split("hidden")[0].split( 'value="')[1].split('"')[0] r = c.post('http://pastebin.com/post.php', data={ 'paste_name': "", 'paste_private': 1, 'paste_expire_date': "N", "paste_format": 1, "paste_code": value, "submit_hidden": "submit_hidden", "post_key": post_key }, headers=headers) try: key = r.text.split("/raw.php?i=")[1].split('"')[0].lower() print "CREATED http://pastebin.com/raw.php?i=" + key return key.encode("hex") except: #Woah, you have reached your paste limit of 10 pastes per 24 hours. [IP-based]. return None
def post_data(key, value):
    """Store *value* across tinyurl aliases rooted at *key*, 1800 bytes per chunk.

    Chunk N (N > 0) is created under alias "<key>-N"; every chunk except
    the last is suffixed "-m" so the reader knows more chunks follow.
    """
    max_chunk_size = 1800
    indx = 0
    iterations = 0
    value_length = len(value)
    while indx < value_length:
        value_suffix = ""
        key_suffix = ""
        if iterations > 0:
            # on all iterations after the first, append -# to the alias
            # (a "-" separator, matching what the tinyurl get_data() expects)
            key_suffix = "-" + str(iterations)
        if (indx + max_chunk_size) < value_length:
            # if there are more chunks, append -m to the value
            value_suffix = "-m"
        headers = {"User-Agent": get_random_useragent()}
        postdata = {
            # Payload rides in the query string of a benign-looking URL.
            "url": "http://stackoverflow.com/?" + value[indx : indx + max_chunk_size] + value_suffix,
            "submit": "Make+TinyURL%21",
            "alias": key + key_suffix,
        }
        print "CREATED", "http://tinyurl.com/" + postdata["alias"]
        requests.post("http://tinyurl.com/create.php", params=postdata, headers=headers)
        indx += max_chunk_size
        iterations += 1