Example #1
def getSubpage(self, suburl):
    # build the request with the crawler's stored headers
    r = urllib2.Request(suburl, headers=self.headers)
    # read and decode the response
    re = urllib2.urlopen(r)
    subpage = re.read().decode("utf-8", "ignore")
    return subpage
Example #2
import urllib.request

def getHTMLTextUseUrlLib(url):
    try:
        r = urllib.request.Request(
            url,
            headers={
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1;Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'
            })
        re = urllib.request.urlopen(r)
        return re.read()
    except Exception:
        # any failure yields an empty string
        return ""
Example #3
def save_video(self, data):
    # form-encode the video metadata and POST it as UTF-8 bytes
    data1 = urllib.parse.urlencode({
        'source': 1,
        'sourceUrl': data['sourceUrl'],
        'title': data['title'],
        'authorName': data['authorName'],
        'releaseTime': data['releaseTime'],
        'video': data['video'],
    })
    data2 = data1.encode('utf-8')
    re = urllib.request.urlopen(url=self.post_video_url, data=data2)
    status = re.read().decode('utf-8')
    print('status:\n', status)
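For context, urlopen() switches from GET to POST as soon as a data argument is passed, sending it as an application/x-www-form-urlencoded body. A minimal standalone sketch of the same pattern (the endpoint URL is a placeholder):

import urllib.parse
import urllib.request

payload = urllib.parse.urlencode({'source': 1, 'title': 'demo'}).encode('utf-8')
re = urllib.request.urlopen('http://example.com/post_video', data=payload)
print(re.read().decode('utf-8'))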
Example #4
import os

def searchRv(txtnumber):
    datareview = "NULL"  # sentinel for a missing or unreadable review file
    try:
        name = str(txtnumber) + ".txt"
        if os.path.exists(name):
            re = open(name, 'r', encoding='ascii', errors='ignore')
            datareview = re.read()
            re.close()
    except Exception:
        print("Can't get Data")
    return datareview
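The same lookup can be written with a context manager so the file handle is closed even if read() raises; a minimal sketch:

import os

def search_rv(txtnumber):
    # hypothetical rewrite of searchRv using with
    name = str(txtnumber) + ".txt"
    if not os.path.exists(name):
        return "NULL"
    with open(name, 'r', encoding='ascii', errors='ignore') as re:
        return re.read()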
Example #5
def filter_data(self, details_url):
    # POST a fixed type/days filter, then check membership of details_url
    data1 = urllib.parse.urlencode({
        'type': 4,
        'days': 3,
    })
    data2 = data1.encode('utf-8')
    re = urllib.request.urlopen(url=self.filter_url, data=data2)
    status = re.read().decode('utf-8')
    result = json.loads(status)
    data = result['data']
    for kw in data:
        self.have_met.add(data[kw])
    return details_url in self.have_met
Example #6
def Modifysetting():
    clear_screen()
    with open("token.json", "r") as re:
        data = json.loads(re.read())
        re.close()
    print('APP_KEY :', data['Plurk']['APP_KEY'])
    print('APP_SECRET :', data['Plurk']['APP_SECRET'])
    print('ACCEESS_TOKEN :', data['Plurk']['ACCEESS_TOKEN'])
    print('ACCESS_TOKEN_SECRET :', data['Plurk']['ACCESS_TOKEN_SECRET'])
    print('Token :', data['Discord']['Token'])
    print('ServerID :', data['Discord']['ServerID'])
    print('ChannelID :', data['Discord']['ChannelID'])
    print('Prefix :', data['Discord']['Prefix'])
    print('BlockedWord :', data['BlockedWord'])
    print(modt)
    print(modif)
    choice = user_choice()
    if not choice == "0":
        print('\n' + userinputs[int(choice) - 1])
        instr = input("> ").replace(' ', '')
    if choice == "1":
        data['Plurk']['APP_KEY'] = instr
    elif choice == "2":
        data['Plurk']['APP_SECRET'] = instr
    elif choice == "3":
        data['Plurk']['ACCEESS_TOKEN'] = instr
    elif choice == "4":
        data['Plurk']['ACCESS_TOKEN_SECRET'] = instr
    elif choice == "5":
        data['Discord']['Token'] = instr
    elif choice == "6":
        data['Discord']['ServerID'] = instr
    elif choice == "7":
        data['Discord']['ChannelID'] = instr
    elif choice == "8":
        data['Discord']['Prefix'] = instr
    elif choice == "9":
        data = ModifysettinBlockedWord(data)
    elif choice == "0":
        with open("token.json", "w") as write_file:
            json.dump(data, write_file)
            write_file.close
        return True
    with open("token.json", "w") as write_file:
        json.dump(data, write_file)
        write_file.close
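The function follows a plain read-modify-write cycle on token.json. A minimal sketch of that cycle on its own, assuming the same file layout:

import json

with open("token.json", "r") as re:
    data = json.loads(re.read())
data['Discord']['Prefix'] = '!'            # modify one field
with open("token.json", "w") as write_file:
    json.dump(data, write_file, indent=4)  # indent is optional, for readability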
Example #7
def dirbrute(URL, agent, https):
    print PASS + "Starting brute mode."
    print PASS + "The URL is under scan."
    print INFO + "The scan can take several minutes."
    t = 6769  # remaining-test countdown (presumably the wordlist length)
    if agent:
        print INFO + "Requests under random User-agent.\n"
    else:
        print INFO + "Base User-Agent -- {'User-Agent': 'OnixIs/:)'}\n"

    wordlist = "wordlist"
    with open(wordlist, "r") as checks:
        for i in checks.readlines():
            DIR = i.strip("\n")
            try:
                link = "http://" + URL + "/" + DIR
                if https:
                    link = "https://" + URL + "/" + DIR

                if agent:
                    req = Request(link,
                                  headers={'User-Agent': random.choice(usera)})
                else:
                    req = Request(link, headers={'User-Agent': 'OnixIs/:)'})
                urlopen(req)
                print G + "[+]" + O + " Directory found " + URL + "/" + DIR
                t = t - 1
            except HTTPError:
                t = t - 1
                pass

            sys.stdout.write(INFO + str(t) + " remaining tests\r")
            sys.stdout.flush()

    if https:
        r = requests.get("https://" + URL + "/robots.txt")
    else:
        r = requests.get("http://" + URL + "/robots.txt")

    if r.status_code == 200:
        print PASS + "Display robots.txt content."
        # fetch robots.txt over the same scheme as the status check
        scheme = "https://" if https else "http://"
        re = urlopen(scheme + URL + "/robots.txt")
        t = re.read()
        print t
Example #8
def gethtml(url=''):
    try:
        re = urllib.request.urlopen(url, timeout=20)
    except urllib.error.HTTPError as e:
        print(e.code, e.reason)
        return ''
    except ValueError as e:
        print("Value Error!")
        return ''
    except UnicodeEncodeError as e:
        print("UnicodeEncodeError!")
        return ''
    except urllib.error.URLError as e:
        print("URLError:", e.reason)
        return ''
    except socket.timeout as e:
        print("socket timeout:", url)
        return ''
    except Exception:
        return ''
    # req=urllib.request.Request(url)
    # req.add_header('Accept-Encoding','deflate')
    # re=urllib.request.urlopen(req)
    return re.read()
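Note the mixed return type: gethtml() yields bytes on success and the empty str '' on failure, so callers should test truthiness rather than compare types; a usage sketch:

html = gethtml('http://example.com')
if html:
    print(len(html), 'bytes fetched')
else:
    print('fetch failed')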
Example #9
    str_free = re.search("MemFree:.*?\n", str_out).group()
    freemem = int(re.search("\d+", str_free).group()) // 1000

    mem_use = round(float(freemem) / float(totalmem), 2)
    text = "Server IP: {0}\nTotal memory: {1} M\nFree memory: {2} M\nMemory usage: {3}\n".format(
        host["ip"], totalmem, freemem, mem_use)
    print(text)
    arr.append(text)


def convert(arr):
    temp = ''
    for text in arr:
        temp += text + "\n"
    return temp


url = 'https://oapi.dingtalk.com/robot/send?access_token=af3a595521fe84c9644835c93ec3821d0ff3f4654549c8eb1663e69b36ecd2c2'
data = {
    "msgtype": "text",
    "text": {
        "content": '服务器性能监控: ' + str(dt_new) + '\n' + convert(arr)
    }
}
headers = {'Content-Type': 'application/json'}
request = urllib2.Request(url=url, headers=headers, data=json.dumps(data))
re = urllib2.urlopen(request, timeout=10)
re_data = re.read()
ssh.close()
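The webhook call above is Python 2 (urllib2, with json.dumps() returning a str body). A minimal Python 3 sketch of the same POST, with the access token elided:

import json
import urllib.request

url = 'https://oapi.dingtalk.com/robot/send?access_token=...'
body = json.dumps({'msgtype': 'text', 'text': {'content': 'hello'}}).encode('utf-8')
req = urllib.request.Request(url, data=body, headers={'Content-Type': 'application/json'})
re = urllib.request.urlopen(req, timeout=10)
print(re.read().decode('utf-8'))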
Example #10
def analysis_url(url):
    req = urllib.request.Request(url, headers=headers)
    re = urllib.request.urlopen(req)
    text = re.read()
    html = BeautifulSoup(text, 'html.parser')
    return html
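analysis_url() relies on a module-level headers dict; a usage sketch (the User-Agent value is illustrative):

headers = {'User-Agent': 'Mozilla/5.0'}
html = analysis_url('http://example.com')
for a in html.find_all('a'):
    print(a.get('href'))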
Example #11
from urllib import request
from urllib import parse
import json

file = open('/Users/jishu/Desktop/网易云逆向.rtf', mode='r')
text = file.read()  # read once; a second read() would return ''
print(text)
file.close()

# file2 = open('/Users/jishu/Desktop/python.txt', mode='x')
# file2.write('You know me, right? Hahahaha')

file3 = open('/Users/jishu/Desktop/python.txt', mode='a+')
file3.write('shide ba sfasdfasdf')

re = request.urlopen('http://www.baidu.com')
content = re.read()
string = str(content, encoding='utf8')
if not content:
    print('Request failed')
else:
    file3.write(string)

paras = parse.urlencode({
    'types': 'search',
    'count': 20,
    'source': 'netease',
    'pages': '1',
    'name': '是想你的声音啊'
})
paras = paras.encode('utf-8')
musicRequest = request.urlopen('http://y.webzcz.cn/api.php', data=paras)
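The snippet stops before reading the search response; a minimal follow-up sketch (the API's response shape is not specified here, so it is just printed):

result = musicRequest.read().decode('utf-8')
print(result)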
Example #12
def get_html(url):
	# reconstructed from context: the excerpt starts inside this function
	# (get_topics() below calls get_html(url)), and the retry branch shows
	# how req is built
	req = urllib2.Request(url)
	req.add_header("Cookie", utils.get_cookies_str())
	try:
		re = urllib2.urlopen(req)
	except urllib2.HTTPError:
		# refresh the cookie and retry once
		refresh_cookie()

		
		req2 = urllib2.Request(url)
		cookie_str = utils.get_cookies_str()
		req2.add_header("Cookie", cookie_str)
		time.sleep(2)
		re = urllib2.urlopen(req2)

	html = re.read()
	html = utils.html_to_utf8(html)
	with open("html", "wb") as f:
		f.write(html)
	return html




def get_topics(url):

	html = get_html(url)

	topics = []

	p = etree.HTMLParser(encoding = "utf-8")
	tree = etree.HTML(html, parser = p)
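get_topics() is cut off after building the lxml tree. For reference, a minimal self-contained sketch of the etree.HTMLParser pattern it uses (the markup and XPath are illustrative):

from lxml import etree

p = etree.HTMLParser(encoding="utf-8")
tree = etree.HTML("<html><body><a href='/t/1'>topic</a></body></html>", parser=p)
print(tree.xpath("//a/@href"))  # ['/t/1']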
Example #13
def getFeed():
    conn = httplib.HTTPSConnection("nvd.nist.gov")
    conn.request("GET", "/download/nvd-rss.xml")
    re = conn.getresponse()

    return re.read()
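httplib is Python 2; in Python 3 the same feed fetch goes through http.client. A minimal sketch:

import http.client

def get_feed():
    # hypothetical Python 3 port of getFeed()
    conn = http.client.HTTPSConnection("nvd.nist.gov")
    conn.request("GET", "/download/nvd-rss.xml")
    re = conn.getresponse()
    return re.read()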
Example #14
import urllib2
from bs4 import BeautifulSoup

re = urllib2.urlopen('http://www.hupu.com/')
html = re.read()

soup = BeautifulSoup(html, "html.parser")
print html
Example #15
File: fdb.py Project: smsouls/mor
def get_robots(url):
	"""Initialize robots parser for this domain"""
	rp = robotparser.RobotFileParser()
	rp.set_url(urlparse.urljoin(url, '/robots.txt'))
	rp.read()  # fetch and parse robots.txt
	return rp
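Once read() has fetched and parsed robots.txt, the parser answers per-URL questions; a usage sketch:

rp = get_robots('http://example.com/some/page')
print(rp.can_fetch('*', 'http://example.com/private/'))  # True or False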