Example 1
from http.cookiejar import MozillaCookieJar
from urllib.parse import urlencode
from urllib.request import Request, HTTPCookieProcessor, build_opener

from fake_useragent import UserAgent  # third-party package providing UserAgent().chrome


def get_cookie():

    # login URL
    login_url = "*****"

    # build spoofed request headers
    headers = {
        "User-Agent": UserAgent().chrome
    }

    # build the login form data
    form_data = {
        "user": "******",
        "password": "*****"
    }

    f_data = urlencode(form_data).encode()
    req = Request(login_url, headers=headers, data=f_data)  # build the request


    cookie_jar = MozillaCookieJar()    # cookie jar that can save to a file
    handler = HTTPCookieProcessor(cookie_jar)
    opener = build_opener(handler)  # build the opener object
    response = opener.open(req)  # send the request

    # save the cookies to a file
    cookie_jar.save("cookie.txt", ignore_expires=True, ignore_discard=True)
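A minimal follow-up sketch, assuming the login above succeeded and cookie.txt was written: the saved cookies can be loaded back into a fresh opener for later authenticated requests (the target URL is a placeholder).

from http.cookiejar import MozillaCookieJar
from urllib.request import HTTPCookieProcessor, build_opener

cookie_jar = MozillaCookieJar()
# reload the cookies persisted by get_cookie()
cookie_jar.load("cookie.txt", ignore_expires=True, ignore_discard=True)
opener = build_opener(HTTPCookieProcessor(cookie_jar))
response = opener.open("*****")  # "*****" stands for a page that requires the login cookie
print(response.read().decode())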
Example 2
    def url_request(self, action, url, **extra_data):
        #get or post
        abs_url = "http://%s:%s/%s" % (settings.configs['Server'],
                                       settings.configs['ServerPort'], url)
        if action in ('get', 'GET'):
            print(abs_url, extra_data)
            try:
                req = request.Request(abs_url)
                req_data = request.urlopen(
                    req, timeout=settings.configs['RequestTimeout'])
                callback = req_data.read()  # read the returned data
                #print(callback)
                return callback
            except error.URLError as e:
                exit("\033[31;1m%s\033(0m" % e)

        elif action in ('post', 'POST'):
            # print(abs_url,extra_data['params'])
            try:
                # urlencode lives in urllib.parse; this assumes `from urllib import parse` at module level
                data_encode = parse.urlencode(extra_data['params']).encode()
                req = request.Request(url=abs_url, data=data_encode)
                res_data = request.urlopen(
                    req, timeout=settings.configs['RequestTimeout'])
                callback = res_data.read()
                callback = json.loads(callback.decode("UTF-8"))
                print("\033[31;1m[%s]:[%s]\033[0m response:\n%s" % (action, abs_url,
                                                                    callback))
                return callback
            except Exception as e:
                print('---exec', e)
                exit("\033[31;1m%s\033[0m" % e)
Example 3
def make_signature(**params):
    # exclude the signature itself and internal "__" parameters from the data to sign
    sign_data = [(k, v) for k, v in params.items() if k != 'signature' and not k.startswith("__")]
    sorted_parameters = sorted(sign_data, key=lambda kv: kv[0])
    sorted_query_string = _standard_url(urlencode(sorted_parameters))
    string_to_sign = _standard_url(pathname2url(sorted_query_string))
    h = hmac.new(settings.YKT_ACCESS_KEY, string_to_sign, hashlib.sha1)
    signature = encodestring(h.digest()).strip()
    return signature
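For comparison, a Python 3 sketch of the same signing idea (sort the parameters, URL-encode them, then HMAC-SHA1 and Base64 the digest); the key handling is simplified and the _standard_url/pathname2url normalisation of the original is omitted, so treat it as an illustration rather than a drop-in replacement.

import base64
import hashlib
import hmac
from urllib.parse import urlencode

def make_signature_py3(secret_key, **params):
    # drop the signature field itself and internal "__" parameters before signing
    items = sorted((k, v) for k, v in params.items()
                   if k != 'signature' and not k.startswith('__'))
    string_to_sign = urlencode(items).encode('utf-8')
    digest = hmac.new(secret_key.encode('utf-8'), string_to_sign, hashlib.sha1).digest()
    return base64.b64encode(digest).decode('ascii')

print(make_signature_py3('my-secret', user='alice', ts='1700000000'))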
Example 4
def give_access(doc, opener):
    parser = FormParser()
    parser.feed(doc)
    parser.close()
    if not parser.form_parsed or parser.url is None:
        raise RuntimeError("Something wrong")
    if parser.method == "POST":
        # urllib.urlencode is the Python 2 spelling; on Python 3 use urllib.parse.urlencode
        response = opener.open(parser.url, urllib.urlencode(parser.params))
    else:
        raise NotImplementedError("Method '{}'".format(parser.method))
    return response.geturl()
Example 6
from urllib.request import getproxies, urlopen
from urllib.parse import quote


def check_profanity(qu):
    print(getproxies())
    # quote() escapes the query text; urlencode() expects a mapping, not a bare string
    connection = urlopen("http://www.wdyl.com/profanity?q=" + quote(str(qu)))
    output = connection.read().decode()
    if "true" in output:
        print("Profanity Alert!!!")
    elif "false" in output:
        print("This document has no curse words!")
    else:
        print("Please manually check for profanity")
    connection.close()
Example 8
def search(kwd=''):
    if kwd == '':
        kwd = choice(iJustWantUtoStudy)
    # urlencode() takes a mapping, so build the query from a dict after kwd is settled
    search_url = "https://www.bing.com/search?" + urlencode({'q': kwd})
    wk_page = wikipedia(kwd)
    if wk_page:
        return [
            'Found the following on Wikipedia:\n' + wk_page.url + '\n\n' + wk_page.summary,
            wk_page.images[0]
        ]
    else:
        return search_url
Example 9
def wiki_search(term, url=None):
    # quote() (urllib.parse) escapes the search term; urlencode() expects a mapping
    term = quote(term.replace(" ", "_"))
    url = (url or "https://en.wikipedia.org/w/api.php%s") % ("?action=query&prop=pageprops&format=json&generator=search&cllimit=max&gsrsearch=")
    url += term

    data = requests.get(url, headers=headers).json()
    pages = sorted(data['query']['pages'].values(), key=lambda x: x['index'])

    # drop disambiguation pages and strip bookkeeping fields
    pages = [page for page in pages if 'disambiguation' not in page.get('pageprops', {})]
    pages = [{key: t[key] for key in t if key not in ("ns", "index")} for t in pages]

    return [p['title'] for p in pages]
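A possible call, with an arbitrary search term:

print(wiki_search("Python (programming language)"))  # -> a list of matching article titles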
Example 10
def pronounce(word, type=0):
    '''
    Pronounce English words
    Args:
        type: American pronunciation(0) or British pronunciation(1)
        word: word to pronounce
    Returns:
        String, link
    '''
    info('Getting pronunciation: ' + word)
    api_url = 'http://dict.youdao.com/dictvoice?type=' + str(type) + '&audio='
    # quote() (urllib.parse) escapes the word; urlencode() would require a mapping
    return api_url + quote(word)
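Since urlencode expects a mapping, an equivalent way to build the same link is to encode both query parameters at once; a small sketch, not part of the original code:

from urllib.parse import urlencode

def pronounce_url(word, type=0):
    # both parameters end up properly escaped in the query string
    return 'http://dict.youdao.com/dictvoice?' + urlencode({'type': type, 'audio': word})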
Example 11
def wikipedia_get(bot, nick, chan, arg, root=None):
    """ wiki <page> -> Get the first two sentences in a wikipedia article. """
    if not arg:
        return bot.msg(chan, get_doc())
    name = arg
    if ' (' in arg:
        name = arg.split()[0]
    term = arg.replace(" ", "_")
    # quote() (urllib.parse) escapes the page title; urlencode() expects a mapping
    term = quote(term)

    url = root or "http://en.wikipedia.org"
    url += ("/w/api.php?action=parse&format=json&prop=text&page=%s&redirects=" % (term))

    response = requests.get(url, headers=headers)
    res = response.json()
    if res.get("error", None):
        return bot.msg(chan, "%s: Error: %s" % (nick, res['error']['info']))

    soup = soupify(res['parse']['text']['*'], "lxml")
    paragraph = soup.find('p')
    url = "http://en.wikipedia.org/wiki/%s"
    htmlurl = url % term

    for i in soup.find_all('b'):
        i.string = "%s" % (bot.hicolor(i.text))
    
    if soup.find("table", id="disambigbox") is not None:
        bot.msg(chan, "%s (%s) points to a disambiguation page." % (arg, shorten(htmlurl)))
        return

    if res['parse'].get('redirects', None):
        if res['parse']['redirects'][0].get("tofragment", None):
            anchor = res['parse']['redirects'][0]['tofragment']
            paragraph = soup.find("span", {"id": anchor}).findNext("p")
            htmlurl = url % (res['parse']['redirects'][0]['to']) + "#%s" % (anchor)
        elif res['parse']['redirects'][0].get("to", None):
            htmlurl = url % (res['parse']['redirects'][0]['to'].replace(" ", "_"))
    sentences = bot.state.data["sentence_re"].findall(paragraph.text)[:2]
    readmore = bot.style.color("\u2192 %s\x0f" % (bot.state.data['shortener'](bot, htmlurl)), color="blue")
    text = ''.join(sentences)
    text = re.sub(r"\[\d+\]", "", text)  # strip footnote markers such as [1]
    output = "%s %s" % (text, readmore)

    bot.msg(chan, '\n'.join(lines(output)))
    time.time()
Example 12
def getweather(location):
    try:
        url = "http://woeid.rosselliot.co.nz/lookup/"
        loc = location
        # quote() (urllib.parse) escapes the place name; urlencode() expects a mapping
        qry = quote(loc.lower().replace(",", " "))
        req = url+qry
        rsp = requests.get(req)
        bsp = soupify(rsp.text, "lxml")
        tbl = bsp.findAll("table")[0]
        ids = [x.attrs for x in list(tbl)[1:]]

        for x in ids[0:1]:
            w = Weather(x)
            if 'condition' in w.__dict__.keys():
                yield w.form()
            else:
                print(w.__dict__)
    except Exception as e:
        yield "NoSuchPlaceError: '%s' not found (on earth)" %(loc)
Example 13
def post(url, headers={}, data={}, json={}, as_json=False):
    """
    Make a HTTP post request and return response

    `Required`
    :param str url:       URL of target web page

    `Optional`
    :param dict headers:  HTTP request headers
    :param dict data:     HTTP request POST data
    :param dict json:     POST data in JSON format
    :param bool as_json:  return JSON formatted output

    """
    try:
        import requests
        req = requests.post(url, headers=headers, data=data, json=json)
        output = req.content
        if as_json:
            try:
                output = req.json()
            except:
                pass
        return output
    except ImportError:
        import sys
        if sys.version_info[0] > 2:
            from urllib.request import urlopen, Request
            from urllib.parse import urlencode  # urlencode moved to urllib.parse in Python 3
        else:
            from urllib import urlencode
            from urllib2 import urlopen, Request
        data = urlencode(data)
        if sys.version_info[0] > 2:
            data = data.encode()  # urlopen needs bytes for POST data on Python 3
        req = Request(str(url), data=data)
        for key, value in headers.items():
            req.headers[key] = value
        output = urlopen(req).read()
        if as_json:
            import json
            try:
                output = json.loads(output)
            except:
                pass
        return output
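A hedged usage example; httpbin.org is used here only as a stand-in echo service and is not part of the original module:

result = post('https://httpbin.org/post',
              headers={'X-Token': 'secret'},
              data={'user': 'alice'},
              as_json=True)
print(result.get('form'))  # httpbin echoes the submitted form fields back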
Example 15
def _open(cgi, params=None, get=1):
    """Open a handle to SCOP and return it (PRIVATE).

    Open a handle to SCOP.  cgi is the URL for the cgi script to access.
    params is a dictionary with the options to pass to it.  get is a boolean
    that describes whether a GET should be used.

    """
    from urllib.request import urlopen
    from urllib.parse import urlencode  # urlencode lives in urllib.parse, not urllib.request

    # Open a handle to SCOP.
    if params is None:
        params = {}
    options = urlencode(params)
    if get:  # do a GET
        if options:
            cgi += "?" + options
        handle = urlopen(cgi)
    else:  # do a POST
        handle = urlopen(cgi, data=options.encode())
    return handle
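A sketch of how the helper is meant to be called (the CGI URL and parameters are placeholders):

# GET: the parameters are appended to the query string
handle = _open("https://scop.berkeley.edu/cgi-bin/search.cgi", {"key": "value"}, get=1)
# POST: the same parameters are sent in the request body instead
handle = _open("https://scop.berkeley.edu/cgi-bin/search.cgi", {"key": "value"}, get=0)
print(handle.read()[:200])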
Example 16
# 3. Cases that require logging in

import http.cookiejar
from urllib import request, parse

# 3.1 Cookie handling: HTTPCookieProcessor needs a CookieJar *instance*
cookie_support = request.HTTPCookieProcessor(http.cookiejar.CookieJar())
opener = request.build_opener(cookie_support, request.HTTPHandler)

request.install_opener(opener)

# content = request.urlopen('https://www.baidu.com/').read()

# 3.2 Form handling

postdata = parse.urlencode({
    'username': '******',
    'password': '******',
    'continueURI': 'http://www.verycd.com/',
    'fk': 'fk',
    'login_submit': '登录'
}).encode()  # urlopen expects bytes for POST data

# Then build the HTTP request and send it:

req = request.Request(
    url='http://secure.verycd.com/signin/*/http://www.verycd.com/',
    data=postdata
)
result = request.urlopen(req).read()

# 3.3 Masquerading as a browser
'''Some sites dislike crawler visits and reject such requests outright. In that case we need to
pretend to be a browser, which can be done by modifying the headers of the HTTP request.'''
headers = {
    # any real browser User-Agent string works here
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0 Safari/537.36'
}
Example 17
if len(sys.argv) > 3:
    endpoint = "http://" + sys.argv[3] + "/grafter-ws/formTransform"
print(endpoint)


headers = {}
headers["Content-type"] = "application/x-www-form-urlencoded"

csvString = ""
with open(input_csv, "r") as csv:
    csvString = csv.read()


with open(output_rdf, "wb") as rdf:
    # encode() because urlopen expects bytes for POST data
    request = Request(url=endpoint, data=urlencode({'csv': csvString, 'output': output_rdf}).encode(), headers=headers)
    try:
        request.get_method = lambda: "POST"
        result = urlopen(request)
        print("HTTP response:", result.getcode())

        while True:
            buffer = result.read()
            if not buffer:
                break
            rdf.write(buffer)
    except HTTPError as error:
        print(error)
        print(error.read())

Example 18
def get_dce_daily(date=None):
    """
        获取大连商品交易所日交易数据
    Parameters
    ------
        date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
    Return
    -------
        DataFrame
            大商所日交易数据(DataFrame):
                symbol        合约代码
                date          日期
                open          开盘价
                high          最高价
                low           最低价
                close         收盘价
                pre_close     前收盘价
                volume        成交量
                open_interest 持仓量
                turnover      成交额
                settle        结算价
                pre_settle    前结算价
                variety       合约类别
        或 None(给定日期没有交易数据)
    """
    # the `date` parameter shadows any imported date class, so fall back via the
    # datetime module (assumes `import datetime` at module level)
    day = ct.convert_date(date) if date is not None else datetime.date.today()
    url = ct.DCE_DAILY_URL + '?' + urlencode(
        {
            "currDate": day.strftime('%Y%m%d'),
            "year": day.strftime('%Y'),
            "month": str(int(day.strftime('%m')) - 1),
            "day": day.strftime('%d')
        })
    response = urlopen(Request(url, method='POST',
                               headers=ct.DCE_HEADERS)).read().decode('utf8')
    if u'错误:您所请求的网址(URL)无法获取' in response:
        return
    elif u'暂无数据' in response:
        return

    data = BeautifulSoup(response, 'html.parser').find_all('tr')
    if len(data) == 0:
        return

    dict_data = list()
    for idata in data[1:]:
        if u'小计' in idata.text or u'总计' in idata.text:
            continue
        x = idata.find_all('td')
        row_dict = dict()
        for i, field in enumerate(ct.DCE_COLUMNS):
            if i == 0:
                row_dict[field] = ct.DCE_MAP[x[i].text.strip()]
            elif i >= 2:
                if '-' in x[i].text.strip():
                    row_dict[field] = 0.0
                else:
                    row_dict[field] = float(x[i].text.strip().replace(',', ''))
            else:
                row_dict[field] = x[i].text.strip()
        dict_data.append(row_dict)
    df = pd.DataFrame(dict_data)
    df['date'] = day.strftime('%Y%m%d')
    df['pre_close'] = df.close - df.change1
    df['symbol'] = df.variety + df.month
    return df[ct.OUTPUT_COLUMNS]
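A brief usage sketch; the date is arbitrary and a non-trading day simply returns None:

df = get_dce_daily('2018-01-05')
if df is not None:
    print(df[['symbol', 'open', 'close', 'settle']].head())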
Example 19
#endpoint = "http://192.168.11.43:8080/jarfter/webresources/jarCreatorStandAlone"
endpoint = "http://localhost:8088/jarfter/webresources/jarCreatorStandAlone"

headers = {}
#headers["Content-Length"]= "%d" % os.stat(input_clj).st_size
#headers["Content-type"] = "txt/text; charset=utf-8"
headers["Content-type"] = "application/x-www-form-urlencoded"

clojureSource = ""
with open(input_clj, "r") as clj:
    clojureSource = clj.read()

#with open(input_clj, "r") as clj:
with open(output_jar, "wb") as jar:
    #params = ('clojure': clj)
    # encode() because urlopen expects bytes for POST data
    request = Request(url=endpoint, data=urlencode({'clojure': clojureSource}).encode(), headers=headers)
    try:
        request.get_method = lambda: "POST"
        result = urlopen(request)
        print("HTTP response:", result.getcode())

        while True:
            buffer = result.read()
            if not buffer:
                break
            jar.write(buffer)
    except HTTPError as error:
        print(error)
        print(error.read())

#headers["Content-Length"]= "%d" % os.stat(input_clj).st_size
#headers["Content-type"] = "txt/text; charset=utf-8"
headers["Content-type"] = "application/x-www-form-urlencoded"
#headers["transformed_filename"] = output_rdf

clojureString = ""
with open(input_clj, "r") as clj:
    clojureString = clj.read()

csvString = ""
with open(input_csv, "r") as csv:
    csvString = csv.read()


with open(output_rdf, "wb") as rdf:
    # encode() because urlopen expects bytes for POST data
    request = Request(url=endpoint, data=urlencode({'clojure': clojureString, 'csv': csvString, 'transformed_filename': output_rdf}).encode(), headers=headers)
    try:
        request.get_method = lambda: "POST"
        result = urlopen(request)
        print("HTTP response:", result.getcode())

        while True:
            buffer = result.read()
            if not buffer:
                break
            rdf.write(buffer)
    except HTTPError as error:
        print(error)
        print(error.read())

Example 21
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2017-09-23 17:10:37
# @Author  : Your Name ([email protected])
# @Link    : http://example.org
# @Version : $Id$

from urllib.request import urlopen
from urllib.parse import urlencode  # urlencode lives in urllib.parse, not urllib.request
from urllib.error import URLError, HTTPError
from bs4 import BeautifulSoup

# POST
values = {'username': "******", 'password': '******'}
data = urlencode(values).encode()  # urlopen expects bytes for POST data

# values = {}
# values['username'] = "******"
# values['password'] = "******"
# data = urllib.urlencode(values)

# print data

url = "http://passport.csdn.net/account/login?from=http://my.csdn.net/my/mycsdn"
html = urlopen(url, data)
html = BeautifulSoup(html.read(), 'lxml')

print(html)
Example 22
    def weblogo(self, fname, format="PNG", version="2.8.2", **kwds):
        """Download and save a weblogo using the Berkeley weblogo service.

        Requires an internet connection.

        The parameters from ``**kwds`` are passed directly to the weblogo server.

        Currently, this method uses WebLogo version 3.3.
        These are the arguments and their default values passed to
        WebLogo 3.3; see their website at http://weblogo.threeplusone.com
        for more information::

            'stack_width' : 'medium',
            'stacks_per_line' : '40',
            'alphabet' : 'alphabet_dna',
            'ignore_lower_case' : True,
            'unit_name' : "bits",
            'first_index' : '1',
            'logo_start' : '1',
            'logo_end': str(self.length),
            'composition' : "comp_auto",
            'percentCG' : '',
            'scale_width' : True,
            'show_errorbars' : True,
            'logo_title' : '',
            'logo_label' : '',
            'show_xaxis': True,
            'xaxis_label': '',
            'show_yaxis': True,
            'yaxis_label': '',
            'yaxis_scale': 'auto',
            'yaxis_tic_interval' : '1.0',
            'show_ends' : True,
            'show_fineprint' : True,
            'color_scheme': 'color_auto',
            'symbols0': '',
            'symbols1': '',
            'symbols2': '',
            'symbols3': '',
            'symbols4': '',
            'color0': '',
            'color1': '',
            'color2': '',
            'color3': '',
            'color4': '',

        """
        from urllib.request import urlopen, Request
        from urllib.parse import urlencode  # urlencode lives in urllib.parse on Python 3

        if set(self.alphabet) == set("ACDEFGHIKLMNPQRSTVWY"):
            alpha = "alphabet_protein"
        elif set(self.alphabet) == set("ACGU"):
            alpha = "alphabet_rna"
        elif set(self.alphabet) == set("ACGT"):
            alpha = "alphabet_dna"
        else:
            alpha = "auto"

        frequencies = self.format("transfac")
        url = "http://weblogo.threeplusone.com/create.cgi"
        values = {
            "sequences": frequencies,
            "format": format.lower(),
            "stack_width": "medium",
            "stacks_per_line": "40",
            "alphabet": alpha,
            "ignore_lower_case": True,
            "unit_name": "bits",
            "first_index": "1",
            "logo_start": "1",
            "logo_end": str(self.length),
            "composition": "comp_auto",
            "percentCG": "",
            "scale_width": True,
            "show_errorbars": True,
            "logo_title": "",
            "logo_label": "",
            "show_xaxis": True,
            "xaxis_label": "",
            "show_yaxis": True,
            "yaxis_label": "",
            "yaxis_scale": "auto",
            "yaxis_tic_interval": "1.0",
            "show_ends": True,
            "show_fineprint": True,
            "color_scheme": "color_auto",
            "symbols0": "",
            "symbols1": "",
            "symbols2": "",
            "symbols3": "",
            "symbols4": "",
            "color0": "",
            "color1": "",
            "color2": "",
            "color3": "",
            "color4": "",
        }

        values.update({k: "" if v is False else str(v) for k, v in kwds.items()})
        data = urlencode(values).encode("utf-8")
        req = Request(url, data)
        response = urlopen(req)
        with open(fname, "wb") as f:
            im = response.read()
            f.write(im)
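A short usage sketch, assuming m is a Bio.motifs motif object built elsewhere:

# downloads a PNG sequence logo for the motif from the weblogo web service
m.weblogo("my_motif_logo.png")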
Example 23
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct  3 19:19:56 2018

@author: wesleyz
"""

import json

try:
    # Python 3: urlencode is in urllib.parse, urlopen in urllib.request
    from urllib.request import urlopen
    from urllib.parse import urlencode
except ImportError:
    # Python 2 fallback
    from urllib2 import urlopen
    from urllib import urlencode

baseurl = "https://query.yahooapis.com/v1/public/yql?"
yql_query = "select wind from weather.forecast where woeid=2460286"
yql_url = baseurl + urlencode({'q': yql_query}) + "&format=json"
result = urlopen(yql_url).read()
data = json.loads(result)

print(data['query']['results'])
Example 24
    p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
                              version='%prog ' + __version__)
    args = p.parse_args()[1]
    if len(args) > 0:
        file_ = args[0]
        encoding = None
        if len(args) == 2:
            encoding = args[1]
        if len(args) > 2:
            p.error('Too many arguments')

        if file_.startswith('http://') or file_.startswith('https://'):
            baseurl = file_
            req_headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.A.B.C Safari/525.13','Referer': 'http://python.org'}

            # send the headers as request headers, not as URL-encoded POST data
            # (assumes urllib2 is imported at module level on Python 2)
            j = urllib2.urlopen(urllib2.Request(baseurl, headers=req_headers))
            text = j.read()
            if encoding is None:
                try:
                    from feedparser import _getCharacterEncoding as enc
                except ImportError:
                    enc = lambda x, y: ('utf-8', 1)
                encoding = enc(j.headers, text)[0]
                if encoding == 'us-ascii':
                    encoding = 'utf-8'
            data = text.decode(encoding, 'ignore')

        else:
            data = open(file_, 'rb').read()
            if encoding is None:
                try:
Example 25
    'used': userData['used'],
    'isTrial': int(userData['isTrial']),
    'days': userData['days'],
    'registerDate': userData['registerDate'],
    'lastUsedDate': userData['lastUpdate'],
    'referenceCount': len(referenceList),
    'nameSpaceList': ','.join(nameSpaceList),
    'os': str(cmds.about(operatingSystem=True))
}

url = 'https://hook.integromat.com/gnjcww5lcvgjhn9lpke8v255q6seov35'
if sys.version[0] == '3':  #python 3
    import urllib.parse
    params = urllib.parse.urlencode(data)
else:  #python 2
    params = uLib.urlencode(data)
conn = uLib.urlopen('{}?{}'.format(url, params))
print(conn.read())
#print(conn.info())

# Supporter Coding
# Force Update for 1 month since 1 oct 2020
try:
    updateSource = 'source "' + projectDir.replace(
        '\\', '/') + '/BRS_DragNDrop_Update.mel' + '";'
    mel.eval(updateSource)
except:
    pass

# Fix Distance Slider
cmds.floatSlider(distanceS, e=True, minValue=0.01, maxValue=500, value=2)
Example 26
def wiki_get(bot, nick, chan, arg, searchkey=None, api_url=None):
    """ wiki <page> -> Get a summary of a wikipedia article. """
    if not arg:
        return bot.msg(chan, get_doc())

    searchkey = searchkey or arg

    # Get the article name, so we can highlight it later in the function
    article_name = arg
    if (' (' in arg):
        article_name = arg.split()[0]

    # quote() (urllib.parse) escapes the page title; urlencode() expects a mapping
    term = quote(arg.replace(" ", "_"))

    url = (api_url or "https://en.wikipedia.org/w/api.php%s") % ("?action=parse&format=json&prop=text&redirects=&page=")
    url += term

    response = requests.get(url, headers=headers).json()

    if response.get("error", None):
        if response['error']['code'] == "missingtitle":
            # the page we specified does not exist. We'll guess on the basis of search, then.
            return wiki_get(bot, nick, chan, wiki_search(arg, url=api_url)[0], searchkey=arg, api_url=api_url)
        else:
            return bot.msg(chan, "Error: %s" % (response['error']['info']))

    soup = soupify(response['parse']['text']['*'], "lxml")

    # Is the page a disambiguation page? If so, search for the page title and get other pages with similar names

    if soup.find("table", id="disambigbox") is not None:
        return wiki_get(bot, nick, chan, wiki_search(arg, url=api_url)[0], searchkey=arg, api_url=api_url)

    # Get the paragraph and the html url from the soup

    paragraph = get_paragraph(response, soup)
    html_url = get_html_url(response, soup, term)
    print(html_url)

    # First, highlight all of the bold terms in the page
    for i in soup.find_all("b"):
        i.string = "%s" % (bot.hicolor(i.text))

    # Get the sentences
    sentences = ''.join(bot.state.data["sentence_re"].findall(paragraph.text)[:2])

    # Replace footnotes in the sentences
    sentences = re.sub(r"\[\d+\]", "", sentences)

    # Get other search results
    results = wiki_search(searchkey)[1:4]

    # Get a shortlink
    short_link = bot.state.data["shortener"](bot, html_url)

    output = "%s %s" % (sentences, bot.style.color("\u2192%s" % short_link, "blue"))

    bot.msg(chan, output)

    # other possible pages
    bot.msg(chan, bot.style.color("Did you mean: %s" % (' - '.join(results)), "gray"))
Example 27
server.sendmail('*****@*****.**', '*****@*****.**',
                """to: [email protected]
from: [email protected]

baresasdada
""")
server.quit()

# Handling a GET request: if no data is passed, the request is a GET
from urllib.request import urlopen, Request
from urllib.parse import urlencode  # urlencode lives in urllib.parse, not urllib.request

url = 'http://www.xxx.com/login'
data = {"username": "******", "password": 123456}
req_data = urlencode(data)  # convert the dict of request data to a URL-encoded string
res = urlopen(url + '?' + req_data)  # open the assembled URL with urlopen
# read() fetches the response body; decode() turns the returned bytes into str
res = res.read().decode()

# Handling a POST request: if data is passed, the request becomes a POST
url = 'http://www.xxx.com/login'
data = {"username": "******", "password": 123456}
data = urlencode(data)
data = data.encode('ascii')  # convert the URL-encoded string into bytes
req_data = Request(url, data)  # wrap the URL and data in a Request object for urlopen

# read() fetches the response body; decode() turns the returned bytes into str
with urlopen(req_data) as res:
    res = res.read().decode()