def getHtml(url):
    request = urllib3.Request(url, headers=headers)
    try:
        response = urllib3.urlopen(request)
        html = response.read()
        return html
    except urllib3.URLError as e:
        print(e.reason)
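getHtml() relies on a module-level headers dict that the snippet does not show, and the urllib3.Request / urllib3.urlopen calls follow the urllib2 / urllib.request interface rather than the API of the urllib3 package itself. A minimal self-contained sketch of the same idea against the standard library (the headers value below is an assumed placeholder):

import urllib.request
import urllib.error

headers = {'User-Agent': 'Mozilla/5.0'}  # assumed placeholder, not taken from the original snippet

def get_html(url):
    request = urllib.request.Request(url, headers=headers)
    try:
        with urllib.request.urlopen(request) as response:
            return response.read()
    except urllib.error.URLError as e:
        print(e.reason)
        return None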
Example #2
 def redirect_request(self, req, fp, code, msg, headers, newurl):
     newurl = newurl.replace(' ', '%20')
     if code in (301, 307):
         return urllib3.Request(newurl,
                                data=req.get_data(),
                                headers=req.headers,
                                origin_req_host=req.get_origin_req_host(),
                                unverifiable=True)
     elif code in (302, 303):
         newheaders = dict((k, v) for k, v in req.headers.items()
                           if k.lower() not in ("content-length", "content-type"))
         return urllib3.Request(newurl,
                                headers=newheaders,
                                origin_req_host=req.get_origin_req_host(),
                                unverifiable=True)
     else:
         raise urllib3.HTTPError(req.get_full_url(), code, msg, headers, fp)
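redirect_request() above is an override of HTTPRedirectHandler.redirect_request, but the enclosing class is not shown. A sketch of how such a handler is typically wired into an opener using the standard library, keeping only the space-escaping idea and delegating the rest to the default handler (the class name is hypothetical):

import urllib.request

class SpaceEscapingRedirectHandler(urllib.request.HTTPRedirectHandler):
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        # Percent-encode spaces, then fall back to the default redirect handling.
        newurl = newurl.replace(' ', '%20')
        return super().redirect_request(req, fp, code, msg, headers, newurl)

opener = urllib.request.build_opener(SpaceEscapingRedirectHandler())
urllib.request.install_opener(opener)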
Example #3
def delete_access_token(token):
    path = nest_tokens_path + token
    req = urllib3.Request(nest_api_root_url + path, None)
    req.get_method = lambda: "DELETE"
    response = urllib3.urlopen(req)
    resp_code = response.getcode()
    print("deleted token, response code: ", resp_code)
    return resp_code
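delete_access_token() forces the HTTP verb by replacing req.get_method with a lambda. On Python 3.3+ the standard library's Request accepts a method argument directly; a sketch under that assumption (the URL constants below are placeholders, not the original values):

import urllib.request

nest_api_root_url = "https://developer-api.nest.com"  # placeholder
nest_tokens_path = "/oauth2/access_tokens/"           # placeholder

def delete_access_token(token):
    req = urllib.request.Request(nest_api_root_url + nest_tokens_path + token, method="DELETE")
    with urllib.request.urlopen(req) as response:
        resp_code = response.getcode()
    print("deleted token, response code:", resp_code)
    return resp_code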
Example #4
def get_post(url):
    print("IN GETPOST")
    req = urllib3.Request(url, None, headers)
    req2 = urllib3.urlopen(req)
    page = req2.read()
    page = str(page)
    page = page.replace("false", "False")
    data = ast.literal_eval(page)
    next_url = get_next_url(data)
    save_to_db(data, next_url)
Example #5
 def prepare(self, data=None, method='GET', type='application/json'):
     full_resource_url = self._compose_resource_path()
     if data:
         data = self._convert(data)
     if self.debug:
         print(full_resource_url)
     request = urllib3.Request(full_resource_url, data if data else None,
                               {'Content-Type': type})
     request.get_method = lambda: method
     return request
Example #6
def get_access_token(authorization_code):
    data = urllib.urlencode({
        'client_id': product_id,
        'client_secret': product_secret,
        'code': authorization_code,
        'grant_type': 'authorization_code'
    })
    req = urllib3.Request(nest_access_token_url, data)
    response = urllib3.urlopen(req)
    data = json.loads(response.read())
    return data['access_token']
Example #7
def get_comments_count(val):
    print("In Comments")
    urllike = "https://graph.facebook.com/%s?fields=comments.limit(0).summary(true)&access_token=%s" % (
        val, token)
    req = urllib3.Request(urllike, None, headers)
    req2 = urllib3.urlopen(req)
    page = req2.read()
    page = str(page)
    data = ast.literal_eval(page)
    total_count_comment = data['comments']['summary']['total_count']
    return total_count_comment
Example #8
def interfaceTest(api_url,parameter):
    params = urllib.urlencode(parameter)
    headers = {"Content-type": "application/x-www-form-urlencoded; charset=UTF-8"}
    req = urllib3.Request(url=api_url, data=params, headers=headers)
    response = urllib3.urlopen(req)
    a = json.loads(response.read())
    if a["err"] == "ok":
        if len(a["data"]) == 0:
            send_mail(mailto_list, 'data', api_url + '\nThe data is None!!!')
    else:
        send_mail(mailto_list, 'err','"err" not equal to "ok"')
Example #9
def parse(url):
    request = urllib3.Request(url)

    request.add_header('User-Agent',
                       'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1)')
    try:
        http = BeautifulSoup(urllib3.urlopen(request), "lxml")
    except Exception:
        print("- Error parsing %s" % url)
        return None
    return http
Example #10
 def support_point(self, url):
     req = urllib3.Request(url)
     req.add_header('Range', 'bytes=0-20')
     res = urllib3.urlopen(req)
     content_length = res.headers['Content-Length']
     content_range = res.headers['Content-Range']
     if content_length and content_range:
         m = re.match('bytes 0-', content_range)
         if m:
             size = content_range.split('/')[1]
             return True,size
     return False,0
Example #11
def get_api(url):
    USER='******'
    API_TOKEN='ef77171c9449656ca7bfb3bfe082ef2238e5b102'
    GIT_API_URL='https://api.github.com'
    try:
        request = urllib3.Request(GIT_API_URL + url)
        base64string = base64.encodestring('%s/token:%s' % (USER, API_TOKEN)).replace('\n', '')
        request.add_header("Authorization", "Token %s" % base64string)
        result = urllib3.urlopen(request)
        result.close()
    except Exception:
        print('Failed to get api request')
Example #12
def download_web_image(url):
    #name = random.randrange(1,1000)
    full_name = 'j2' + ".jpg"
    request = urllib3.Request(url)
    img = urllib3.urlopen(request).read()
    # urlopen().read() returns bytes, so open the file in binary mode
    with open(full_name, 'wb') as f:
        f.write(img)
Example #13
def api_request(url, values):
    data = urllib.urlencode(values)
    context = ssl._create_unverified_context()

    try:
        request = urllib3.Request(url, data)
        return urllib3.urlopen(request, context=context).read()

    except urllib3.URLError:
        print("\n\033[1;31;40m[Error] : Connecting to {url}. Check IP address."
              .format(url=url) + "\033[0m")
        return None
Example #14
def _fetch_with_rate_limit(url):
    authorized_url = _authorize_url(url)
    request = urllib3.Request(authorized_url)

    try:
        return urllib3.urlopen(request)
    except urllib3.HTTPError as e:
        if e.code == 429:
            _mark_token_as_exceeded()
            return _fetch_with_rate_limit(url)
        else:
            raise e
Example #15
def main(args):
    urllib3.disable_warnings()
    parser = argparse.ArgumentParser(description='Scrape Google images')
    parser.add_argument('-s',
                        '--search',
                        default='bananas',
                        type=str,
                        help='search term')
    parser.add_argument('-n',
                        '--num_images',
                        default=10,
                        type=int,
                        help='num images to save')
    parser.add_argument('-d',
                        '--directory',
                        default='/Users/gene/Downloads/',
                        type=str,
                        help='save directory')
    args = parser.parse_args()
    query = args.search  #raw_input(args.search)
    max_images = args.num_images
    save_directory = args.directory
    image_type = "Action"
    query = query.split()
    query = '+'.join(query)
    url = "https://www.google.co.in/search?q=" + query + "&source=lnms&tbm=isch"
    header = {
        'User-Agent':
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
    }
    soup = get_soup(url, header)
    ActualImages = []  # contains the link for large original images and the image type
    for a in soup.find_all("div", {"class": "rg_meta"}):
        link, Type = json.loads(a.text)["ou"], json.loads(a.text)["ity"]
        ActualImages.append((link, Type))
    for i, (img, Type) in enumerate(ActualImages[0:max_images]):
        try:
            req = urllib3.Request(img, headers=header)
            raw_img = urllib3.urlopen(req).read()
            if len(Type) == 0:
                f = open(
                    os.path.join(save_directory,
                                 "img" + "_" + str(i) + ".jpg"), 'wb')
            else:
                f = open(
                    os.path.join(save_directory,
                                 "img" + "_" + str(i) + "." + Type), 'wb')
            f.write(raw_img)
            f.close()
        except Exception as e:
            print("could not load : " + img)
            print(e)
Example #16
    def getHtml(self, url):
        if url is None:
            return None

        req = urllib3.Request(url)
        response = urllib3.urlopen(req)

        if response.getcode() != 200:
            return None
        data = response.read()

        return data
Example #17
    def sendTemplateSMS(self, to, datas, tempId):

        self.accAuth()
        nowdate = datetime.datetime.now()
        self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
        # Generate the signature
        signature = self.AccountSid + self.AccountToken + self.Batch
        sig = md5.new(signature).hexdigest().upper()
        # Build the request URL
        url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/SMS/TemplateSMS?sig=" + sig
        # Generate the auth header
        src = self.AccountSid + ":" + self.Batch
        auth = base64.encodestring(src).strip()
        req = urllib3.Request(url)
        self.setHttpHeader(req)
        req.add_header("Authorization", auth)
        # Build the request body
        b = ''
        for a in datas:
            b += '<data>%s</data>' % (a)

        body = '<?xml version="1.0" encoding="utf-8"?><SubAccount><datas>' + b + '</datas><to>%s</to><templateId>%s</templateId><appId>%s</appId>\
            </SubAccount>\
            ' % (to, tempId, self.AppId)
        if self.BodyType == 'json':
            # if the body type is JSON, build a JSON payload instead
            b = '['
            for a in datas:
                b += '"%s",' % (a)
            b += ']'
            body = '''{"to": "%s", "datas": %s, "templateId": "%s", "appId": "%s"}''' % (
                to, b, tempId, self.AppId)
        req.add_data(body)
        data = ''
        try:
            res = urllib3.urlopen(req)
            data = res.read()
            res.close()

            if self.BodyType == 'json':
                # JSON format
                locations = json.loads(data)
            else:
                # XML format
                xtj = xmltojson()
                locations = xtj.main(data)
            if self.Iflog:
                self.log(url, body, data)
            return locations
        except Exception:
            if self.Iflog:
                self.log(url, body, data)
            return {'172001': '网络错误'}  # 172001: network error
Example #18
    def landingCall(self, to, mediaName, mediaTxt, displayNum, playTimes,
                    respUrl, userData, maxCallTime, speed, volume, pitch,
                    bgsound):

        self.accAuth()
        nowdate = datetime.datetime.now()
        self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
        # Generate the signature
        signature = self.AccountSid + self.AccountToken + self.Batch
        sig = md5.new(signature).hexdigest().upper()
        # Build the request URL
        url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/Calls/LandingCalls?sig=" + sig
        # Generate the auth header
        src = self.AccountSid + ":" + self.Batch
        auth = base64.encodestring(src).strip()
        req = urllib3.Request(url)
        self.setHttpHeader(req)
        req.add_header("Authorization", auth)

        # Build the request body
        body = '''<?xml version="1.0" encoding="utf-8"?><LandingCall>\
            <to>%s</to><mediaName>%s</mediaName><mediaTxt>%s</mediaTxt><appId>%s</appId><displayNum>%s</displayNum>\
            <playTimes>%s</playTimes><respUrl>%s</respUrl><userData>%s</userData><maxCallTime>%s</maxCallTime><speed>%s</speed>
            <volume>%s</volume><pitch>%s</pitch><bgsound>%s</bgsound></LandingCall>\
            ''' % (to, mediaName, mediaTxt, self.AppId, displayNum, playTimes,
                   respUrl, userData, maxCallTime, speed, volume, pitch,
                   bgsound)
        if self.BodyType == 'json':
            body = '''{"to": "%s", "mediaName": "%s","mediaTxt": "%s","appId": "%s","displayNum": "%s","playTimes": "%s","respUrl": "%s","userData": "%s","maxCallTime": "%s","speed": "%s","volume": "%s","pitch": "%s","bgsound": "%s"}''' % (
                to, mediaName, mediaTxt, self.AppId, displayNum, playTimes,
                respUrl, userData, maxCallTime, speed, volume, pitch, bgsound)
        req.add_data(body)
        data = ''
        try:
            res = urllib3.urlopen(req)
            data = res.read()
            res.close()

            if self.BodyType == 'json':
                # JSON format
                locations = json.loads(data)
            else:
                # XML format
                xtj = xmltojson()
                locations = xtj.main(data)
            if self.Iflog:
                self.log(url, body, data)
            return locations
        except Exception:
            if self.Iflog:
                self.log(url, body, data)
            return {'172001': '网络错误'}  # 172001: network error
Example #19
def save_comments_data(obj_id, next_url):
    print("In Save Comments Data")
    urllike = "https://graph.facebook.com/%s?fields=comments.limit(1000)&access_token=%s" % (
        obj_id, token)
    while True:
        req = urllib3.Request(urllike, None, headers)
        req2 = urllib3.urlopen(req)
        page = req2.read()
        page = str(page)
        page = page.replace("false", "False")
        data = ast.literal_eval(page)
        try:
            base = data['comments']['data']
        except KeyError:
            base = data['data']
        length = len(base)
        for num in range(length):
            user_id = base[num]['from']['id']
            user_name = base[num]['from']['name']
            message = base[num]['message']
            created_time = base[num]['created_time']
            if "'" in message:
                message = str(message)
                message = message.replace("'", " ")
            if "'" in user_name:
                user_name = str(user_name)
                user_name = user_name.replace("'", " ")
            cnx = mysql.connector.connect(host='127.0.0.1',
                                          database='DB',
                                          user='******',
                                          password='******')
            cursor = cnx.cursor()
            # sql Query
            query = (
                "insert INTO commentdetails(objectID, user_id, user_name, message, created_time)   VALUES('"
                + obj_id + "', '" + user_id + "', '" + user_name + "','" +
                message + "','" + created_time + "');")
            try:
                cursor.execute(query)
            except Exception:
                print("Error here %s : %s" % (user_name, user_id))
            cnx.commit()
            cursor.close()
            cnx.close()
        print("heeee")
        try:
            urllike = data['comments']['paging']['next']
        except KeyError:
            try:
                urllike = data['paging']['next']
            except KeyError:
                get_post(next_url)
                return
Example #20
def download(url, user_agent='wswp', num_retries=2):
    print('Downloading:', url)
    headers = {'User-agent': user_agent}
    request = urllib3.Request(url, headers=headers)
    try:
        html = urllib3.urlopen(request).read()
    except urllib3.URLError as e:
        print('Download Error:', e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                return download(url, user_agent, num_retries - 1)
    return html
Example #21
def content_test(url, badip):
    try:
        request = urllib3.Request(url)
        opened_request = urllib3.build_opener().open(request)
        html_content = opened_request.read()
        retcode = opened_request.code

        if retcode != 200:
            return False
        matches = re.findall(badip, html_content)

        return len(matches) == 0
    except Exception:
        return False
Example #22
def get_para(wlink):
    msg = ''
    try:
        page_request = urllib3.Request(wlink)
        page_request.add_header('User-agent', 'Mozilla/5.0')
        page = urllib3.urlopen(page_request)
    except IOError:
        msg = 'No hay articulos en Wikipedia, tal vez quieras buscarlo en Google!'

    else:
        msg = wlink

    return msg
Example #23
    def voiceVerify(self, verifyCode, playTimes, to, displayNum, respUrl, lang,
                    userData):

        self.accAuth()
        nowdate = datetime.datetime.now()
        self.Batch = nowdate.strftime("%Y%m%d%H%M%S")
        # Generate the signature
        signature = self.AccountSid + self.AccountToken + self.Batch
        sig = md5.new(signature).hexdigest().upper()
        # Build the request URL
        url = "https://" + self.ServerIP + ":" + self.ServerPort + "/" + self.SoftVersion + "/Accounts/" + self.AccountSid + "/Calls/VoiceVerify?sig=" + sig
        # Generate the auth header
        src = self.AccountSid + ":" + self.Batch
        auth = base64.encodestring(src).strip()
        req = urllib3.Request(url)
        self.setHttpHeader(req)

        req.add_header("Authorization", auth)

        # Build the request body
        body = '''<?xml version="1.0" encoding="utf-8"?><VoiceVerify>\
            <appId>%s</appId><verifyCode>%s</verifyCode><playTimes>%s</playTimes><to>%s</to><respUrl>%s</respUrl>\
            <displayNum>%s</displayNum><lang>%s</lang><userData>%s</userData></VoiceVerify>\
            ''' % (self.AppId, verifyCode, playTimes, to, respUrl, displayNum,
                   lang, userData)
        if self.BodyType == 'json':
            # if the body type is JSON, build a JSON payload instead
            body = '''{"appId": "%s", "verifyCode": "%s","playTimes": "%s","to": "%s","respUrl": "%s","displayNum": "%s","lang": "%s","userData": "%s"}''' % (
                self.AppId, verifyCode, playTimes, to, respUrl, displayNum,
                lang, userData)
        req.add_data(body)
        data = ''
        try:
            res = urllib3.urlopen(req)
            data = res.read()
            res.close()

            if self.BodyType == 'json':
                # JSON format
                locations = json.loads(data)
            else:
                # XML format
                xtj = xmltojson()
                locations = xtj.main(data)
            if self.Iflog:
                self.log(url, body, data)
            return locations
        except Exception:
            if self.Iflog:
                self.log(url, body, data)
            return {'172001': '网络错误'}  # 172001: network error
Example #24
def get_ip_arr():
    gc.enable()
    try:
        url = 'http://vtp.daxiangdaili.com/ip/?tid=559609709731038&num=2000&delay=1&protocol=https'
        headers = {"User-Agent": "Mozilla/5.0"}
        req = urllib3.Request(url, headers=headers)
        res = urllib3.urlopen(req, timeout=20)
        res = res.read().decode('utf-8')
        ips_arr = res.split('\r\n')
        print(ips_arr)
        return ips_arr
    except Exception as e:
        cb_print('ip_arr_error:{}'.format(e))
    gc.collect()
Example #25
def get_access_token():
    url = 'https://oapi.dingtalk.com/gettoken?corpid=%s&corpsecret=%s' % (
        corp_id, corp_secret)
    request = urllib3.Request(url)
    response = urllib3.urlopen(request)
    response_str = response.read()
    response_dict = json.loads(response_str)
    error_code_key = "errcode"
    access_token_key = "access_token"
    if (error_code_key in response_dict and response_dict[error_code_key] == 0
            and access_token_key in response_dict):
        return response_dict[access_token_key]
    else:
        return '测试11111111111'  # fallback placeholder ('测试' means "test")
Example #26
 def doGet(self, url, data={}, header={}):
     data = urllib.urlencode(data)
     geturl = url + "?" + data
     request = urllib3.Request(geturl)
     for (key, val) in header.items():
         request.add_header(key, val)
     try:
         response = urllib3.urlopen(request)
     except urllib3.HTTPError as e:
         print(e.code)
     except urllib3.URLError as e:
         print(e.reason)
     else:
         print(response.read())
Example #27
 def get_rep_list(self):
     alientvault_resp = ""
     try:
         req = urllib3.Request(
             'http://reputation.alienvault.com/reputation.data')
         req.add_header(
             'User-agent',
             'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'
         )
         response = urllib3.urlopen(req)
         alientvault_resp = response.read()
     except Exception as e:
         self._is_rep_list = False
     return alientvault_resp
Example #28
def deploy():
    opts, args = parse_opts()
    if not inside_project():
        _log("Error: no Scrapy project found in this location")
        sys.exit(1)

    _delete_old_package()

    urllib3.install_opener(urllib3.build_opener(HTTPRedirectHandler))

    if opts.list_targets:
        for name, target in _get_targets().items():
            print ("%-20s %s" % (name, target['url']))
        return

    if opts.list_projects:
        target = _get_target(opts.list_projects)
        req = urllib3.Request(_url(target, 'listprojects.json'))
        _add_auth_header(req, target)
        f = urllib3.urlopen(req)
        projects = json.loads(f.read())['projects']
        print(os.linesep.join(projects))
        return

    tmpdir = None

    # build egg only
    if opts.build_egg:
        egg, tmpdir = _build_egg()
        _log("Writing egg to %s" % opts.build_egg)
        shutil.copyfile(egg, opts.build_egg)
    elif opts.deploy_all_targets:
        version = None
        for name, target in _get_targets().items():
            if version is None:
                version = _get_version(target, opts)
            _build_egg_and_deploy_target(target, version, opts)
    else:  # build egg and deploy
        target_name = _get_target_name(args)
        target = _get_target(target_name)
        version = _get_version(target, opts)
        exitcode, tmpdir = _build_egg_and_deploy_target(target, version, opts)

    if tmpdir:
        if opts.debug:
            _log("Output dir not removed: %s" % tmpdir)
        else:
            shutil.rmtree(tmpdir)
            _delete_old_package()
Example #29
def request_until_succeed(url):
    req = urllib3.Request(url)
    success = False
    while success is False:
        try:
            response = urllib3.urlopen(req)
            if response.getcode() == 200:
                success = True
        except Exception as e:
            print(e)
            time.sleep(5)

            print("Error for URL %s: %s" % (url, datetime.datetime.now()))

    return response.read()
Example #30
    def redeem_trusted_ticket(self, view_to_redeem, trusted_ticket, site='default'):
        trusted_view_url = "{}/trusted/{}".format(self.tableau_server_url, trusted_ticket)
        if site.lower() != 'default':
            trusted_view_url += "/t/{}/views/{}".format(site, view_to_redeem)
        else:
            trusted_view_url += "/views/{}".format(view_to_redeem)

        opener = urllib3.build_opener(urllib3.HTTPHandler)
        request = urllib3.Request(trusted_view_url)
        try:
            response = opener.open(request)
        except urllib3.HTTPError as e:
            if e.code >= 500:
                raise
            raw_error_response = e.fp.read()