def _strip_tracker_params(pairs):
    """Drop (key, value) pairs whose key starts with a known tracker prefix."""
    for annoying in ANNOYING_PARAMS:
        pairs = [(k, v) for k, v in pairs if not k.startswith(annoying)]
    return pairs


def cleanup(url):
    """Follow redirects, then strip tracker parameters from *url*.

    Both the query string and the fragment are cleaned (the same prefix
    blacklist, ``ANNOYING_PARAMS``, applies to both).  On any parsing
    problem the followed url is returned unchanged and the error is
    logged.

    :param url: url to clean
    :returns: cleaned url
    """
    url = _follow(url)
    try:
        urlp = urlparse(url)
        # Only rewrite the query if it is non-empty AND parses as
        # key/value pairs.
        query = parse_qsl(urlp.query)
        if urlp.query and query:
            urlp = urlp._replace(query=urlencode(_strip_tracker_params(query)))
        # Same treatment for the fragment.
        fragment = parse_qsl(urlp.fragment)
        if urlp.fragment and fragment:
            urlp = urlp._replace(
                fragment=urlencode(_strip_tracker_params(fragment)))
        url = urlp.geturl()
    except Exception:
        app.logger.exception("Problem cleaning url %s", url)
    app.logger.info("Final url %s", url)
    return url
def facebook_request(self, path, callback, access_token=None,
                     post_args=None, **args):
    """Fetch ``path`` relative to ``self._FACEBOOK_BASE_URL``
    (by default "https://graph.facebook.com"), e.g. "/btaylor/picture".

    POST requests pass their form fields in ``post_args``; query-string
    arguments are given as keyword arguments.  Most Graph API methods
    need an OAuth ``access_token`` obtained via
    `~OAuth2Mixin.authorize_redirect` / `get_authenticated_user`.
    The parsed result is delivered to ``callback``.

    An introduction to the Facebook Graph API can be found at
    http://developers.facebook.com/docs/api

    .. versionchanged:: 3.1
       Added the ability to override ``self._FACEBOOK_BASE_URL``.
    """
    url = self._FACEBOOK_BASE_URL + path
    all_args = {}
    if access_token:
        all_args["access_token"] = access_token
    all_args.update(args)
    if all_args:
        url = "{0}?{1}".format(url, urllib_parse.urlencode(all_args))
    wrapped_cb = functools.partial(self._on_facebook_request, callback)
    http = self.get_auth_http_client()
    if post_args is None:
        http.fetch(url, callback=wrapped_cb)
    else:
        http.fetch(url, method="POST",
                   body=urllib_parse.urlencode(post_args),
                   callback=wrapped_cb)
def test_login_failed_empty(self):
    """Login must fail with an explanatory error when either the email
    or the password field is empty."""
    scenarios = [
        {'action': 'Login', 'email': '', 'password': '******'},
        {'action': 'Login', 'email': 'foo', 'password': ''},
    ]
    for post_data in scenarios:
        self.http_client.fetch(self.get_url('/login/email'), self.stop,
                               method='POST', body=urlencode(post_data),
                               follow_redirects=False)
        response = self.wait()
        payload = json_decode(response.body)
        self.assertEqual(payload['status'], 'failed')
        self.assertEqual(payload['error'],
                         'Email and password are mandatory')
def WriteData(self, data):
    """ Write observation data to server

        :param data: dictionary with observation data
        :returns: server answer or None (if error)
    """
    par = {}
    if data is None or self.DropData(data):
        logging.warning(" empty or inappropiate data not written")
        return
    # add datetime and/or id
    data = self.ExtendData(data)
    # keep only the fields allowed by the filter, stringified
    for key, val in data.items():
        if self.filt is None or key in self.filt:
            par[key] = self.StrVal(val)
    if self.mode == 'GET':
        full_url = self.url + '?' + urlencode(par)
        logging.debug("GET %s", full_url)
        try:
            res = urlopen(full_url).read()
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit and hid the real error
        except Exception:
            logging.exception("GET request failed")
            res = None
    else:
        try:
            d = urlencode(par)
            req = Request(self.url, d)
            logging.debug("POST %s", self.url)
            res = urlopen(req).read()
        except Exception:
            logging.exception("POST request failed")
            res = None
    return res
def get_user_work_page(self):
    """Prompt for a pixiv member id and yield each of that member's
    illustration-list pages, one page of 20 works at a time."""
    self.userId = input('Enter User id: ')
    self.userDirName = join(self.imgStoreDirName, self.pixiv, self.userId)
    if not self.userId.isdigit():
        print('Wrong id.')
        raise SystemExit(1)
    if not exists(self.userDirName):
        makedirs(self.userDirName)
    query = {'id': self.userId, 'type': 'all', 'p': 1}
    print("Load the page 1...")
    first_page = self.url_open(self.memIllUrl + urlencode(query))
    # total number of works is shown in the count badge (trailing char
    # stripped before converting)
    badge = search(compile('class="count-badge">(.*?)</span>'), first_page)
    work_count = int(badge.group(1)[:-1])
    last_page = ceil(work_count / 20)
    yield first_page
    for page_no in range(2, last_page + 1):
        query['p'] = page_no
        print("Load the page %d..." % page_no)
        yield self.url_open(self.memIllUrl + urlencode(query))
def request_page(self, p, action="GET", **kvargs):
    """Issue one API request against the pokemon-gl server and return
    the decoded JSON tree; raises RuntimeError on API-level errors."""
    conn = http.client.HTTPConnection(self.serv + ".pokemon-gl.com")
    headers = {"Cookie": "PMDSUSSID={}; locale=en".format(self.PMDSUSSID)}
    fields = [("p", p)]
    if self.token is not None:
        fields.append(("token", self.token))
    fields += kvargs.items()
    if debug:
        print(fields)
    url = "/api/?"
    body = None
    if action == "GET":
        url += urlencode(fields)
    elif action == "POST":
        headers["Content-type"] = 'application/x-www-form-urlencoded'
        body = urlencode(fields)
    conn.request(action, url, body, headers)
    raw = conn.getresponse().read().decode("utf-8")
    tree = json.loads(raw)
    if 'error' in tree:
        error = tree['error']
        raise RuntimeError(error['code'], error['mess'], error['details'])
    if debug:
        print(tree)
    return tree
def _photo_item(users, user, api_item):
    """Parses a photo item (new photos or new photo tags)."""
    item_type = api_item["type"]
    if item_type == "photo":
        title = "новые фотографии"
        photos = api_item["photos"]
        extra_query = {"section": "photos"}
        feed_kind = "feed1"
    elif item_type == "photo_tag":
        title = "новые отметки на фотографиях"
        photos = api_item["photo_tags"]
        extra_query = {}
        feed_kind = "feed3"
    else:
        raise Error("Logical error.")

    def photo_url(photo):
        # Build the feed deep-link for one photo.
        query = dict(extra_query)
        query["z"] = "photo{0}_{1}/{2}_{3}_{4}".format(
            photo["owner_id"], photo["pid"], feed_kind,
            api_item["source_id"], api_item["date"])
        return _vk_url("feed?" + urlencode(query))

    item = {
        "title": user["name"] + ": " + title,
        "text": "",
    }
    # photos[0] apparently holds the total count; real photo dicts follow it
    # (see the "not all photos shown" check below) — TODO confirm with the API.
    for photo in photos[1:]:
        link = photo_url(photo)
        item.setdefault("url", link)
        item["text"] += _block(_link(link, _image(photo["src_big"])))
    if photos[0] > len(photos) - 1:
        item["text"] += _block("[показаны не все фотографии]")
    return item
def method(self, url, method="GET", parameters=None, timeout=None):
    """Call an API endpoint and return the decoded JSON response.

    GET requests carry ``parameters`` (plus format/auth_token) in the
    query string; other methods put them in the body and only send the
    auth token in the query string.
    """
    log.info('Making {0} request to {1} with parameters {2}'.format(
        method, url, parameters))
    method_url = urljoin(self.url, url)
    if method == "GET":
        if not parameters:
            parameters = dict()
        parameters['format'] = self.format
        parameters['auth_token'] = self.token
        query_string = urlencode(parameters)
        request_data = None
    else:
        query_string = urlencode({'auth_token': self.token})
        request_data = (urlencode(parameters).encode('utf-8')
                        if parameters else None)
    method_url = '{0}?{1}'.format(method_url, query_string)
    log.debug('Method URL: {0}'.format(method_url))
    req = self.RequestWithMethod(method_url, http_method=method,
                                 data=request_data)
    response = self.opener.open(req, None, timeout)
    # TODO Check response status (failures, throttling)
    return json.loads(response.read().decode('utf-8'))
def total_file_size(slack_token, verbose=False):
    """
    Finds the total size of all files on the slack server

    :param slack_token: Slack API token
    :param verbose: also print the total to stdout
    :return: human-readable total, e.g. "12.34 MB"
    """
    params = {
        'token': slack_token,
        'count': 500,
    }
    response = reader(urlopen(
        'https://slack.com/api/files.list?' + urlencode(params)))
    size = 0
    file_ids = [f['id'] for f in load(response)['files']]
    for file_id in file_ids:
        params = {
            # BUG FIX: was the undefined name `token` (NameError at runtime)
            'token': slack_token,
            'file': file_id,
        }
        response = reader(urlopen(
            'https://slack.com/api/files.info?' + urlencode(params)))
        size += load(response)['file']['size']
    mb = size / 1048576  # bytes -> MiB (computed once; was duplicated)
    if verbose:
        print('{0:.2f} MB total'.format(mb))
    return '{0:.2f} MB'.format(mb)
def auth_user(email, password, client_id, scope, opener):
    """Drive the VK OAuth mobile login form and return ``(html, final_url)``.

    Raises RuntimeError when the login form cannot be parsed and
    NotImplementedError when the form does not use POST.
    """
    auth_fields = {
        'redirect_uri': 'https://oauth.vk.com/blank.html',
        'response_type': 'token',
        'client_id': client_id,
        'display': 'mobile',
        'scope': ','.join(scope),
        'v': API_VERSION,
    }
    encoded = urlencode(list(auth_fields.items())).encode('utf-8')
    response = opener.open('https://oauth.vk.com/authorize', encoded)
    doc = response.read().decode(encoding='utf-8', errors='replace')
    parser = FormParser()
    parser.feed(doc)
    parser.close()
    form_ok = (parser.form_parsed and parser.url is not None
               and "pass" in parser.params and "email" in parser.params)
    if not form_ok:
        raise RuntimeError("Something wrong")
    parser.params["email"] = email
    parser.params["pass"] = password
    if parser.method != "POST":
        raise NotImplementedError("Method '%s'" % parser.method)
    response = opener.open(parser.url,
                           urlencode(parser.params).encode('utf-8'))
    doc = response.read().decode(encoding='utf-8', errors='replace')
    return doc, response.geturl()
def delete_files(file_ids, slack_token, verbose=False):
    """
    Deletes all files with IDs matching the given list

    :param file_ids: file IDs to delete
    :param slack_token: Slack API token
    :param verbose: print per-file progress
    """
    total_bytes = 0
    num_files = len(file_ids)
    for count, file_id in enumerate(file_ids, start=1):
        query = urlencode({'token': slack_token, 'file': file_id})
        info = reader(urlopen('https://slack.com/api/files.info?' + query))
        total_bytes += load(info)['file']['size']
        result = reader(urlopen('https://slack.com/api/files.delete?' + query))
        ok = load(result)['ok']
        mb = total_bytes / 1048576
        if verbose:
            print("{0} of {1} - {2} {3} ... {4:.2f} MB saved".format(
                count, num_files, file_id, ok, mb))
def MsgAddCard(self, chat_id):
    """Ask the chat for a card number and store it once the next new
    update arrives; reply with success or a validation complaint."""
    def say(text):
        # Push `text` to the chat through the bot's sendMessage endpoint.
        self.p_sendMessage['text'] = text
        self.p_sendMessage['chat_id'] = str(chat_id)
        payload = urlencode(self.p_sendMessage).encode()
        return urlopen(Request(self.sendMessage, payload))

    say("Enter card number [xxxxxxxxxxx]:")
    # Poll getUpdates until an update newer than the last seen one shows up.
    while True:
        payload = urlencode(self.p_getUpdates).encode()
        raw = urlopen(Request(self.getUpdates, payload)).read().decode()
        data = json.loads(raw).get('result')[0]
        if data.get('update_id') <= self.update_id:
            continue
        self.update_id = data.get('update_id')
        number = data.get('message').get('text')
        if self.CheckCardNum(number):
            self.cards.setdefault(chat_id, []).append(number)
            say("Card " + number + " successfully added!")
        else:
            say("This doesn't look like card number!")
        break
def set(self, pagepath, text, comment=None):
    """Inputs text into the wiki input text box.

    Parameters
    ----------
    pagepath : :class:`str`
        Wiki page to update.
    text : :class:`str`
        The wiki text.
    comment : :class:`str`, optional
        A comment on the change.
    """
    page_url = self.url + "/wiki/" + pagepath
    response = self.opener.open(page_url + "?action=edit")
    if self._debug:
        print(response.info())
    # The edit form carries the current page version; scrape it.
    parser = SimpleWikiHTMLParser('version')
    parser.feed(response.read().decode('utf-8'))
    response.close()
    postdata = {
        '__FORM_TOKEN': self._form_token,
        'from_editor': '1',
        'action': 'edit',
        'version': parser.search_value,
        'save': 'Submit changes',
        'text': CRLF(text),
    }
    if comment is not None:
        postdata['comment'] = CRLF(comment)
    if self._debug:
        print(urlencode(postdata))
    response = self.opener.open(page_url,
                                urlencode(postdata).encode('utf-8'))
    response.close()
    return
def send_feedback(dsn, event_id, name, email, comment, timeout):
    """Send feedback, blocking.

    Args:
        dsn (str): The DSN
        event_id (str): The event ID this feedback should be attached to
        name (str): The user name
        email (str): The user email
        comment (str): The feedback text
        timeout (float): The timeout for this request
    Raises:
        SentryError: In case of timeout or other errors
    """
    fields = [
        ('name', str(name).encode("utf-8")),
        ('email', str(email).encode("utf-8")),
        ('comments', str(comment).encode("utf-8")),
    ]
    data = urlencode(fields)
    if not isinstance(data, bytes):  # py3
        data = data.encode("utf-8")
    headers = {"Referer": "https://quodlibet.github.io"}
    params = urlencode([("dsn", dsn), ("eventId", event_id)])
    try:
        req = Request(
            "https://sentry.io/api/embed/error-page/?" + params,
            data=data, headers=headers)
        urlopen(req, timeout=timeout).close()
    except EnvironmentError as e:
        raise SentryError(e)
def tag_text(self, text):
    """Tag the text with proper named entities token-by-token.

    :param text: raw text strig to tag
    :returns: tagged text in given output format
    """
    # Remove formatting whitespace, then make sure the input ends with
    # a newline as the tagger expects.
    for ws in ("\f", "\n", "\r", "\t", "\v"):
        text = text.replace(ws, "")
    text += "\n"
    with http_connection(self.host, self.port) as c:
        headers = {"Content-type": "application/x-www-form-urlencoded",
                   "Accept": "text/plain"}
        form = {
            "input": text,
            "outputFormat": self.oformat,
            "preserveSpacing": self.spacing,
        }
        if self.classifier:
            form["classifier"] = self.classifier
        params = urlencode(form)
        try:
            c.request("POST", self.location, params, headers)
            tagged_text = c.getresponse().read()
        except httplib.HTTPException as e:
            print("Failed to post HTTP request.")
            raise e
    return tagged_text
def tag_text(self, text):
    '''Tag the text with proper named entities token-by-token.

    :param text: raw text strig to tag
    :returns: tagged text in given output format
    '''
    # drop formatting whitespace in a single pass, then ensure end-of-line
    text = text.translate({ord(ch): None for ch in '\f\n\r\t\v'}) + '\n'
    with http_connection(self.host, self.port) as c:
        headers = {'Content-type': 'application/x-www-form-urlencoded',
                   'Accept': 'text/plain'}
        payload = {'input': text,
                   'outputFormat': self.oformat,
                   'preserveSpacing': self.spacing}
        if self.classifier:
            payload['classifier'] = self.classifier
        try:
            c.request('POST', self.location, urlencode(payload), headers)
            response = c.getresponse()
            tagged_text = response.read()
        except httplib.HTTPException as e:
            print("Failed to post HTTP request.")
            raise e
    return tagged_text
def method(self, url, method="GET", parameters=None, timeout=None):
    """Call an API endpoint and return the decoded JSON response.

    :param url: path, joined against ``self.url``
    :param method: HTTP method ("GET", "POST", ...)
    :param parameters: query parameters (GET) or form body (other methods)
    :param timeout: socket timeout passed to the opener
    """
    method_url = urljoin(self.url, url)
    if method == "GET":
        if not parameters:
            parameters = dict()
        parameters['format'] = self.format
        parameters['auth_token'] = self.token
        query_string = urlencode(parameters)
        request_data = None
    else:
        query_string = urlencode({'auth_token': self.token})
        # Non-GET requests carry the parameters in the (encoded) body.
        # BUG FIX: encoding here also avoids the old `None.encode(...)`
        # crash when a POST had no parameters.
        request_data = (urlencode(parameters).encode('utf-8')
                        if parameters else None)
    method_url = method_url + '?' + query_string
    # BUG FIX: `req` used to be built only inside `if method == "POST"` /
    # `if method == "GET"`, so any other verb (PUT, DELETE, ...) raised
    # UnboundLocalError; build it once for every method.
    req = self.RequestWithMethod(method_url, http_method=method,
                                 data=request_data)
    response = self.opener.open(req, None, timeout).read()
    return json.loads(response.decode('utf-8'))
def request(self, uri, *, method='GET', query=None, headers={}, body=None):
    """Hand-build an HTTP/1.1 request on the raw connection and return
    the parsed response object."""
    conn = self.connection()
    assert method.isidentifier(), method
    assert uri.startswith('/'), uri
    if query:
        sep = '&' if '?' in uri else '?'
        uri = uri + sep + urlencode(query)
    headers = headers.copy()
    if isinstance(body, dict):
        body = urlencode(body)
    if isinstance(body, str):
        body = body.encode('utf-8')  # there are no other encodings, right?
    if body is None:
        body = b''
        headers['Content-Length'] = 0
    else:
        headers['Content-Length'] = len(body)
    lines = ['{} {} HTTP/1.1'.format(method.upper(), uri)]
    lines.extend('{}: {}'.format(k, str(v)) for k, v in headers.items())
    lines += ['', '']
    buf = '\r\n'.join(lines).encode('ascii')
    return self.response_class(*conn.request(buf + body).get())
def post(self, url, data, accept="*/*", charset="UTF-8", referer=""):
    """POST ``data`` to ``url`` via pycurl, keeping cookies across calls.

    :param url: target url
    :param data: dict of form fields, sent form-encoded
    :param accept: Accept header value
    :param charset: Accept-Charset header value
    :param referer: Referer header value
    :returns: decoded response body, or "" on any error
    """
    curl = pycurl.Curl()
    # BUG FIX: printf-style args were passed straight to print(),
    # producing e.g. `post: %s http://...`.
    print("post: %s" % url)
    print("data: %s" % urlencode(data))
    buf = BytesIO()
    try:
        curl.setopt(pycurl.COOKIEFILE, CookieFile)
        curl.setopt(pycurl.COOKIEJAR, CookieFile)
        curl.setopt(pycurl.FOLLOWLOCATION, 1)  # follow redirects
        curl.setopt(pycurl.MAXREDIRS, 5)       # maximum redirect count
        curl.setopt(pycurl.TIMEOUT, 80)        # connection timeout
        # curl.setopt(pycurl.VERBOSE, 1)  # verbose
        curl.setopt(pycurl.WRITEFUNCTION, buf.write)
        curl.setopt(pycurl.POSTFIELDS, urlencode(data))
        curl.setopt(pycurl.URL, url)
        # BUG FIX: .format() was applied only to the last string in the
        # list, so the literal headers "Accept: {0}" and
        # "Accept-Charset: {1}" were sent to the server.
        curl.setopt(pycurl.HTTPHEADER,
                    ["Accept: {0}".format(accept),
                     "Accept-Charset: {0}".format(charset),
                     "Referer: {0}".format(referer)])
        curl.perform()
        for line in curl.getinfo(pycurl.INFO_COOKIELIST):
            self._parseCookie(line)
        result = buf.getvalue()  # grab the buffered response body
        return self.bytesToStr(result)
    except Exception as e:
        print(e)
        return ""
    finally:
        # release the buffer and the curl handle in every outcome
        buf.close()
        curl.close()
def __build_params(self):
    """Assemble the request payload for the Facebook batch API from the
    queued calls in ``self.__api_calls``.

    See https://developers.facebook.com/docs/reference/api/batch/
    for documentation on how the batch api is supposed to work.
    """
    batch = []
    all_files = []
    for request in self.__api_calls:
        payload = {'method': request.method}
        if not request.ignore_result:
            payload['omit_response_on_success'] = False
        if request.name:
            payload['name'] = request.name
        if request.method in ['GET', 'DELETE']:
            payload['relative_url'] = (request.path + '?'
                                       + urlencode(request.params))
        elif request.method == 'POST':
            payload['relative_url'] = request.path
            files = []
            params = {}
            # BUG FIX: dict.iteritems() is Python 2 only and raises
            # AttributeError on Python 3; items() works on both.
            for key, value in request.params.items():
                if isinstance(value, FileType):
                    # files are attached separately and referenced by name
                    all_files.append(value)
                    files.append('file%s' % (len(all_files) - 1))
                else:
                    params[key] = value
            payload['body'] = urlencode(params)
            payload['attached_files'] = ','.join(files)
        batch.append(payload)
    params = {'batch': json.dumps(batch)}
    for i, f in enumerate(all_files):
        params['file%s' % i] = f
    return params
def load_posts(self, *, web_driver: WebDriver = None, **params) -> List[Post]:
    """Fetch the group's wall posts, either through the plain API or —
    when ``web_driver`` is given — by logging into vk.com and paging
    through the /dev/wall.get console."""
    params.setdefault('owner_id', -self.group_id)
    if web_driver is None:
        raw_posts = self.get_all_objects('wall.get', **params)
        return [Post.from_raw(raw_post) for raw_post in raw_posts]

    # Log into vk.com with the stored credentials.
    open_url('https://vk.com', web_driver)
    email_box = web_driver.find_element_by_xpath('//*[@id="index_email"]')
    email_box.clear()
    email_box.send_keys(self.user_login)
    pass_box = web_driver.find_element_by_xpath('//*[@id="index_pass"]')
    pass_box.clear()
    pass_box.send_keys(self.user_password)
    web_driver.find_element_by_xpath('//*[@id="index_login_button"]').click()

    page_size = 100
    query = {'params[owner_id]': params['owner_id'],
             'params[count]': page_size,
             'params[offset]': params.get('offset', 0),
             'params[filter]': params.get('filter', 'owner'),
             'params[fields]': params.get('fields', ''),
             'params[v]': self.api_version}
    url_parts = list(urlparse('https://vk.com/dev/wall.get'))

    def fetch_batch():
        # Re-encode the current query and pull one page of results.
        url_parts[4] = urlencode(query)
        return parse_from_vk_dev(urlunparse(url_parts), web_driver)['response']

    response = fetch_batch()
    total_count = response['count']
    raw_posts = response['items']
    while len(raw_posts) < total_count:
        query['params[offset]'] += page_size
        raw_posts += fetch_batch()['items']
    return [Post.from_raw(raw_post) for raw_post in raw_posts]
def __getChannelId(self):
    """ Obtain channel id for channel name, if present in ``self.search_params``. """
    name = self.search_params.get("channelId")
    if not name:
        return
    # First try resolving the value as a legacy username.
    base = ("https://www.googleapis.com/youtube/v3/channels"
            "?part=id&maxResults=1&fields=items%2Fid&")
    reply = requests.get(base + urlencode({"key": self.api_key,
                                           "forUsername": name})).json()
    try:
        self.search_params["channelId"] = reply['items'][0]['id']
        return  # got it
    except IndexError:
        pass
    # Fall back to a channel search on the same string.
    base = ("https://www.googleapis.com/youtube/v3/search"
            "?part=snippet&type=channel&fields=items%2Fid&")
    reply = requests.get(base + urlencode({"key": self.api_key,
                                           "q": name})).json()
    try:
        self.search_params["channelId"] = reply['items'][0]['id']['channelId']
    except IndexError:
        del self.search_params["channelId"]  # channel not found
def suggest(context, request):
    """Autocomplete endpoint: return up to 10 name suggestions for the
    ``q`` request parameter, filtering out non-human species entries.

    :returns: a result document, ``[]`` when ``q`` is absent (preserved
        historical behaviour), or ``{}`` when the search backend fails.
    """
    if 'q' not in request.params:
        return []
    text = request.params.get('q', '')
    result = {
        '@id': '/suggest/?' + urlencode({'q': text}),
        '@type': ['suggest'],
        'title': 'Suggest',
        '@graph': [],
    }
    es = request.registry['snp_search']
    query = {
        "suggester": {
            "text": text,
            "completion": {
                "field": "name_suggest",
                "size": 10,
            }
        }
    }
    try:
        results = es.suggest(index='annotations', body=query)
    # BUG FIX: was a bare `except:` which also caught SystemExit and
    # KeyboardInterrupt.
    except Exception:
        return {}
    # Drop suggestions for non-human model organisms.
    excluded = ('(C. elegans)', '(mus musculus)', '(D. melanogaster)')
    for item in results['suggester'][0]['options']:
        if not any(marker in item['text'] for marker in excluded):
            result['@graph'].append(item)
    return result
def twitter_request(self, path, callback=None, access_token=None,
                    post_args=None, **args):
    """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``

    The path should not include the format or API version number
    (JSON format and API version 1 are used automatically).  For POST
    requests, pass the form fields in ``post_args``; query string
    arguments go as keyword arguments.

    All the Twitter methods are documented at http://dev.twitter.com/

    Many methods require an OAuth access token which you can obtain
    through `~OAuthMixin.authorize_redirect` and
    `~OAuthMixin.get_authenticated_user`; the user returned by that
    process includes an ``access_token`` attribute usable here.
    """
    if path.startswith('http:') or path.startswith('https:'):
        # Raw urls support endpoints that live on their own host, e.g.
        # http://search.twitter.com/search.json
        url = path
    else:
        url = self._TWITTER_BASE_URL + path + ".json"
    if access_token:
        # Sign the request with the OAuth credentials.
        all_args = dict(args)
        all_args.update(post_args or {})
        method = "POST" if post_args is not None else "GET"
        oauth = self._oauth_request_parameters(
            url, access_token, all_args, method=method)
        args.update(oauth)
    if args:
        url += "?" + urllib_parse.urlencode(args)
    http = self.get_auth_http_client()
    http_callback = self.async_callback(self._on_twitter_request, callback)
    if post_args is None:
        http.fetch(url, callback=http_callback)
    else:
        http.fetch(url, method="POST",
                   body=urllib_parse.urlencode(post_args),
                   callback=http_callback)
def asyncServe(privmsg, query, yearhint):
    """Search IMDb for `query` (optionally biased around `yearhint`) and
    reply with title, year, director, rating and url."""
    criteria = {'title': query, 'sort': 'num_votes,desc'}
    if yearhint:
        # constrain to a two-year window around the hinted year
        criteria['release_date'] = "%04i-06-01,%04i-06-01" % (
            yearhint - 1, yearhint + 1)
    url = "http://akas.imdb.com/search/title?" + urlencode(criteria)
    debug("int_imdb", "url", url)
    html = urllib.request.urlopen(url).read(50000).decode("iso-8859-1")
    html = html.partition('<table class="results">')[2]
    if not html:
        debug("int_imdb", "result-table not found.")
        return
    ttid = re.search(r'<a href="/title/(tt[^/]+)/">', html).group(1)
    title = htmlDecodeEntites(
        re.search(r'<a href="/title/tt[^/]+/">([^<]+)</a>', html).group(1))
    director = htmlDecodeEntites(
        re.search(r'Dir: <a[^>]+>([^<]+)</a>', html).group(1))
    year = re.search(r'<span class="year_type">\((\d{4})', html).group(1)
    rating = re.search(
        r'itemprop="ratingValue">([0-9.]+|-)</span>', html).group(1)
    privmsg.reply_imore("%s (%s) %s [%s] http://www.imdb.com/title/%s/" % (
        title, year, director, rating, ttid))
def test_call_app(self):
    """The Twilio call app speaks a prompt on GET, the token digits on
    POST, and picks a supported voice from the ``locale`` parameter."""
    url = reverse('two_factor:twilio_call_app', args=['123456'])

    # NOTE(review): byte literals reproduced exactly as in the original
    # source; internal spacing must match the rendered TwiML.
    expected_prompt = (
        b'<?xml version="1.0" encoding="UTF-8" ?>'
        b'<Response>'
        b' <Gather timeout="15" numDigits="1" finishOnKey="">'
        b' <Say language="en">Hi, this is testserver calling. '
        b'Press any key to continue.</Say>'
        b' </Gather>'
        b' <Say language="en">You didn\'t press any keys. Good bye.</Say>'
        b'</Response>')
    self.assertEqual(self.client.get(url).content, expected_prompt)

    expected_token = (
        b'<?xml version="1.0" encoding="UTF-8" ?>'
        b'<Response>'
        b' <Say language="en">Your token is 1. 2. 3. 4. 5. 6. '
        b'Repeat: 1. 2. 3. 4. 5. 6. Good bye.</Say>'
        b'</Response>')
    self.assertEqual(self.client.post(url).content, expected_token)

    # there is a en-gb voice
    response = self.client.get(
        '%s?%s' % (url, urlencode({'locale': 'en-gb'})))
    self.assertContains(response, '<Say language="en-gb">')

    # there is no nl voice
    response = self.client.get(
        '%s?%s' % (url, urlencode({'locale': 'nl-nl'})))
    self.assertContains(response, '<Say language="en">')
def test_query_dont_unqoute_twice():
    """A URL passed as a query value must come back exactly once-decoded."""
    inner_url = "http://base.place?" + urlencode({"a": "/////"})
    outer_url = "http://test_url.aha?" + urlencode({"url": inner_url})
    parsed = URL(outer_url)
    assert parsed.query["url"] == inner_url
def api(self, command, args={}):
    """
    Main Api Function
    - encodes and sends <command> with optional [args] to Poloniex api
    - raises 'ValueError' if an api key or secret is missing (and the
      command is 'private'), or if the <command> is not valid
    - returns decoded json api message
    """
    if self._coaching:
        self.apiCoach.wait()  # check in with the coach
    args['command'] = command  # pass the command
    if command in PRIVATE_COMMANDS:
        # private endpoint: signed POST to /tradingApi
        try:
            if len(self.APIKey) < 2 or len(self.Secret) < 2:
                raise ValueError("An APIKey and Secret is needed for private api commands!")
            args['nonce'] = self.nonce
            post_data = urlencode(args)
            signature = hmac.new(self.Secret, post_data.encode('utf-8'),
                                 hashlib.sha512).hexdigest()
            reply = requests.post(
                'https://poloniex.com/tradingApi',
                data=args,
                headers={'Sign': signature, 'Key': self.APIKey},
                timeout=self.timeout)
            return json.loads(reply.text)
        finally:
            # the nonce is bumped no matter what happened above
            self.nonce += 1
    if command in PUBLIC_COMMANDS:
        reply = requests.post(
            'https://poloniex.com/public?' + urlencode(args),
            timeout=self.timeout)
        return json.loads(reply.text)
    raise ValueError("Invalid Command!")
def request(
    cls,
    uri,
    params={},
    client=None,
    wrapper=FreesoundObject,
    method='GET',
    data=False
):
    """Perform an authorized request against the API and wrap the JSON
    reply.

    :param uri: endpoint url
    :param params: optional query-string parameters
    :param client: client object providing the ``Authorization`` header
    :param wrapper: class used to wrap the decoded result (or falsy for raw)
    :param data: optional form data; POSTed when given
    :raises FreesoundException: on HTTP errors or unparseable responses
    """
    p = params if params else {}
    url = '%s?%s' % (uri, urlencode(p)) if params else uri
    # NOTE(review): urlencode() returns str; urllib may expect bytes for
    # the request body on py3 — confirm against callers.
    d = urlencode(data) if data else None
    headers = {'Authorization': client.header}
    req = Request(url, d, headers)
    try:
        f = urlopen(req)
    except HTTPError as e:
        resp = e.read()
        if 200 <= e.code < 300:
            return resp
        raise FreesoundException(e.code, json.loads(resp))
    resp = f.read()
    f.close()
    try:
        result = json.loads(resp)
    # BUG FIX: was a bare `except:`; json decoding failures raise
    # ValueError (JSONDecodeError), so catch only that.
    except ValueError:
        raise FreesoundException(0, "Couldn't parse response")
    if wrapper:
        return wrapper(result, client)
    return result
def _surlencode():
    # Yield one urlencoded "k=v" chunk per value, expanding list/tuple
    # values into repeated keys.  `query` comes from the enclosing scope.
    for key, value in query.items():
        if isinstance(value, (list, tuple)):
            for element in value:
                yield urlencode({key: element})
        else:
            yield urlencode({key: value})
# City-name prefix (first token of the address) -> romanized city name.
_CITY_NAMES = {
    "부산광역시": "busan", "충청북도": "chungbuk", "충청남도": "chungnam",
    "대구광역시": "daegu", "대전광역시": "daejeon", "강원도": "gangwon",
    "광주광역시": "gwangju", "경기도": "gyeonggi", "경상북도": "gyeongbuk",
    "경상남도": "gyeongnam", "인천광역시": "incheon", "제주특별자치도": "jeju",
    "전라북도": "jeonbuk", "전라남도": "jeonnam", "세종특별자치시": "sejong",
    "서울특별시": "seoul", "울산광역시": "ulsan",
}

# Open-service-id prefix (first two chars) -> (category_name, category_kor).
_CATEGORIES = {
    "01": ("health", "건강"), "02": ("animal", "동물"),
    "03": ("culture", "문화"), "04": ("culture", "문화"),
    "05": ("life", "생활"), "06": ("life", "생활"),
    "08": ("life", "생활"), "10": ("life", "생활"),
    "07": ("food", "식품"), "09": ("environment", "환경"),
    "11": ("other", "기타"),
}


def close(today_str, day1_str, day2_str, day3_str, day4_str, day5_str,
          day6_str, day7_str):
    """Pull the last week's business-closure records (state "03") from the
    open API and count those closed within the given seven days that are
    not yet present in the local collection.

    :returns: number of newly seen closure records
    """
    query_params = '?' + urlencode({
        quote_plus("authKey"): "uZ98I03VR/zZ3zAvdNIVEl8EWJy6llSwLtDAMDlOYkE=",
        quote_plus("lastModTsBgn"): day1_str,
        quote_plus("lastModTsEnd"): today_str,
        quote_plus("state"): "03",
        quote_plus("pageSize"): "10000",
        quote_plus("resultType"): "xml",
    })
    request = Request(url + query_params)
    request.get_method = lambda: 'GET'
    response_body = urlopen(request).read()
    # BUG FIX: the parse result used to be bound to `dict`, shadowing the
    # builtin for the rest of the function.
    parsed = xmltodict.parse(response_body)
    jsonString = json.dumps(parsed['result']['body'], ensure_ascii=False)
    jsonObj = json.loads(jsonString)
    local_real_time_close = localWeekData['test_close']
    week = (day1_str, day2_str, day3_str, day4_str,
            day5_str, day6_str, day7_str)
    cnt = 0
    if jsonObj['rows']:
        for item in jsonObj['rows']['row']:
            if item['dcbYmd'] not in week:
                continue
            is_exist = local_real_time_close.find_one(
                {"$and": [{"mgtNo": item['mgtNo']},
                          {"store_name": item['bplcNm']}]})
            if is_exist is not None:
                continue
            cnt += 1
            # Prefer the road-name address; fall back to the site address.
            address = (item['rdnWhlAddr'] if item['rdnWhlAddr']
                       else item['siteWhlAddr'])
            city_name = _CITY_NAMES.get(address.split(' ')[0], "")
            category_name, category_kor = _CATEGORIES.get(
                item['opnSvcId'][0:2], ("", ""))
            info = {
                "mgtNo": item['mgtNo'],
                "data": "close",
                "update_date": item['updateDt'],
                "authorization_date": item['apvPermYmd'],
                "closed_date": item['dcbYmd'],
                "store_name": item['bplcNm'],
                "address": address,
                "state_code": item['trdStateGbn'],
                "state": item['trdStateNm'],
                "open_service_id": item['opnSvcId'],
                "open_service": item['opnSvcNm'],
                "detailed_classification": item['uptaeNm'],
                "city_name": city_name,
                "category_name": category_name,
                "category_kor": category_kor,
            }
            print(cnt, "close", info)
            # local_real_time_close = localWeekData['test_close']
            # local_real_time_close_id = local_real_time_close.insert_one(info).inserted_id
    return cnt
url = 'http://news.donga.com/search' param_dict = { 'p' : '', 'query' : KEY_WORD, 'check_news' : '1', 'more' : '1', 'sorting' : '1', # 정확도=3, 최신순=1 'search_date' : '1', 'v1' : '', 'v2' : '', 'range' : '3', } url_all = url + '?' + urlencode(query=param_dict) source_code_from_url = urllib.request.urlopen(url_all) soup = BeautifulSoup(source_code_from_url, 'lxml', from_encoding='utf-8') # select 나 find_all 이나 결과는 똑같다. --> tag list 를 반환한다. content_of_articles = soup.select('p.tit') findings = soup.find_all('p', 'tit') search_md_string = f"## Search Result for '{KEY_WORD}'<br>\n" for i, title in enumerate(soup.find_all('p', 'tit'),1): date_text = title.select('span')[-1].get_text() datetime_format = datetime.datetime.strptime(date_text, '%Y-%m-%d %H:%M') date_new = datetime.datetime.strftime(datetime_format, '%y.%m/%d(%a)')