def authenticate(self, environ):
    """Authenticate a WSGI request using HTTP Digest credentials.

    :param environ: WSGI environment for the incoming request.
    :returns: the result of ``self.compute`` (the authenticated user or
        an error response) on a well-formed Digest header, otherwise a
        fresh authentication challenge from ``self.build_authentication``.
    """
    method = REQUEST_METHOD(environ)
    fullpath = url_quote(SCRIPT_NAME(environ)) + url_quote(PATH_INFO(environ))
    authorization = AUTHORIZATION(environ)
    if not authorization:
        # No credentials supplied: challenge the client.
        return self.build_authentication()
    (authmeth, auth) = authorization.split(" ", 1)
    if 'digest' != authmeth.lower():
        return self.build_authentication()
    # Parse the comma-separated key="value" pairs of the Digest header.
    amap = {}
    for itm in auth.split(", "):
        (k, v) = [s.strip() for s in itm.split("=", 1)]
        amap[k] = v.replace('"', '')
    try:
        username = amap['username']
        authpath = amap['uri']
        nonce = amap['nonce']
        realm = amap['realm']
        response = amap['response']
        assert authpath.split("?", 1)[0] in fullpath
        assert realm == self.realm
        qop = amap.get('qop', '')
        cnonce = amap.get('cnonce', '')
        nc = amap.get('nc', '00000000')
        if qop:
            assert 'auth' == qop
            assert nonce and nc
    except (KeyError, AssertionError):
        # BUG FIX: was a bare ``except:`` which also swallowed
        # programming errors (NameError, KeyboardInterrupt, ...).
        # Only missing fields / failed validations mean "bad request".
        return self.build_authentication()
    ha1 = self.authfunc(environ, realm, username)
    return self.compute(ha1, username, response, method, authpath,
                        nonce, nc, cnonce, qop)
def build_backend_request(request):
    """Derive the backend proxy path and header dict from a request.

    Based on Ian Bicking's WSGIProxy (http://pythonpaste.org/wsgiproxy/).
    Reads the WSGI environ stored under ``request['env']``, stores the
    rebuilt ``path`` and ``headers`` back onto *request*, and returns it.

    FIXME: Does not add X-Forwarded-For or other standard headers.
    """
    env = request['env']
    # CGI-style HTTP_* keys become proper header names (HTTP_USER_AGENT
    # -> User-Agent); everything else in the environ is ignored.
    headers = {
        name[5:].replace('_', '-').title(): val
        for name, val in env.items()
        if name.startswith('HTTP_')
    }
    path = (url_quote(env.get('SCRIPT_NAME', ''))
            + url_quote(env.get('PATH_INFO', '')))
    query = env.get('QUERY_STRING')
    if query:
        path = '%s?%s' % (path, query)
    content_type = env.get('CONTENT_TYPE')
    if content_type:
        headers['Content-Type'] = content_type
    if not path.startswith("/"):
        path = "/" + path
    request['path'] = path
    request['headers'] = headers
    return request
def authenticate(self, environ):
    """Authenticate a WSGI request using HTTP Digest credentials.

    :param environ: WSGI environment for the incoming request.
    :returns: the result of ``self.compute`` on a well-formed Digest
        header, otherwise a challenge from ``self.build_authentication``.
    """
    method = REQUEST_METHOD(environ)
    fullpath = url_quote(SCRIPT_NAME(environ)) + url_quote(PATH_INFO(environ))
    authorization = AUTHORIZATION(environ)
    if not authorization:
        return self.build_authentication()
    (authmeth, auth) = authorization.split(" ", 1)
    if "digest" != authmeth.lower():
        return self.build_authentication()
    amap = dict(_auth_to_kv_pairs(auth))
    try:
        username = amap["username"]
        authpath = amap["uri"]
        nonce = amap["nonce"]
        realm = amap["realm"]
        response = amap["response"]
        assert authpath.split("?", 1)[0] in fullpath
        assert realm == self.realm
        qop = amap.get("qop", "")
        cnonce = amap.get("cnonce", "")
        nc = amap.get("nc", "00000000")
        if qop:
            assert "auth" == qop
            assert nonce and nc
    except (KeyError, AssertionError):
        # BUG FIX: was a bare ``except:`` which also hid programming
        # errors; only missing fields / failed checks mean "bad request".
        return self.build_authentication()
    ha1 = self.authfunc(environ, realm, username)
    return self.compute(ha1, username, response, method, authpath,
                        nonce, nc, cnonce, qop)
def proxy_exact_request(environ, start_response):
    """
    HTTP proxying WSGI application that proxies the exact request
    given in the environment.  All controls are passed through the
    environment.

    This connects to the server given in SERVER_NAME:SERVER_PORT, and
    sends the Host header in HTTP_HOST -- they do not have to match.

    Does not add X-Forwarded-For or other standard headers

    NOTE(review): Python 2 only (``except socket.error, exc`` syntax).
    """
    scheme = environ['wsgi.url_scheme']
    # Pick the connection class matching the inbound scheme.
    if scheme == 'http':
        ConnClass = httplib.HTTPConnection
    elif scheme == 'https':
        ConnClass = httplib.HTTPSConnection
    else:
        raise ValueError("Unknown scheme: %r" % scheme)
    conn = ConnClass('%(SERVER_NAME)s:%(SERVER_PORT)s' % environ)
    # Rebuild outbound headers from the CGI-style HTTP_* environ keys
    # (HTTP_USER_AGENT -> User-Agent).
    headers = {}
    for key, value in environ.items():
        if key.startswith('HTTP_'):
            key = key[5:].replace('_', '-').title()
            headers[key] = value
    path = (url_quote(environ.get('SCRIPT_NAME', ''))
            + url_quote(environ.get('PATH_INFO', '')))
    if environ.get('QUERY_STRING'):
        path += '?' + environ['QUERY_STRING']
    try:
        content_length = int(environ.get('CONTENT_LENGTH', '0'))
    except ValueError:
        content_length = 0
    if content_length:
        # Read exactly the declared body length from the WSGI input.
        body = environ['wsgi.input'].read(content_length)
    else:
        body = ''
    headers['Content-Length'] = content_length
    if environ.get('CONTENT_TYPE'):
        headers['Content-Type'] = environ['CONTENT_TYPE']
    if not path.startswith("/"):
        path = "/" + path
    try:
        conn.request(environ['REQUEST_METHOD'], path, body, headers)
    except socket.error, exc:
        if exc.args[0] == -2:
            # Name or service not known
            exc = httpexceptions.HTTPBadGateway(
                "Name or service not known (bad domain name: %s)"
                % environ['SERVER_NAME'])
            return exc(environ, start_response)
        raise
def __call__(self, environ, start_response): """ HTTP proxying WSGI application that proxies the exact request given in the environment. All controls are passed through the environment. This connects to the server given in SERVER_NAME:SERVER_PORT, and sends the Host header in HTTP_HOST -- they do not have to match. Does not add X-Forwarded-For or other standard headers """ if isinstance(self.address, basestring): parsed_address = urlparse(self.address) host = parsed_address.hostname port = parsed_address.port ConClass = self._get_conn_class(environ, parsed_address.scheme) conn = ConnClass(parsed_address.hostname, parsed_address.port) else: conn = self._get_conn_class(environ)(*self.address) headers = {} for key, value in environ.items(): if key.startswith('HTTP_'): key = key[5:].replace('_', '-').title() headers[key] = value path = (url_quote(environ.get('SCRIPT_NAME', '')) + url_quote(environ.get('PATH_INFO', ''))) if environ.get('QUERY_STRING'): path += '?' + environ['QUERY_STRING'] try: content_length = int(environ.get('CONTENT_LENGTH', '0')) except ValueError: content_length = 0 if content_length: body = environ['wsgi.input'].read(content_length) else: body = '' headers['Content-Length'] = content_length if environ.get('CONTENT_TYPE'): headers['Content-Type'] = environ['CONTENT_TYPE'] if not path.startswith("/"): path = "/" + path try: conn.request(environ['REQUEST_METHOD'], path, body, headers) except socket.error, exc: if exc.args[0] == -2: return http.BadGateway()(environ, start_response) raise
def index_html(self, REQUEST=None, RESPONSE=None, charset='utf-8',
               disposition='inline'):
    """Make the object directly viewable when entering its URL.

    Sets caching/range/content headers, honours If-Modified-Since, adds
    a Content-Disposition header with an RFC 2231 ``filename*`` fallback,
    and returns an iterator over the (possibly range-limited) content.
    NOTE(review): Python 2 only (uses the ``unicode`` builtin).
    """
    if REQUEST is None:
        REQUEST = self.REQUEST
    if RESPONSE is None:
        RESPONSE = REQUEST.RESPONSE
    RESPONSE.setHeader('Last-Modified', rfc1123_date(self._p_mtime))
    RESPONSE.setHeader('Content-Type', self.getContentType())
    RESPONSE.setHeader('Accept-Ranges', 'bytes')
    # 304 Not Modified short-circuit: nothing else to send.
    if handleIfModifiedSince(self, REQUEST, RESPONSE):
        return ''
    length = self.get_size()
    RESPONSE.setHeader('Content-Length', length)
    filename = self.getFilename()
    if filename is not None:
        if not isinstance(filename, unicode):
            filename = unicode(filename, charset, errors="ignore")
        quoted_filename = url_quote(filename.encode("utf8"))
        filename = IUserPreferredFileNameNormalizer(REQUEST).normalize(
            filename)
        header_value = contentDispositionHeader(
            disposition=disposition,
            filename=filename)
        # Add original filename in utf-8, ref to rfc2231
        header_value = header_value + "; filename*=UTF-8''" + quoted_filename
        RESPONSE.setHeader("Content-disposition", header_value)
    # Honour any Range header and stream the requested slice.
    request_range = handleRequestRange(self, length, REQUEST, RESPONSE)
    return self.getIterator(**request_range)
def _add_playlist(self, **kwargs):
    """This method is private because the class method on
    :class:`Playlist` should be used instead.

    :param kwargs: The values for the playlist. See \
    `Add Playlist <http://developer.harvestmedia.net/working-with-members-2/add-a-member-playlist/>`_
    """
    # Validate the required keyword arguments, in a fixed order so the
    # first missing one is the one reported.
    required = (
        ('_client', 'You must pass _client to Playlist.add'),
        ('member_id', 'You must pass member_id to Playlist.add'),
        ('playlist_name', 'You must pass playlist_name to Playlist.add'),
    )
    values = {}
    for key, message in required:
        values[key] = kwargs.get(key, None)
        if not values[key]:
            raise MissingParameter(message)
    _client = values['_client']
    playlist_name = values['playlist_name']
    method_uri = '/addplaylist/{{service_token}}/%(member_id)s/%(playlist_name)s/' % \
        {'member_id': values['member_id'],
         'playlist_name': url_quote(playlist_name.encode('utf-8'))}
    xml_root = _client.get_xml(method_uri)
    playlists = xml_root.find('playlists')
    if playlists is None:
        return
    # The service echoes back all playlists; pick out the one we added.
    for playlist_xml in playlists.getchildren():
        if playlist_xml.get('name') == playlist_name:
            return Playlist._from_xml(playlist_xml, _client)
def add_slices(stats, aggregations, base):
    """Attach a 'sliceByDimension' mapping built from search aggregations.

    For each aggregation bucket a statistics "observation" is added with
    a link to the narrowed search page, recursing into sub-aggregations.

    NOTE(review): ``self.lookup(item_id)`` below references ``self`` in a
    plain function -- calling this as written raises NameError; it was
    presumably extracted from a method.  TODO confirm.
    """
    slice_map = {}
    for agg_key, agg in aggregations.items():
        observations = []
        slice_node = {
            # Strip the trailing '.<ID>' suffix to get the dimension name.
            'dimension': agg_key.replace('.' + ID, ''),
            'observation': observations
        }
        for bucket in agg['buckets']:
            item_id = bucket.pop('key')
            # URL that narrows the current search by this bucket's value.
            search_page_url = "{base}&{param}={value}".format(
                base=base, param=agg_key, value=url_quote(item_id))
            observation = {
                'totalItems': bucket.pop('doc_count'),
                'view': {ID: search_page_url},
                'object': self.lookup(item_id)
            }
            observations.append(observation)
            # Remaining keys in the bucket are nested sub-aggregations.
            add_slices(observation, bucket, search_page_url)
        if observations:
            slice_map[agg_key] = slice_node
    if slice_map:
        stats['sliceByDimension'] = slice_map
def __init__(self, git_dir, hg_url):
    """Record the local mirror paths for the mercurial remote *hg_url*.

    The remote URL is fully percent-encoded (safe="") to obtain a name
    usable as a directory component; the backing hg repository is
    created on first use.
    """
    quoted_name = url_quote(hg_url, safe="")
    self.hg_url = hg_url
    self.hg_name = quoted_name
    self.hg_repo_dir = os.path.join(git_dir, "hgremotes", quoted_name)
    if not os.path.exists(self.hg_repo_dir):
        # First time we see this remote: create the hg repository.
        self.initialize_hg_repo()
    self.git_repo_dir = os.path.join(self.hg_repo_dir, ".hg", "git")
def api(self, cmd, extras = '', no_session = False):
    """Call the down/up acceleration APIs and return their JSON replies.

    Runs *cmd* against each enabled direction ('down' and/or 'up'),
    retrying on connection errors by switching to the fallback portal;
    exits the process with code 5 if even the fallback is unreachable.
    Returns a dict keyed by direction ('down'/'up').
    """
    ret = {}
    for _k1, api_url_k, _clienttype, _v in (
            ('down', 'api_url', 'swjsq', 'do_down_accel'),
            ('up', 'api_up_url', 'uplink', 'do_up_accel')):
        # Skip directions that are disabled on this instance.
        if not getattr(self, _v):
            continue
        while True:
            # missing dial_account, (userid), os
            api_url = getattr(self, api_url_k)
            # TODO: phasing out time_and
            url = 'http://%s/v2/%s?%sclient_type=android-%s-%s&peerid=%s&time_and=%d&client_version=android%s-%s&userid=%s&os=android-%s%s' % (
                api_url,
                cmd,
                ('sessionid=%s&' % self.xl_session) if not no_session else '',
                _clienttype,
                APP_VERSION,
                self.mac,
                time.time() * 1000,
                _clienttype,
                APP_VERSION,
                self.xl_uid,
                url_quote("%s.%s%s" % (OS_VERSION, OS_API_LEVEL, DEVICE_MODEL)),
                ('&%s' % extras) if extras else '',
            )
            try:
                ret[_k1] = {}
                ret[_k1] = json.loads(http_req(url, headers = header_api))
                break
            except URLError as ex:
                uprint("Warning: error during %sapi connection: %s, use portal: %s" % (_k1, str(ex), api_url))
                # Already on the fallback portal and still failing: give up.
                if (_k1 == 'down' and api_url == FALLBACK_PORTAL) or (_k1 == 'up' and api_url == FALLBACK_UPPORTAL):
                    print("Error: can't connect to %s api" % _k1)
                    os._exit(5)
                # Switch to the fallback portal and retry.
                if _k1 == 'down':
                    setattr(self, api_url_k, FALLBACK_PORTAL)
                elif _k1 == 'up':
                    setattr(self, api_url_k, FALLBACK_UPPORTAL)
    return ret
def _get_links(query):
    """Return all google result links."""
    page = _get_result(SEARCH_URL.format(site=URL, query=url_quote(query)))
    document = pq(page)
    # Prefer the '.l' result anchors; fall back to anchors inside '.r'
    # when the first selector matches nothing.
    primary = [anchor.attrib['href'] for anchor in document('.l')]
    if primary:
        return primary
    return [anchor.attrib['href'] for anchor in document('.r')('a')]
def setup(self):
    """Setup the instance.

    Creates the run/conf/data directories, starts slapd, rewrites the
    'person' objectClass so that 'sn' becomes optional, restarts the
    daemon to reload the schema, seeds the base LDAP tree, and finally
    imports canned AD data via ldapadd.
    """
    ldapi_socket = self.run_dir + "/ldapi"
    # safe='' so the socket path's slashes are %-escaped inside the URL.
    self.ldapi_url = "ldapi://" + url_quote(ldapi_socket, "")
    self.url_list = self.ldapi_url + " " + self.ldap_url
    os.makedirs(self.conf_slapd_d_dir)
    os.makedirs(self.run_dir)
    os.makedirs(self.data_dir)
    super(FakeAD, self)._setup_config()
    self._setup_config()
    # Start the daemon
    super(FakeAD, self)._start_daemon()
    # Relax requirement of surname attribute presence in person
    modlist = [
        (ldap.MOD_DELETE, "olcObjectClasses",
         b"{4}( 2.5.6.6 NAME 'person' DESC 'RFC2256: a person' SUP top "
         b"STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ "
         b"telephoneNumber $ seeAlso $ description ) )"),
        (ldap.MOD_ADD, "olcObjectClasses",
         b"{4}( 2.5.6.6 NAME 'person' DESC 'RFC2256: a person' SUP top "
         b"STRUCTURAL MUST ( cn ) MAY ( sn $ userPassword $ "
         b"telephoneNumber $ seeAlso $ description ) )"),
    ]
    ldap_conn = ldap.initialize(self.ldapi_url)
    ldap_conn.simple_bind_s(self.admin_rdn + ",cn=config", self.admin_pw)
    # NOTE: "{0}" is OpenLDAP's literal config-DN index notation here,
    # not a Python format placeholder.
    ldap_conn.modify_s("cn={0}core,cn=schema,cn=config", modlist)
    ldap_conn.unbind_s()
    # restart daemon for reloading schema
    super(FakeAD, self)._stop_daemon()
    super(FakeAD, self)._start_daemon()
    # Add data
    ldap_conn = ldap.initialize(self.ldap_url)
    ldap_conn.simple_bind_s(self.admin_dn, self.admin_pw)
    ldap_conn.add_s(self.base_dn, [
        ("objectClass", [b"dcObject", b"organization"]),
        ("o", b"Example Company"),
    ])
    ldap_conn.add_s("cn=Manager," + self.base_dn, [
        ("objectClass", b"organizationalRole"),
    ])
    for ou in ("Users", "Groups", "Netgroups", "Services", "Policies"):
        ldap_conn.add_s("ou=" + ou + "," + self.base_dn, [
            ("objectClass", [b"top", b"organizationalUnit"]),
        ])
    ldap_conn.unbind_s()
    # import data from real AD
    subprocess.check_call(
        ["ldapadd", "-x", "-w", self.admin_pw, "-D", self.admin_dn,
         "-H", self.ldap_url, "-f", "data/ad_data.ldif"],
    )
def clientside_actions(self, content_doc, log):
    """Build the client-side action descriptors for this theme rule.

    Returns a list with either one 'include' callback (when the content
    comes from an external href and must be fetched via a subrequest),
    an empty list (no match), or one data dict describing what to insert
    and where.
    """
    if self.content_href:
        # External content: delegate to a subrequest endpoint and let
        # the client include the result.
        href = urlparse.urljoin(log.request.url, self.content_href)
        url = '%s/.deliverance/subreq?url=%s&action=%s&content=%s&theme=%s' % (
            log.request.application_url, url_quote(href),
            url_quote(self.name), url_quote(str(self.content)),
            url_quote(str(self.theme)))
        return [{'mode': 'include', 'callback': url}]
    if not self.if_content_matches(content_doc, log):
        return []
    content_type, content_els, content_attributes = self.select_elements(
        self.content, content_doc, theme=False)
    if not content_els:
        if self.nocontent == 'abort':
            ## FIXME: uh oh
            raise AbortTheme('No content matches content="%s"' % self.content)
        else:
            ## FIXME: log
            return []
    # The theme selector is "<mode>:<css/xpath selector>".
    theme_type, theme_selector = str(self.theme).split(':', 1)
    data = {'type': self.name,
            'mode': theme_type,
            'selector': theme_selector}
    if content_type == 'attributes' or content_type == 'tag':
        data['attributes'] = dict(content_els[0].attrib)
    if content_type == 'tag':
        data['tag'] = content_els[0].tag
    elif content_type == 'children':
        # Serialize the children (and leading text) of each element.
        text = []
        for el in content_els:
            text.append(el.text)
            for child in el:
                text.append(tostring(child))
        data['content'] = ''.join(text)
    elif content_type == 'elements':
        # Serialize the whole elements, dropping trailing tail text.
        text = []
        for el in content_els:
            ## FIXME: sloppy :(
            el.tail = None
            text.append(tostring(el))
        data['content'] = ''.join(text)
    return [data]
def transform(row, table):
    """Convert one scraped table row into a plain result dict.

    The row's filename cell carries both the display text and the href;
    the href is quoted and resolved against the search router URL.
    """
    file_data = tag_to_dict(row.filename)
    quoted_href = url_quote(file_data['href'])
    return {
        'date': extract_text(row.date),
        'description': extract_text(row.description),
        'filename': file_data['text'],
        'size': extract_text(row.size),
        'url': url_join(URL_ROUTER_SEARCH, quoted_href),
    }
def posts(self, subscription_url, count=20):
    """Return up to *count* posts of the given subscription feed."""
    # safe='' so every character of the feed URL is percent-encoded
    # before being appended to the API base URL.
    quoted = url_quote(subscription_url, '')
    return self.get_items('%s%s' % (SUBSCRIPTION_URL, quoted), count)
def mkdtemp(self, prefix):
    """
    Creates a new directory in the document root and returns its
    ``(absolute_path, uri)`` pair; the URI always ends with "/".
    """
    # Delegates to tempfile.mkdtemp (module-level name), appending "_"
    # to the prefix for readability of the generated directory name.
    new_dir = mkdtemp(prefix=prefix + "_", dir=self.docroot)
    uri = joinURL("/", url_quote(os.path.basename(new_dir))) + "/"
    return (os.path.abspath(new_dir), uri)
def vcal(request):
    """Render the vCal exports page.

    For authenticated users the template receives a user identifier:
    the url-quoted email when present, otherwise the username.
    """
    user_string = None
    if request.user.is_authenticated():
        user = request.user
        user_string = url_quote(user.email) if user.email else user.username
    context = {
        'page_title': "vCal exports",
        'authenticated_user_string': user_string,
    }
    return jingo.render(request, 'vcal/vcal.html', context)
def get_search_hits(query):
    """
    Get links pointing to answers to a query, out of the results served
    by a search engine about that query

    :returns: a list of links, as plain text
    """
    results_page = Page(SEARCH_URL.format(SITE_URL, url_quote(query)))
    return SearchHit(results_page)
def cleanId(id):
    """Encode *id* into an ObjectManager-safe form.

    ``url_quote`` turns strange chars into '%xx', which is not a valid
    char for ObjectManager, so '%' is then encoded as '-' (with a
    literal '-' first escaped as '--').  Reversible via ``decleanId``.
    """
    __traceback_info__ = (id,)
    if not id:
        return ''
    # safe='' so that '/' is percent-encoded as well.
    quoted = url_quote(id, '')
    return quoted.replace('-', '--').replace('%', '-')
def _get_links(query):
    """Fetch search results for *query* and return the extracted links.

    The engine is chosen via the HOWDOI_SEARCH_ENGINE environment
    variable (default 'google'); raises BlockError when the engine
    temporarily blocks the request.
    """
    engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
    url_template = _get_search_url(engine)
    result = _get_result(url_template.format(URL, url_quote(query)))
    if _is_blocked(result):
        _print_err('Unable to find an answer because the search engine temporarily blocked the request. '
                   'Please wait a few minutes or select a different search engine.')
        raise BlockError("Temporary block by search engine")
    return _extract_links(pq(result), engine)
def get_url(self, sl, tl, qry):
    """Build the google translate_a/single request URL.

    Uses the mainland mirror (translate.google.cn) whenever the target
    language contains "zh"; the query text is percent-encoded.
    """
    host = "translate.google.cn" if "zh" in tl else "translate.googleapis.com"
    encoded_query = url_quote(qry)
    return (
        "https://{}/translate_a/single?client=gtx&sl={}&tl={}&dt=at&dt=bd&dt=ex&"
        "dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&q={}".format(
            host, sl, tl, encoded_query
        )
    )
def translate(self, sl, tl, text, options=None):
    """Look up *text* on dict.cn and return a translation dict.

    Returns None when the HTTP request yields no response body.
    """
    params = {"q": url_quote(text)}
    resp = self.http_get("http://dict.cn/mini.php", params)
    if not resp:
        return
    result = self.create_translation(sl, tl, text)
    result["phonetic"] = self.get_phonetic(resp)
    result["explains"] = self.get_explains(resp)
    return result
def getBody(self, number, msgid, group):
    '''Retrieve the body of the article.

    First we need to enter the group, then retrieve with an ARTICLE
    command the headers list and the body.

    Arguments:
        number : article number
        msgid : article message-id
        group : the group the article is in
    Return:
        message : it can be used to interact with the user
        headerList : the headers list, XPN uses it with
            xpn_src.Article.parse_header_list
        rawBody : the body retrieved (or phantom article). XPN uses it
            with xpn_src.Article.set_body (not in the case it is the
            phantom article)
        bodyRetrieved : True if the body has been successfully retrieved
            from the server (so rawBody isn't [] or phantom article)
    '''
    bodyRetrieved = False
    headerList = []
    rawBody = []
    message, first, last = self._enterGroup(group)
    if self._isUp():
        try:
            self._addLog("ARTICLE " + number, True)
            resp, number, id, rawBody = self.serverConnection.article(
                number)
        except nntplib.NNTPTemporaryError:
            message = _("Server error: %s") % (str(sys.exc_info()[1]), )
            # A 4xx reply means the article is missing on this server:
            # build a "phantom" body pointing the user at Google Groups.
            if str(sys.exc_info()[1])[:1] == "4":
                #article is not on the server, we use a phantom article
                link = r"http://groups.google.com/groups?selm=" + url_quote(
                    msgid[1:-1])
                rawBody = (_("Server Error: ") + str(sys.exc_info()[1]),
                           "",
                           _("You can try on Google:"),
                           "",
                           link)
            self._addLog(
                str(sys.exc_info()[0]) + "," + str(sys.exc_info()[1]),
                False)
        except:
            #every other type of errors
            message = _("Server error: %s") % (str(sys.exc_info()[1]), )
            self._addLog(
                str(sys.exc_info()[0]) + "," + str(sys.exc_info()[1]),
                False)
        else:
            self._addLog(resp, False)
            message = _("%s response : %s") % (self.serverAddress, resp)
            # Headers and body are separated by the first empty line.
            ind = rawBody.index("")
            headerList = rawBody[:ind]
            rawBody = rawBody[ind + 1:]
            bodyRetrieved = True
    return message, headerList, rawBody, bodyRetrieved
def cleanId(id):
    """ObjectManager-safe encoding of *id*; ``decleanId`` is the inverse.

    ``url_quote`` produces '%xx' escapes and '%' is not a legal
    ObjectManager id character, so '%' is mapped to '-' after first
    escaping any literal '-' as '--'.  Assumes the id may start with a
    non-alphanumeric character, which is true.
    """
    __traceback_info__ = (id, )
    if not id:
        return ''
    # note: we provide the 'safe' param to get '/' encoded
    encoded = url_quote(id, '')
    encoded = encoded.replace('-', '--')
    return encoded.replace('%', '-')
def __str__(self):
    """
    Returns a string representation of the cookie in the format, eg
    ``session_id=abc123;Path=/cgi-bin;Domain=.example.com;Version=1``
    """
    parts = ['%s=%s' % (self.name, url_quote(str(self.value)))]
    # Append each configured attribute that has a value set.
    for cookie_name, att_name in self.attributes:
        att_value = getattr(self, att_name, None)
        if att_value is not None:
            parts.append('%s=%s' % (cookie_name, str(att_value)))
    if self.http_only:
        parts.append('HttpOnly')
    return ';'.join(parts)
def translate(self, sl, tl, text, options=None):
    """Look up *text* on the baicizhan search API.

    Fills ``self._trans`` with 'phonetic' and 'explain' entries and
    returns it; returns None when the request fails or the JSON reply
    is empty.  (sl/tl/options are unused by this backend.)
    """
    url = "http://mall.baicizhan.com/ws/search"
    req = {}
    req["w"] = url_quote(text)
    r = self.http_get(url, req, None)
    if r:
        resp = json.loads(r)
        if not resp:
            return
        self._trans["phonetic"] = self.get_phonetic(resp)
        self._trans["explain"] = self.get_explain(resp)
        return self._trans
def is_logged_in(self):
    # NOTE(review): ``func`` and ``redirect_to`` are free names here --
    # this looks like the inner wrapper of an authentication decorator,
    # with both names bound in the enclosing closure.  TODO confirm.
    guid = self.get_secure_cookie('user')
    if guid:
        # Cookie present and it maps to a real user: run the wrapped
        # handler.
        if self.db.users.User(dict(guid=guid)):
            return func(self)
    if redirect_to:
        # Not logged in: bounce to the login page, preserving the
        # original path (and query string) in ?next=.
        next = self.request.path
        if self.request.query:
            next += '?%s' % self.request.query
        url = redirect_to + '?next=%s' % url_quote(next)
        self.redirect(url)
    else:
        raise HTTPError(403, "Must be logged in")
def publish_analyst_note(self, title, text, topic=None, entities=None,
                         note_entities=None, context_entities=None,
                         validated_on=None, validation_urls=None,
                         labels=None, source=None, resolve_entities=None):
    """Publish an Analyst Note

    Ex:
    >>> api = ConnectApiClient(app_name='DocTest')
    >>> res = api.publish_analyst_note("TestNote", "this is the content",
    ...     context_entities=["<entity id>"])
    {'document_id': u'doc:<doc id>'}
    """
    params = {
        "title": title,
        "text": text,
    }
    # Only include the optional body fields that were actually supplied.
    optional_fields = (
        ("topic", topic),
        ("entities", entities),
        ("note_entities", note_entities),
        ("context_entities", context_entities),
        ("validated_on", validated_on),
        ("validation_urls", validation_urls),
        ("labels", labels),
    )
    for field_name, field_value in optional_fields:
        if field_value is not None:
            params[field_name] = field_value
    # Optional query-string parameters.
    qs = {}
    if source is not None:
        qs["source"] = url_quote(source)
    if resolve_entities is not None:
        qs["resolveEntities"] = "true" if resolve_entities else "false"
    if qs:
        query_str = "?" + "&".join(
            "{}={}".format(k, v) for (k, v) in qs.items())
    else:
        query_str = ""
    response = self._query("analystnote/publish" + query_str, params,
                           method='post')
    return DotAccessDict(response.result)
def __call__(self, environ, start_response): if isinstance(self.address, basestring): parsed_address = urlparse(self.address) host = parsed_address.hostname port = parsed_address.port ConClass = self._get_conn_class(environ, parsed_address.scheme) conn = ConnClass(parsed_address.hostname, parsed_address.port) else: conn = self._get_conn_class(environ)(*self.address) headers = {} for key, value in environ.items(): if key.startswith('HTTP_'): key = key[5:].replace('_', '-').title() headers[key] = value path = (url_quote(environ.get('SCRIPT_NAME', '')) + url_quote(environ.get('PATH_INFO', ''))) if environ.get('QUERY_STRING'): path += '?' + environ['QUERY_STRING'] try: content_length = int(environ.get('CONTENT_LENGTH', '0')) except ValueError: content_length = 0 if content_length: body = environ['wsgi.input'].read(content_length) else: body = '' headers['Content-Length'] = content_length if environ.get('CONTENT_TYPE'): headers['Content-Type'] = environ['CONTENT_TYPE'] if not path.startswith("/"): path = "/" + path try: conn.request(environ['REQUEST_METHOD'], path, body, headers) except socket.error, exc: if exc.args[0] == -2: return http.BadGateway()(environ, start_response) raise
def translate(self, sl, tl, text, options=None):
    """Translate *text*, using the cn endpoint for Chinese targets.

    Returns a translation dict with 'phonetic' and 'explains' keys, or
    None when the request yields no response.
    """
    base_url = self._cnurl if "zh" in tl else self._url
    request_url = "{}?q={}".format(base_url, url_quote(text))
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
    }
    resp = self.http_get(request_url, None, headers)
    if not resp:
        return None
    result = self.create_translation(sl, tl, text)
    result["phonetic"] = self.get_phonetic(resp)
    result["explains"] = self.get_explains(resp)
    return result
def select_builtin_assignment(lti=lti):
    """Return (as JSON) the url-quoted launch URL of a built-in assignment.

    Reads ``assignment_type`` and ``assignment_id`` from the request
    args; only LTI instructors of the course may call this.
    """
    assignment_type = request.args.get('assignment_type', None)
    assignment_id = request.args.get('assignment_id', None)
    user, roles, course = ensure_canvas_arguments()
    if not User.is_lti_instructor(roles):
        return "You are not an instructor in this course."
    assignment = Assignment.by_builtin(assignment_type, assignment_id,
                                       owner_id=user.id,
                                       course_id=course.id)
    assignment_url = url_for('lti_assignments.index',
                             assignment_id=assignment.id,
                             _external=True)
    # BUG FIX: was the Python-2-only statement ``print assignment_url``;
    # the parenthesized call behaves identically for a single argument
    # on Python 2 and also works on Python 3.
    print(assignment_url)
    encoded_url = url_quote(assignment_url)
    return jsonify(url=encoded_url)
def test_authenticated_decorator_redirect(self):
    # If you anonymously click on a link that needs you to be logged in,
    # it should redirect you to the login page with the original URL
    # carried in the ?next= parameter.
    login_url = self.reverse_url('login')
    assert self.client.get(login_url).code == 200
    url = self.reverse_url('questions')
    response = self.client.get(url)
    self.assertEqual(response.code, 302)
    parsed = urlparse(response.headers['location'])
    self.assertEqual(parsed.path, login_url)
    # safe='' so the leading '/' of the next-URL is percent-encoded too.
    self.assertEqual(parsed.query, 'next=%s' % url_quote(url, safe=''))
def test_settings_redirect(self):
    """Anonymous access to settings redirects to login with ?next=...,
    preserving any query string of the original request."""
    url = self.reverse_url('settings')
    response = self.client.get(url)
    self.assertEqual(response.code, 302)
    self.assertTrue(self.reverse_url('login') in \
                    response.headers['location'])
    self.assertTrue(response.headers['location']\
                    .endswith('?next=%s' % url_quote(url)))
    # with query string
    response = self.client.get(url, {'name': 'Peter'})
    self.assertEqual(response.code, 302)
    # BUG FIX: this assertion was duplicated verbatim (copy-paste);
    # the redundant second copy has been removed.
    self.assertTrue(self.reverse_url('login') in \
                    response.headers['location'])
    self.assertTrue('?next=%s' % url_quote(url) in \
                    response.headers['location'])
    self.assertTrue(url_quote('?name=Peter') in \
                    response.headers['location'])
def new_assignment(course_id, lti=lti):
    """Create a new Assignment in *course_id* owned by the current user.

    Optional form values: ``name``, ``level``, ``menu`` (default
    "select") and ``type`` (default "normal").  Returns a JSON payload
    with the assignment's ids, display fields and the view/select/edit
    URLs used by the LTI content-item picker.
    """
    name = request.values.get('name', None) or None
    level = request.values.get('level', None) or None
    menu = request.values.get('menu', "select")
    #TODO: change "normal" type to "blockpy"
    type = request.values.get('type', "normal")
    assignment = Assignment.new(owner_id=g.user.id, course_id=course_id,
                                type=type, name=name, level=level)
    # Embedded pickers launch in an iframe; everything else uses the
    # regular LTI launch URL return type.
    launch_type = 'lti_launch_url' if menu != 'embed' else 'iframe'
    endpoint = 'assignments.load'
    return jsonify(success=True,
                   redirect=url_for('assignments.load',
                                    assignment_id=assignment.id),
                   id=assignment.id,
                   name=assignment.name,
                   type=type,
                   body=strip_tags(assignment.body)[:255],
                   title=assignment.title(),
                   view=url_for('assignments.load',
                                assignment_id=assignment.id,
                                embed=menu == 'embed'),
                   # Content-item "select" URL: the quoted launch URL plus
                   # literal return parameters expected by the consumer.
                   select=url_quote(
                       url_for(endpoint, assignment_id=assignment.id,
                               _external=True, embed=menu == 'embed'))
                       + "&return_type=" + launch_type
                       + "&title=" + url_quote(assignment.title())
                       + "&text=BlockPy%20Exercise&width=100%25&height=600",
                   edit=url_for('assignments.load',
                                assignment_id=assignment.id,
                                course_id=assignment.course_id),
                   # Strip the zero-padding from hour/day for display.
                   date_modified=assignment.date_modified.strftime(
                       " %I:%M%p on %a %d, %b %Y").replace(" 0", " "))
def documents_a_la_google(request):
    """put in a link that says _Did you mean: *correction*_

    Searches documents by title/body for ?q=...; when the spell
    corrector suggests a different query, a ``correction`` mapping is
    left in scope so the template (rendered with ``locals()``) can show
    the "Did you mean" link.  NOTE(review): Python 2 only (print
    statement).
    """
    documents = Document.objects.all()
    if request.GET.get('q'):
        q = request.GET.get('q')
        documents = documents.filter(Q(title__icontains=q) |
                                     Q(body__icontains=q))
        corrected = spellcorrector_instance.correct(q)
        # Debug output (Python 2 print statement).
        print "corrected", repr(corrected)
        if corrected != q:
            # Picked up by the template via locals() below.
            correction = {'query_string': url_quote(corrected),
                          'correction': corrected}
    return render_to_response('documents-a-la-google.html', locals(),
                              context_instance=RequestContext(request))
def translate(self, sl, tl, text, options=None):
    """Translate via Bing SerpHoverTrans; fills and returns self._trans.

    Switches to the mainland mirror when the target language contains
    "zh"; returns None when the request yields no response.
    """
    if "zh" in tl:
        # Use the cn.bing.com mirror for Chinese targets.
        self._url = "http://cn.bing.com/dict/SerpHoverTrans"
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
    }
    request_url = "{}?q={}".format(self._url, url_quote(text))
    resp = self.http_get(request_url, None, headers)
    if not resp:
        return
    self._trans["phonetic"] = self.get_phonetic(resp)
    self._trans["explain"] = self.get_explain(resp)
    return self._trans
def translate(self, sl, tl, text):
    """Translate via Bing SerpHoverTrans; fills and returns self._trans.

    Uses the cn.bing.com mirror when the target language contains 'zh';
    returns None when the request yields no response.
    """
    if 'zh' in tl:
        self._url = 'http://cn.bing.com/dict/SerpHoverTrans'
    query_url = self._url + '?q=' + url_quote(text)
    accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
    resp = self.http_get(
        query_url, None,
        {'Accept': accept, 'Accept-Language': 'en-US,en;q=0.5'})
    if not resp:
        return
    self._trans['phonetic'] = self.get_phonetic(resp)
    self._trans['explain'] = self.get_explain(resp)
    return self._trans
def _get_links(query):
    """Return result links for *query* from the configured search engine.

    (Docstrings translated from Polish.)  The search URL is built from
    the engine template; its first format parameter is the site URL and
    the second is the quoted query text.
    """
    search_engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
    search_url = _get_search_url(search_engine)
    # Fetch the raw response for the query.
    result = _get_result(search_url.format(URL, url_quote(query)))
    # pyquery parses the raw response into an HTML document.
    html = pq(result)
    # Extract the links; the engine is passed along because each
    # engine's result markup differs.
    return _extract_links(html, search_engine)
def test_releases_urlquoting_hash(self, mapp, simpypi):
    # A mirror index must keep the '#sha256=...' fragment intact while
    # the filename part of the release link is url-quoted upstream.
    mapp.create_and_login_user('mirror10')
    indexconfig = dict(type="mirror",
                       mirror_url=simpypi.simpleurl,
                       mirror_cache_expiry=0)
    mapp.create_index("mirror", indexconfig=indexconfig)
    mapp.use("mirror10/mirror")
    url_quoted_pkgver = "%s#sha256=1234" % url_quote(
        'pkg-1!2017.4+devpi.zip')
    assert url_quoted_pkgver == 'pkg-1%212017.4%2Bdevpi.zip#sha256=1234'
    simpypi.add_release('pkg', pkgver=url_quoted_pkgver)
    result = mapp.getreleaseslist("pkg")
    assert len(result) == 1
    # The served link is unquoted again and routed via the +f hash path.
    assert result[0].endswith(
        '/mirror10/mirror/+f/123/4/pkg-1!2017.4+devpi.zip')
def test_releases_urlquoting(self, mapp, simpypi):
    # A mirror index must unquote url-quoted release filenames coming
    # from upstream (no hash fragment in this variant).
    mapp.create_and_login_user('mirror9')
    indexconfig = dict(type="mirror",
                       mirror_url=simpypi.simpleurl,
                       mirror_cache_expiry=0)
    mapp.create_index("mirror", indexconfig=indexconfig)
    mapp.use("mirror9/mirror")
    url_quoted_pkgver = url_quote('pkg-1!2017.4+devpi.zip')
    assert url_quoted_pkgver == 'pkg-1%212017.4%2Bdevpi.zip'
    simpypi.add_release('pkg', pkgver=url_quoted_pkgver)
    result = mapp.getreleaseslist("pkg")
    # Without a hash the link is routed via the +e external path, which
    # embeds a sanitized form of the upstream base URL.
    base = simpypi.baseurl.replace('http://', 'http_').replace(':', '_')
    assert len(result) == 1
    assert result[0].endswith(
        '/mirror9/mirror/+e/%s_pkg/pkg-1!2017.4+devpi.zip' % base)
def translate(self, sl, tl, text, options=None):
    """Look up *text* on the baicizhan search API.

    Returns a translation dict with 'phonetic' and 'explains' keys, or
    None when the request fails or the reply is not valid JSON.
    """
    reply = self.http_get("http://mall.baicizhan.com/ws/search",
                          {"w": url_quote(text)}, None)
    if not reply:
        return None
    try:
        payload = json.loads(reply)
    except:
        # Malformed JSON from the service: treat as no result.
        return None
    result = self.create_translation(sl, tl, text)
    result["phonetic"] = self.get_phonetic(payload)
    result["explains"] = self.get_explains(payload)
    return result
def verify_simple(environ, start_response):
    """WSGI endpoint: validate a reCAPTCHA-protected contact form.

    On validation failure redirects back with ?formErrors=... plus the
    submitted fields; on success emails the form and redirects with
    ?success=1 (or ?emailError=1).  NOTE(review): Python 2 only
    (``iteritems``); the reCAPTCHA private key is hard-coded below.
    """
    logging.debug(environ['tiddlyweb.query'])
    # NOTE(review): ``domain`` is computed but never used here.
    domain = get_domain(environ['HTTP_HOST'])
    # Where to send the user afterwards: explicit form field, else the
    # referring page with its query string stripped.
    try:
        redirect = environ['tiddlyweb.query']['recaptcha_redirect'][0]
    except (KeyError, IndexError):
        redirect = environ['HTTP_REFERER'].split('?', 1)[0]
    query = environ['tiddlyweb.query']
    formErrors = []
    name = query['name'][0]
    if name == '':
        formErrors.append('name')
    email = query['email'][0]
    if email == '':
        formErrors.append('email')
    challenge_field = environ['tiddlyweb.query']['recaptcha_challenge_field'][0]
    # logging.debug('challenge_field: '+challenge_field)
    response_field = environ['tiddlyweb.query']['recaptcha_response_field'][0]
    # logging.debug('response_field: '+response_field)
    private_key = "6Ld8HAgAAAAAAAyOgYXbOtqAD1yuTaOuwP8lpzX0"
    ip_addr = environ['REMOTE_ADDR']
    # logging.debug('ip_addr: '+ip_addr)
    resp = captcha.submit(challenge_field, response_field, private_key,
                          ip_addr)
    if not resp.is_valid:
        formErrors.append('recaptcha_response_field')
    if formErrors:
        # Re-submit every field back to the form page so it can be
        # re-rendered with the user's input preserved.
        params = []
        for key, value in query.iteritems():
            params.append(key+'='+url_quote(value[0]))
        redirect = str(redirect + '?formErrors='+','.join(formErrors)+'&'+'&'.join(params))
        # JRL: not sure why I need to wrap this in str(), but if I
        # don't, I get WSGI response header value %r is not a byte string
    else:
        try:
            emailAvox(query)
            redirect = redirect + '?success=1'
        except Exception as exc:
            logging.debug(exc)
            redirect = redirect + '?emailError=1'
    start_response('302 Found', [
        ('Content-Type', 'text/html'),
        ('Location', redirect),
        ('Pragma', 'no-cache')
    ])
    return []
def get_links(text):
    """Search for *text* and return the unwrapped result links.

    The search engine wraps each target as a redirect link containing
    q={url}; those are matched with RE_LINK, unquoted and collected.
    """
    query = SEARCH_QUERY.format(url_quote(text))
    html = pg(requests.get(query).text)
    anchors = html('.r')('a')
    unwrapped = []
    for anchor in anchors:
        match = RE_LINK.match(anchor.attrib['href'])
        if not match:
            # Not a redirect-style result link; skip it.
            continue
        unwrapped.append(url_unquote(match.groups()[0]))
    return unwrapped
def new_assignment(lti=lti):
    """Create a new Assignment for the current LTI course and return its
    metadata as JSON.

    Non-instructors get a plain refusal string instead. The ``select``
    URL is shaped differently when the request came from the 'share'
    menu (iframe return type / shared endpoint).
    """
    user, roles, course = ensure_canvas_arguments()
    menu = request.values.get('menu', "select")
    if not User.is_lti_instructor(roles):
        return "You are not an instructor in this course."
    assignment = Assignment.new(owner_id=user.id, course_id=course.id)
    if menu == 'share':
        launch_type = 'iframe'
        endpoint = 'lti_assignments.shared'
    else:
        launch_type = 'lti_launch_url'
        endpoint = 'lti_assignments.index'
    edit_url = url_for('lti_assignments.edit_assignment',
                       assignment_id=assignment.id)
    select_url = (url_quote(url_for(endpoint, assignment_id=assignment.id,
                                    _external=True))
                  + "&return_type=" + launch_type
                  + "&title=" + url_quote(assignment.title())
                  + "&text=BlockPy%20Exercise&width=100%25&height=600")
    # strip leading zeros from hour/day for a friendlier timestamp
    modified = assignment.date_modified.strftime(
        " %I:%M%p on %a %d, %b %Y").replace(" 0", " ")
    return jsonify(success=True,
                   redirect=edit_url,
                   id=assignment.id,
                   name=assignment.name,
                   body=strip_tags(assignment.body)[:255],
                   title=assignment.title(),
                   select=select_url,
                   edit=edit_url,
                   date_modified=modified)
def api(self, cmd, extras='', no_session=False):
    """Call the accelerator REST API for each enabled link direction.

    Iterates over the down-link and up-link configurations; for every
    direction whose enable flag (``do_down_accel`` / ``do_up_accel``)
    is set, issues `cmd` against that direction's API host, retrying
    with the fallback portal on connection errors.

    :param cmd: API command path segment appended after ``/v2/``
    :param extras: extra query-string parameters, appended verbatim
    :param no_session: when True, omit the ``sessionid`` parameter
    :return: dict mapping 'down'/'up' to the decoded JSON response
    """
    ret = {}
    # (result key, attr holding the API host, client_type string, enable-flag attr)
    for _k1, api_url_k, _clienttype, _v in (('down', 'api_url', 'swjsq', 'do_down_accel'),
                                            ('up', 'api_up_url', 'uplink', 'do_up_accel')):
        if not getattr(self, _v):
            # this direction is disabled; skip it entirely
            continue
        while True:
            # missing dial_account, (userid), os
            # re-read the host each attempt: it may have been switched
            # to the fallback portal by a previous failed iteration
            api_url = getattr(self, api_url_k)
            # TODO: phasing out time_and
            url = 'http://%s/v2/%s?%sclient_type=android-%s-%s&peerid=%s&time_and=%d&client_version=android%s-%s&userid=%s&os=android-%s%s' % (
                api_url,
                cmd,
                ('sessionid=%s&' % self.xl_session) if not no_session else '',
                _clienttype,
                APP_VERSION,
                self.mac,
                time.time() * 1000,
                _clienttype,
                APP_VERSION,
                self.xl_uid,
                url_quote("%s.%s%s" % (OS_VERSION, OS_API_LEVEL, DEVICE_MODEL)),
                ('&%s' % extras) if extras else '',
            )
            try:
                # pre-set an empty dict so ret[_k1] exists even if
                # json.loads raises something other than URLError
                ret[_k1] = {}
                ret[_k1] = json.loads(http_req(url, headers=header_api))
                break
            except URLError as ex:
                uprint(
                    "Warning: error during %sapi connection: %s, use portal: %s" % (_k1, str(ex), api_url))
                # already on the fallback portal and still failing:
                # give up hard (process exit, not an exception)
                if (_k1 == 'down' and api_url == FALLBACK_PORTAL) or (
                        _k1 == 'up' and api_url == FALLBACK_UPPORTAL):
                    print("Error: can't connect to %s api" % _k1)
                    os._exit(5)
                # switch this direction to its fallback portal and retry
                if _k1 == 'down':
                    setattr(self, api_url_k, FALLBACK_PORTAL)
                elif _k1 == 'up':
                    setattr(self, api_url_k, FALLBACK_UPPORTAL)
    return ret
def verify_simple(environ, start_response):
    """WSGI endpoint validating a signup form plus its reCAPTCHA answer.

    On validation failure, redirects back with ``formErrors`` (and the
    original form values) appended to the query string; on success,
    emails the form via ``emailAvox`` and redirects with ``success=1``.
    Always responds 302.
    """
    logging.debug(environ["tiddlyweb.query"])
    domain = get_domain(environ["HTTP_HOST"])
    try:
        redirect = environ["tiddlyweb.query"]["recaptcha_redirect"][0]
    except (KeyError, IndexError):
        # no explicit redirect target supplied: fall back to the
        # referring page, stripped of any query string
        redirect = environ["HTTP_REFERER"].split("?", 1)[0]
    query = environ["tiddlyweb.query"]
    formErrors = []
    name = query["name"][0]
    if name == "":
        formErrors.append("name")
    email = query["email"][0]
    if email == "":
        formErrors.append("email")
    challenge_field = environ["tiddlyweb.query"]["recaptcha_challenge_field"][0]
    response_field = environ["tiddlyweb.query"]["recaptcha_response_field"][0]
    private_key = "6Ld8HAgAAAAAAAyOgYXbOtqAD1yuTaOuwP8lpzX0"
    ip_addr = environ["REMOTE_ADDR"]
    resp = captcha.submit(challenge_field, response_field, private_key, ip_addr)
    if not resp.is_valid:
        formErrors.append("recaptcha_response_field")
    if formErrors:
        params = []
        # FIX: dict.iteritems() is Python-2-only and raises AttributeError
        # on Python 3; items() behaves identically for this use on both.
        for key, value in query.items():
            params.append(key + "=" + url_quote(value[0]))
        # str() guards against a unicode Location header value, which the
        # WSGI server rejects ("response header value %r is not a byte string")
        redirect = str(
            redirect + "?formErrors=" + ",".join(formErrors) + "&" + "&".join(params)
        )
    else:
        try:
            emailAvox(query)
            redirect = redirect + "?success=1"
        except Exception as exc:
            # best-effort: an email failure is reported back to the form,
            # not raised to the user as a 500
            logging.debug(exc)
            redirect = redirect + "?emailError=1"
    start_response("302 Found",
                   [("Content-Type", "text/html"),
                    ("Location", redirect),
                    ("Pragma", "no-cache")])
    return []
def lookup_url(self, url, **kwargs):
    """Fetch entity information about *url*.

    The URL is percent-encoded with no safe characters before being
    handed to :meth:`get_entity` as a ``url``-type entity id.

    :param url: the URL to look up
    :param kwargs: forwarded verbatim to :meth:`get_entity`
    :return: entity information wrapped in a DotAccessDict container
    """
    encoded = url_quote(url, safe='')
    return self.get_entity("url", encoded, **kwargs)
def update_playlist(self, member_id, playlist_id, playlist_name, _client):
    """Rename a playlist in the Harvest Media database.

    :param member_id: The Harvest Media member identifier
    :param playlist_id: The Harvest Media playlist identifier
    :param playlist_name: The new name for the playlist (percent-encoded
        from its UTF-8 bytes before insertion into the URI)
    :param _client: An initialized instance of
        :class:`harvestmedia.api.client.Client`
    """
    quoted_name = url_quote(playlist_name.encode('utf-8'))
    method_uri = ('/updateplaylist/{{service_token}}/%(member_id)s/'
                  '%(playlist_id)s/%(playlist_name)s') % {
        'member_id': member_id,
        'playlist_id': playlist_id,
        'playlist_name': quoted_name,
    }
    _client.get_xml(method_uri)
def render(self):
    """Redirect anonymous visitors to the login form.

    Renders nothing (empty unicode string) for authenticated users,
    whitelisted URLs, and the login form itself; otherwise issues a
    redirect to ``login_form`` carrying the current URL in
    ``came_from``.
    """
    self.portal_state = getMultiAdapter((self.context, self.request),
                                        name=u'plone_portal_state')
    if not self.portal_state.anonymous():
        return u""
    url = self.request.get("URL")
    # URLs that anonymous users may see without logging in
    if any(okurl in url for okurl in self.anonurls):
        return u""
    login = self.portal_state.portal_url() + "/login_form"
    if url == login:
        # already on the login form; avoid a redirect loop
        return u""
    came_from = url_quote(self.request.get('URL', ''))
    self.request.response.redirect('%s?came_from=%s' % (login, came_from))
    return u""
def test_releases_urlquoting(self, mapp, server_version, simpypi):
    """A percent-encoded release filename on a mirror index is served
    back decoded, under the +e/ egg-style path."""
    from pkg_resources import parse_version
    if server_version < parse_version("4.3.1dev"):
        pytest.skip("devpi-server without mirror url quoting fix")
    mapp.create_and_login_user('mirror9')
    mapp.create_index("mirror",
                      indexconfig=dict(type="mirror",
                                       mirror_url=simpypi.simpleurl,
                                       mirror_cache_expiry=0))
    mapp.use("mirror9/mirror")
    quoted = url_quote('pkg-1!2017.4+devpi.zip')
    assert quoted == 'pkg-1%212017.4%2Bdevpi.zip'
    simpypi.add_release('pkg', pkgver=quoted)
    releases = mapp.getreleaseslist("pkg")
    host_prefix = simpypi.baseurl.replace('http://', 'http_').replace(':', '_')
    assert len(releases) == 1
    assert releases[0].endswith(
        '/mirror9/mirror/+e/%s_pkg/pkg-1!2017.4+devpi.zip' % host_prefix)
def test_releases_urlquoting_hash(self, mapp, server_version, simpypi):
    """A percent-encoded release filename with a hash fragment is served
    back decoded, under the +f/<hash> path."""
    from pkg_resources import parse_version
    if server_version < parse_version("4.3.1dev"):
        pytest.skip("devpi-server without mirror url quoting fix")
    mapp.create_and_login_user('mirror10')
    mapp.create_index("mirror",
                      indexconfig=dict(type="mirror",
                                       mirror_url=simpypi.simpleurl,
                                       mirror_cache_expiry=0))
    mapp.use("mirror10/mirror")
    quoted = "%s#sha256=1234" % url_quote('pkg-1!2017.4+devpi.zip')
    assert quoted == 'pkg-1%212017.4%2Bdevpi.zip#sha256=1234'
    simpypi.add_release('pkg', pkgver=quoted)
    releases = mapp.getreleaseslist("pkg")
    assert len(releases) == 1
    assert releases[0].endswith(
        '/mirror10/mirror/+f/123/4/pkg-1!2017.4+devpi.zip')
def search(self, series):
    """Search TheTVDB.com for *series* and return the matching shows.

    Raises tvdb_shownotfound when the search yields nothing; otherwise
    each result gets its ``lid`` and ``language`` filled in from the
    configured language.
    """
    quoted = url_quote(series.encode("utf-8"))
    log().debug("Searching for show %s" % quoted)
    seriesEt = self._getetsrc(self.config['url_getSeries'] % (quoted))
    if not seriesEt:
        log().debug('Series result returned zero')
        raise tvdb_shownotfound("Show-name search returned zero results (cannot find show on TVDB)")
    results = []
    for show in seriesEt:
        show['lid'] = self.config['langabbv_to_id'][self.config['language']]
        show['language'] = self.config['language']
        log().debug('Found series %(seriesName)s' % show)
        results.append(show)
    return results
def search(self, series):
    """Search TheTVDB.com for *series* and return a list of result dicts.

    Each dict maps the lower-cased child-tag names of one series element
    to their text, with ``id`` cast to int, ``lid`` resolved from the
    result's own language, and ``aliasnames`` split on '|'.

    :param series: show name to search for
    :return: list of result dicts (empty when nothing matched)
    """
    series = url_quote(series.encode("utf-8"))
    log().debug("Searching for show %s" % series)
    seriesEt = self._getetsrc(self.config['url_getSeries'] % (series))
    allSeries = []
    for series in seriesEt:
        # FIX: Element.getchildren() was deprecated since Python 3.2 and
        # removed in 3.9; iterating the element directly is the supported
        # equivalent and yields the same direct children in order.
        result = dict((k.tag.lower(), k.text) for k in series)
        result['id'] = int(result['id'])
        result['lid'] = self.config['langabbv_to_id'][result['language']]
        if 'aliasnames' in result:
            result['aliasnames'] = result['aliasnames'].split("|")
        log().debug('Found series %(seriesname)s' % result)
        allSeries.append(result)
    return allSeries
def translate(self, sl, tl, text, options=None):
    """Look up *text* via the iciba getWordMean web service.

    :param sl: source language code (passed through to the result dict)
    :param tl: target language code (passed through to the result dict)
    :param text: word or phrase to look up
    :param options: optional backend settings (currently unused)
    :return: a translation dict with ``paraphrase``, ``phonetic`` and
        ``explains`` filled in, or ``None`` if the request fails or the
        response does not have the expected shape
    """
    url = "http://www.iciba.com/index.php"
    req = {}
    req["a"] = "getWordMean"
    req["c"] = "search"
    req["word"] = url_quote(text)
    resp = self.http_get(url, req, None)
    if not resp:
        return None
    try:
        obj = json.loads(resp)
        obj = obj["baesInfo"]["symbols"][0]
    except (ValueError, KeyError, IndexError, TypeError):
        # FIX: was a bare `except:`. The try body can only fail with bad
        # JSON (ValueError) or an unexpected payload shape (KeyError /
        # IndexError / TypeError); anything else is a real bug and should
        # propagate.
        return None
    res = self.create_translation(sl, tl, text)
    res["paraphrase"] = self.get_paraphrase(obj)
    res["phonetic"] = self.get_phonetic(obj)
    res["explains"] = self.get_explains(obj)
    return res