def people_by_tag(request, tag=None):
    """Render people for a single tag slug, or a per-tag people map.

    With a tag: up to 50 matching profiles. Without: up to 10 profiles
    per known tag, keyed by tag object.
    """
    if tag:
        # `tag` is a slug understood by the user-profile API.
        url = "%s?tag=%s&limit=%d" % (APIConfig.USERPROFILE_API_URL, tag, 50)
        context = {"people": simplejson.load(urllib.urlopen(url)), "tag": tag}
    else:
        tag_userprofile_map = {}
        for t in UserProfile.tags.all():
            url = "%s?tag=%s&limit=%d" % (
                APIConfig.USERPROFILE_API_URL, t.slug, 10)
            tag_userprofile_map[t] = simplejson.load(urllib.urlopen(url))
        context = {"tag_userprofile_map": tag_userprofile_map}
    return render_to_response(
        "bfunweb/people_by_tag.html",
        context,
        context_instance=RequestContext(request),
    )
def retrieveValues(cod):
    """Build the template context for PAC works, optionally for one city.

    Loads the works and census JSON files, sorts works by per-capita
    spending (descending) and, when an IBGE code is given, filters to
    that city and attaches its name and census record to ``stats``.
    """
    # Close the data files explicitly instead of leaking the handles.
    with open('pac_data.json', 'rb') as fp:
        data = simplejson.load(fp)
    with open('censo_sp.json', 'rb') as fp:
        censo = simplejson.load(fp)
    stats = {"total": 0}
    # One-step descending sort instead of sort-then-reverse.
    data = sorted(data, key=itemgetter('reais_por_habitante'), reverse=True)
    if cod:
        data = [obra for obra in data if obra['cod_ibge'] == cod]
        if data:
            stats['nome'] = data[0]['municipio']
        stats['municipio'] = {}
        for m in censo:
            if m['cod'] == cod:
                stats['municipio'] = m
                break
    template_values = {
        "obras": data,
        "cod": cod,
        "stats": stats,
    }
    return template_values
def current_user(self):
    """Return the Foursquare-authenticated User for this request, or None.

    Resolution order: memoized attribute -> signed 'fsq_user' cookie ->
    OAuth2 'code' exchange (which also persists the user to the datastore
    and sets the cookie). Result is cached on self._current_user.
    """
    if not hasattr(self, "_current_user"):
        self._current_user = None
        # Cookie value is signed with the app secret; parseCookie verifies it.
        user_id = CookiesUtil.parseCookie(self.request.cookies.get('fsq_user'), FOURSQUARE_APP_SECRET)
        if user_id:
            self._current_user = User.get_by_key_name(user_id)
        if (self._current_user == None or self._current_user.access_token == None) and self.request.get("code"):
            # Exchange the OAuth2 authorization code for an access token.
            args = dict(client_id=FOURSQUARE_APP_ID, redirect_uri=self.request.path_url, client_secret=FOURSQUARE_APP_SECRET, code=self.request.get("code"), grant_type='authorization_code')
            token_url = 'https://foursquare.com/oauth2/access_token?'
            response = json.load(urllib.urlopen(token_url + urllib.urlencode(args)))
            if 'access_token' not in response:
                # Token exchange failed; restart the OAuth flow.
                self.redirect(self.request.path_url)
                return
            access_token = response['access_token']
            # Fetch the user's profile and cache it in the datastore.
            response = json.load(urllib.urlopen('https://api.foursquare.com/v2/users/self?' + urllib.urlencode(dict(oauth_token=access_token))))
            profile = response['response']['user']
            user = User(key_name=str(profile["id"]), id=str(profile["id"]), name=profile["firstName"] + ' ' + profile["lastName"], photo=profile["photo"], access_token=access_token)
            user.put()
            # Signed cookie; `expires` is an absolute epoch timestamp
            # (30*60*60*24*100 seconds from now — presumably ~100 days; verify intent).
            CookiesUtil.setCookie(self.response, 'fsq_user', str(profile["id"]), expires=time.time() + 30 * 60 * 60 * 24 * 100, secret=FOURSQUARE_APP_SECRET)
            self._current_user = user
        elif (self._current_user == None or self._current_user.access_token == None) and not self.request.get("code"):
            # No cookie, no code: anonymous request.
            self._current_user = None
    return self._current_user
def get(self, cod):
    """Render base_pac.html with PAC works (optionally one city) and stats.

    Mirrors retrieveValues(): loads the works and census JSON, sorts by
    per-capita spending descending, filters by IBGE code when given.
    """
    # Close the data files explicitly instead of leaking the handles.
    with open('pac_data.json', 'rb') as fp:
        data = simplejson.load(fp)
    with open('censo_sp.json', 'rb') as fp:
        censo = simplejson.load(fp)
    stats = {"total": 0}
    # One-step descending sort instead of sort-then-reverse.
    data = sorted(data, key=itemgetter('reais_por_habitante'), reverse=True)
    if cod:
        data = [obra for obra in data if obra['cod_ibge'] == cod]
        if data:
            stats['nome'] = data[0]['municipio']
        stats['municipio'] = {}
        for m in censo:
            if m['cod'] == cod:
                stats['municipio'] = m
                break
    template_values = {
        "obras": data,
        "cod": cod,
        "stats": stats,
    }
    path = os.path.join(os.path.dirname(__file__), 'templates/base_pac.html')
    self.response.out.write(template.render(path, template_values))
def toolbox_index(request):
    """Render the toolbox index plus a small cloud of other users."""
    toolboxes = simplejson.load(urllib.urlopen(APIConfig.TOOLBOX_API_URL))
    limit_people = 6
    user = request.user
    if user.is_authenticated():
        # Authenticated users should not appear in their own people cloud.
        profile = user.get_profile()
        people_url = "%s?exclude=%d&limit=%d" % (
            APIConfig.USERPROFILE_API_URL, profile.id, limit_people)
    else:
        # Anonymous visitors see the unfiltered user list.
        people_url = APIConfig.USERPROFILE_API_URL
    people = simplejson.load(urllib.urlopen(people_url))
    return render_to_response(
        'colorific/toolbox_index.html',
        {'toolboxes': toolboxes, 'people': people},
        context_instance=RequestContext(request))
def tweetvite_guest_list(eventid, yes=True, maybe=False, no=False):
    """Return display names of event guests matching the requested RSVPs."""
    rsvp = []
    if yes:
        rsvp.append('Y')
    if maybe:
        rsvp.append('M')
    if no:
        rsvp.append('N')
    url = TWEETVITE_GUEST_LIST_URL % eventid
    data = json.load(urllib.urlopen(url))
    if not data.has_key('total_guests'):
        return []

    def matching_names(page):
        # Pull display names for guests whose RSVP state was requested.
        return [guest['profile']['display_name']
                for guest in page['guests'] if guest['rsvp'] in rsvp]

    # Ceiling division: number of pages needed to cover every guest.
    num_pages = (int(data['total_guests']) + data['count'] - 1) / data['count']
    guests = matching_names(data)
    for page in range(1, num_pages):
        page_data = json.load(urllib.urlopen(url + '&page=%d' % page))
        guests += matching_names(page_data)
    return guests
def handle(self, *args, **options):
    """Seed the Location table by crawling whereis.mit.edu.

    For every lowercase letter and digit, fetch suggestion keywords, run
    each keyword through the search endpoint, and create a Location for
    every previously-unseen result, accumulating keywords along the way.
    """
    try:
        for char in "%s%s" % (string.ascii_lowercase, string.digits):
            print char
            suggestions_url = "http://whereis.mit.edu/search?q=%s&type=suggest&output=json" % (char)
            for keyword in simplejson.load(urlopen(suggestions_url)):
                search_url = "http://whereis.mit.edu/search?type=query&q=%s" % (keyword)
                search_results = simplejson.load(urlopen(search_url))
                for result in search_results:
                    # Only create locations we have not imported before.
                    if not Location.objects.filter(name=result['name']).exists():
                        location = Location.objects.create(name=result['name'], keywords="", latitude = result['lat_wgs84'], longitude=result['long_wgs84'])
                        location.keywords = "%s %s" % (location.keywords, keyword)
                        # NOTE: the API reports missing fields as the string "None".
                        if result.has_key("displayname") and result["displayname"] != "None":
                            location.display_name = result['displayname']
                        if result.has_key("bldgnum") and result["bldgnum"] != "None":
                            location.building_num = result['bldgnum']
                        if result.has_key("bldgimg") and result["bldgimg"] != "None":
                            location.image_url = result['bldgimg']
                        if result.has_key('contents') and result['contents'] != "None":
                            # Also index the names of rooms/offices inside the building.
                            for content in result['contents']:
                                if content.has_key('name') and content['name'] != "None":
                                    location.keywords = "%s %s" % (location.keywords, content['name'])
                        location.save()
    except URLError, e:
        print "Error: %s" % (e)
def test_add_favorite_event(self):
    """Adding a known event succeeds; an unknown event id yields a 404."""
    ok = self.client.post("/users/addfavoriteevent/?id=1", {}, **self.extra)
    ok_body = simplejson.load(StringIO(ok.content))
    self.assertEqual(ok_body["response"], "Add favorite event successful!")
    missing = self.client.post("/users/addfavoriteevent/?id=2", {}, **self.extra)
    missing_body = simplejson.load(StringIO(missing.content))
    self.assertEqual(missing_body["status_code"], 404)
def search(request):
    """Handle toolbox/tool search; non-POST requests bounce to the list."""
    if request.method != 'POST':
        return HttpResponseRedirect('/colorific/toolboxes/')
    message = ''
    items = []
    term = request.POST.get('q')
    toolbox_id = request.POST.get('toolbox_id')
    if toolbox_id:
        # The search suggestion already identified the toolbox containing
        # the tool, so fetch just that one toolbox.
        res = urllib.urlopen("%s%s" % (APIConfig.TOOLBOX_API_URL, toolbox_id))
        items.append(simplejson.load(res))
    else:
        # Otherwise run a full search over tool names and the toolboxes
        # that contain them.
        res = urllib.urlopen(
            "%s?term=%s" % (APIConfig.FULL_SEARCH_API_URL, term))
        items = simplejson.load(res)
    return render_to_response(
        'colorific/search.html',
        {'message': message, 'items': items, 'term': term},
        context_instance=RequestContext(request))
def document(request, lang, version, url):
    """Render a docs page for (lang, version, url) from its Sphinx JSON."""
    # If either of these can't be encoded as ascii then later on down the
    # line an exception will be emitted by unipath; proactively check for
    # bad data (mostly from the Googlebot) so we can give a nice 404 error.
    try:
        version.encode("ascii")
        url.encode("ascii")
    except UnicodeEncodeError:
        raise Http404
    if version == 'dev':
        rtd_version = 'latest'
    elif version >= '1.5':
        rtd_version = version + '.x'
    else:
        rtd_version = version + '.X'
    docroot = get_doc_root_or_404(lang, version)
    doc_path = get_doc_path_or_404(docroot, url)
    template_names = [
        'docs/%s.html' % docroot.rel_path_to(doc_path).replace(doc_path.ext, ''),
        'docs/doc.html',
    ]
    # Close the JSON files deterministically instead of leaking the handles.
    with open(doc_path, 'rb') as doc_file:
        doc = simplejson.load(doc_file)
    with open(docroot.child('globalcontext.json'), 'rb') as env_file:
        env = simplejson.load(env_file)
    return render_to_response(template_names, RequestContext(request, {
        'doc': doc,
        'env': env,
        'lang': lang,
        'version': version,
        'rtd_version': rtd_version,
        'docurl': url,
        'update_date': datetime.datetime.fromtimestamp(docroot.child('last_build').mtime()),
        'home': urlresolvers.reverse('document-index', kwargs={'lang': lang, 'version': version}),
        'redirect_from': request.GET.get('from', None),
    }))
def test_remove_favorite_team(self):
    """Removing an added team succeeds; an unknown team id yields a 404."""
    self.client.post("/users/addfavoriteteam/?id=1", {}, **self.extra)
    removed = self.client.post(
        "/users/removefavoriteteam/?id=1", {}, **self.extra)
    removed_body = simplejson.load(StringIO(removed.content))
    self.assertEqual(removed_body["response"], "Remove favorite team successful!")
    missing = self.client.post(
        "/users/removefavoriteteam/?id=2", {}, **self.extra)
    missing_body = simplejson.load(StringIO(missing.content))
    self.assertEqual(missing_body["status_code"], 404)
def handle_noargs(self, **options): skills_1 = simplejson.load(open(os.path.join(settings.COMMANDS_ROOT[0], 'skills_1.json'))) skills_2 = simplejson.load(open(os.path.join(settings.COMMANDS_ROOT[0], 'skills_2.json'))) skills_3 = simplejson.load(open(os.path.join(settings.COMMANDS_ROOT[0], 'skills_3.json'))) skills = list(set(skills_1 + skills_2 + skills_3)) for skill in skills: new_skill = Skill() new_skill.name = skill new_skill.save() print 'Added: %s' % skill
def setUp(self):
    """
    Setup OCR tests.  These run directly, not via views.
    """
    self.validscripts = {}
    self.invalidscripts = {}
    # Load every JSON fixture from both directories into its dict.
    for target, directory in ((self.validscripts, VALID_SCRIPTDIR),
                              (self.invalidscripts, INVALID_SCRIPTDIR)):
        for fname in os.listdir(directory):
            if not fname.endswith("json"):
                continue
            with open(os.path.join(directory, fname), "r") as f:
                target[fname] = json.load(f)
def testProcessDuplicateWord(self):
    """Processing the same direct message twice must not create a
    duplicate Dictionary entry."""
    for _ in range(2):
        # Re-open and close the fixture each pass (the old code leaked
        # both file handles).
        with open("files/direct_message1.json") as json_file:
            message_json = simplejson.load(json_file)
        twitter_dm = DirectMessage.NewFromJsonDict(message_json)
        processMessage(twitter_dm)
    query = Dictionary.all()
    results = query.fetch(1)
    self.assertEqual(1, len(results))
def process(self):
    """Validate self.url with a HEAD request and, for YouTube/Vimeo
    links, fill in video metadata (video_id, screenshot, title)."""
    # Check if the link exists
    url_data = urlparse.urlparse(self.url)
    conn = httplib.HTTPConnection(url_data.hostname)
    full_path = url_data.path
    if url_data.query:
        full_path += '?' + url_data.query
    conn.request('HEAD', full_path)
    r1 = conn.getresponse()
    conn.close()
    # it exists! (302 is a redirect for sharing links i.e. youtu.be)
    if r1.status == 200 or r1.status == 302:
        # BUG FIX: lstrip('www.') strips any leading 'w'/'.' characters
        # (e.g. 'wikipedia.org' -> 'ikipedia.org'); strip the literal
        # 'www.' prefix instead.
        hostname = url_data.hostname
        if hostname.startswith('www.'):
            hostname = hostname[len('www.'):]
        self.content_provider = hostname
        query = cgi.parse_qs(url_data.query)
        # youtube
        if self.content_provider == 'youtube.com' or self.content_provider == 'youtu.be':
            if self.content_provider == 'youtube.com':
                self.video_id = query['v'][0]
            else:
                # youtu.be short links carry the id as the last path segment;
                # normalize to the canonical youtube.com watch URL.
                self.video_id = self.url.split('/').pop()
                self.content_provider = 'youtube.com'
                self.url = 'http://www.youtube.com/watch?v=' + self.video_id
            data_url = 'http://gdata.youtube.com/feeds/api/videos/' + self.video_id + '?v=2&alt=json'
            youtube_data = simplejson.load(urllib.urlopen(data_url))
            thumbs = youtube_data['entry']['media$group'][
                'media$thumbnail']
            # Prefer the second (larger) thumbnail when available.
            if len(thumbs) > 1:
                self.screenshot = thumbs[1]['url']
            elif len(thumbs):
                self.screenshot = thumbs[0]['url']
            self.title = youtube_data['entry']['title']['$t']
        # vimeo
        elif self.content_provider == 'vimeo.com':
            self.video_id = url_data.path.lstrip('/')
            data_url = 'http://vimeo.com/api/v2/video/' + self.video_id + '.json'
            vimeo_data = simplejson.load(urllib.urlopen(data_url))
            self.screenshot = vimeo_data[0]['thumbnail_large']
            self.title = vimeo_data[0]['title']
def setUp(self):
    """
    Setup OCR tests.  These run directly, not via views.
    """
    testutils.symlink_model_fixtures()
    self.validscripts = {}
    self.invalidscripts = {}
    # Load every JSON fixture from both directories into its dict.
    for target, directory in ((self.validscripts, VALID_SCRIPTDIR),
                              (self.invalidscripts, INVALID_SCRIPTDIR)):
        for fname in os.listdir(directory):
            if not fname.endswith("json"):
                continue
            with open(os.path.join(directory, fname), "r") as f:
                target[fname] = json.load(f)
def process(self):
    """Validate self.url with a HEAD request and, for YouTube/Vimeo
    links, fill in video metadata (video_id, screenshot, title)."""
    # Check if the link exists
    url_data = urlparse.urlparse(self.url)
    conn = httplib.HTTPConnection(url_data.hostname)
    full_path = url_data.path
    if url_data.query:
        full_path += "?" + url_data.query
    conn.request("HEAD", full_path)
    r1 = conn.getresponse()
    conn.close()
    # it exists! (302 is a redirect for sharing links i.e. youtu.be)
    if r1.status == 200 or r1.status == 302:
        # BUG FIX: lstrip("www.") strips any leading 'w'/'.' characters
        # (e.g. 'wikipedia.org' -> 'ikipedia.org'); strip the literal
        # "www." prefix instead.
        hostname = url_data.hostname
        if hostname.startswith("www."):
            hostname = hostname[len("www."):]
        self.content_provider = hostname
        query = cgi.parse_qs(url_data.query)
        # youtube
        if self.content_provider == "youtube.com" or self.content_provider == "youtu.be":
            if self.content_provider == "youtube.com":
                self.video_id = query["v"][0]
            else:
                # youtu.be short links carry the id as the last path segment;
                # normalize to the canonical youtube.com watch URL.
                self.video_id = self.url.split("/").pop()
                self.content_provider = "youtube.com"
                self.url = "http://www.youtube.com/watch?v=" + self.video_id
            data_url = "http://gdata.youtube.com/feeds/api/videos/" + self.video_id + "?v=2&alt=json"
            youtube_data = simplejson.load(urllib.urlopen(data_url))
            thumbs = youtube_data["entry"]["media$group"]["media$thumbnail"]
            # Prefer the second (larger) thumbnail when available.
            if len(thumbs) > 1:
                self.screenshot = thumbs[1]["url"]
            elif len(thumbs):
                self.screenshot = thumbs[0]["url"]
            self.title = youtube_data["entry"]["title"]["$t"]
        # vimeo
        elif self.content_provider == "vimeo.com":
            self.video_id = url_data.path.lstrip("/")
            data_url = "http://vimeo.com/api/v2/video/" + self.video_id + ".json"
            vimeo_data = simplejson.load(urllib.urlopen(data_url))
            self.screenshot = vimeo_data[0]["thumbnail_large"]
            self.title = vimeo_data[0]["title"]
def fetchData(request):
    """Geocode a fixed list of Torun street names, then probe the Google
    Places API around each street with several search strategies, storing
    hits via insertFetchData().  Returns an HTML progress report."""
    streets = [ 'Szeroka', 'Piekary', 'Zeglarska', 'Mostowa', 'Wielkie%20Garbary', 'Prosta', 'Rynek%20Staromiejski', 'Strumykowa', 'Piekary', 'Chelminska', 'Szewska', 'Przedzamcze', 'Kopernika', 'Rabianska', 'Podmurna', 'Prosta', 'Sukiennicza', 'Wysoka', 'Rynek%20Nowomiejski' ]
    response = u''
    # NOTE(review): the loop variable is named `city` but holds street names.
    for city in streets:
        i = 0
        response = response + u'<br><br><b>Ulica: ' + city + u'</b><br><br>'
        # Geocode the street to get a lat/lng anchor for the Places search.
        request_url = "http://maps.googleapis.com/maps/api/geocode/json?sensor=false&address=" + city + ",Torun,Poland"
        city_req = urllib2.Request(request_url)
        city_opener = urllib2.build_opener()
        city_f = city_opener.open(city_req)
        city_data = json.load(city_f)
        # Places supports three search dimensions; try each in turn.
        search_types = ['keyword', 'name', 'type']
        for st in search_types:
            if st == 'type':
                # 'type' searches use Places category identifiers...
                keywords = [ 'night_club', 'restaurant', 'cafe', 'art_gallery', 'movie_theater', 'lodging', 'food', 'museum', 'zoo' ]
            else:
                # ...while keyword/name searches use Polish terms.
                keywords = ['restauracja', 'bar', 'pub', 'pizzeria', 'grill']
            response = response + u'<br>Typ szukania: <i>' + st + u'</i><br>'
            for word in keywords:
                # 50 m radius around the geocoded street anchor.
                req_url = "https://maps.googleapis.com/maps/api/place/search/json?location=" + str( city_data['results'][0]['geometry']['location']['lat'] ) + "," + str(city_data['results'][0]['geometry']['location'] ['lng']) + "&radius=50&language=pl&"
                if st == 'type':
                    req_url = req_url + "types=" + word
                else:
                    if st == 'keyword':
                        req_url = req_url + "keyword=" + word
                    else:
                        req_url = req_url + "name=" + word
                # NOTE(review): API key is hard-coded here; move to settings.
                req_url = req_url + "&sensor=false&key=AIzaSyDDOcaI9GNdrmjoBTviEfIKU86U1QqxnBk"
                response = response + u'<br>Słowo kluczowe: <i>' + word + u'</i><br><br>' + u'URL zapytania: ' + req_url + u'<br><br>'
                req = urllib2.Request(req_url)
                opener = urllib2.build_opener()
                f = opener.open(req)
                data = json.load(f)
                resp = insertFetchData(data)
                response = response + resp
    return HttpResponse(response)
def process(self):
    """Validate self.url with a HEAD request and, for YouTube/Vimeo
    links, fill in video metadata (video_id, screenshot, title)."""
    # Check if the link exists
    url_data = urlparse.urlparse(self.url)
    conn = httplib.HTTPConnection(url_data.hostname)
    full_path = url_data.path
    if url_data.query:
        full_path += '?' + url_data.query
    conn.request('HEAD', full_path)
    r1 = conn.getresponse()
    conn.close()
    # it exists! (302 is a redirect for sharing links i.e. youtu.be)
    if r1.status == 200 or r1.status == 302:
        # BUG FIX: lstrip('www.') strips any leading 'w'/'.' characters
        # (e.g. 'wikipedia.org' -> 'ikipedia.org'); strip the literal
        # 'www.' prefix instead.
        hostname = url_data.hostname
        if hostname.startswith('www.'):
            hostname = hostname[len('www.'):]
        self.content_provider = hostname
        query = cgi.parse_qs(url_data.query)
        # youtube
        if self.content_provider == 'youtube.com' or self.content_provider == 'youtu.be':
            if self.content_provider == 'youtube.com':
                self.video_id = query['v'][0]
            else:
                # youtu.be short links carry the id as the last path segment;
                # normalize to the canonical youtube.com watch URL.
                self.video_id = self.url.split('/').pop()
                self.content_provider = 'youtube.com'
                self.url = 'http://www.youtube.com/watch?v=' + self.video_id
            data_url = 'http://gdata.youtube.com/feeds/api/videos/' + self.video_id + '?v=2&alt=json'
            youtube_data = simplejson.load(urllib.urlopen(data_url))
            thumbs = youtube_data['entry']['media$group']['media$thumbnail']
            # Prefer the second (larger) thumbnail when available.
            if len(thumbs) > 1:
                self.screenshot = thumbs[1]['url']
            elif len(thumbs):
                self.screenshot = thumbs[0]['url']
            self.title = youtube_data['entry']['title']['$t']
        # vimeo
        elif self.content_provider == 'vimeo.com':
            self.video_id = url_data.path.lstrip('/')
            data_url = 'http://vimeo.com/api/v2/video/' + self.video_id + '.json'
            vimeo_data = simplejson.load(urllib.urlopen(data_url))
            self.screenshot = vimeo_data[0]['thumbnail_large']
            self.title = vimeo_data[0]['title']
def user_data(self, access_token, *args, **kwargs):
    """Loads user data from service"""
    token_query = '?' + urlencode({'access_token': access_token})
    try:
        data = simplejson.load(dsa_urlopen(GITHUB_USER_DATA_URL + token_query))
    except ValueError:
        data = None
    # if we have a github organization defined, test that the current users
    # is a member of that organization.
    if data and self.GITHUB_ORGANIZATION:
        member_url = GITHUB_ORGANIZATION_MEMBER_OF_URL.format(
            org=self.GITHUB_ORGANIZATION,
            username=data.get('login')
        ) + token_query
        try:
            membership = dsa_urlopen(member_url)
        except HTTPError:
            data = None
        else:
            # if the user is a member of the organization, response code
            # will be 204, see http://bit.ly/ZS6vFl
            if membership.code != 204:
                raise AuthFailed('User doesn\'t belong to the '
                                 'organization')
    return data
def vkontakte_api(method, data):
    """Calls VKontakte OpenAPI method
    http://vkontakte.ru/apiclub,
    http://vkontakte.ru/pages.php?o=-1&p=%C2%FB%EF%EE%EB%ED%E5%ED%E8%E5%20
    %E7%E0%EF%F0%EE%F1%EE%E2%20%EA%20 API
    """
    if "access_token" in data:
        # Client-side call: the access token authorizes the request.
        url = VKONTAKTE_API_URL + method
    else:
        # Server-side call: fill in defaults and sign the parameters.
        if "v" not in data:
            data["v"] = VKONTAKTE_API_VERSION
        if "api_id" not in data:
            data["api_id"] = _api_get_val_fun("id", "VKONTAKTE_APP_ID")
        data["method"] = method
        data["format"] = "json"
        url = VKONTAKTE_SERVER_API_URL
        secret = _api_get_val_fun("key", "VKONTAKTE_APP_SECRET")
        # Signature: md5 over the sorted key=value pairs plus the secret.
        signed_params = "".join(sorted(item + "=" + data[item] for item in data))
        data["sig"] = md5(signed_params + secret).hexdigest()
    url += "?" + urlencode(data)
    try:
        return simplejson.load(dsa_urlopen(url))
    except (TypeError, KeyError, IOError, ValueError, IndexError):
        log("error", "Could not load data from VKontakte.",
            exc_info=True, extra=dict(data=data))
        return None
def handle(self, *args, **options): url = "http://api.flickr.com/services/rest/?method=flickr.groups.pools.getPhotos&api_key=%s&group_id=%s&extras=geo,description&format=json&nojsoncallback=1" % ( settings.FLICKR_API_KEY, settings.FLICKR_GROUP) try: result = simplejson.load(urllib.urlopen(url)) if result["stat"] == "ok": photos = result["photos"] author = User.objects.get(pk=1) # default user for photo in photos["photo"]: # check for existing photo existing = Media.objects.filter(url__contains=photo["id"]) if len(existing) == 0: # add new photo media = Media() media.geometry = fromstr( "POINT(%s %s)" % (photo["longitude"], photo["latitude"]), srid=4326) media.desc = "**%s** \n%s" % ( photo["title"], photo["description"]["_content"]) media.url = "http://www.flickr.com/photos/%s/%s/in/pool-%s/" % ( photo["owner"], photo["id"], settings.FLICKR_GROUP) media.author = author media.save() elif result["stat"] == "fail": raise CommandError(result["message"]) except: raise CommandError("An Error occurred.") pass
def vkontakte_api(method, data):
    """Calls VKontakte OpenAPI method
    http://vkontakte.ru/apiclub,
    http://vkontakte.ru/pages.php?o=-1&p=%C2%FB%EF%EE%EB%ED%E5%ED%E8%E5%20
    %E7%E0%EF%F0%EE%F1%EE%E2%20%EA%20 API
    """
    if 'access_token' in data:
        # Client-side call: the access token authorizes the request.
        url = VKONTAKTE_API_URL + method
    else:
        # Server-side call: fill in defaults and sign the parameters.
        if 'v' not in data:
            data['v'] = VKONTAKTE_API_VERSION
        if 'api_id' not in data:
            data['api_id'] = _api_get_val_fun('id', 'VKONTAKTE_APP_ID')
        data['method'] = method
        data['format'] = 'json'
        url = VKONTAKTE_SERVER_API_URL
        secret = _api_get_val_fun('key', 'VKONTAKTE_APP_SECRET')
        # Signature: md5 over the sorted key=value pairs plus the secret.
        signed_params = ''.join(sorted(item + '=' + data[item] for item in data))
        data['sig'] = md5(signed_params + secret).hexdigest()
    url += '?' + urlencode(data)
    try:
        return simplejson.load(dsa_urlopen(url))
    except (TypeError, KeyError, IOError, ValueError, IndexError):
        log('error', 'Could not load data from VKontakte.',
            exc_info=True, extra=dict(data=data))
        return None
def user_data(self, access_token):
    """Return user data provided"""
    # Bitbucket has a bit of an indirect route to obtain user data from an
    # authenticated query: First obtain the user's email via an
    # authenticated GET...
    request = self.oauth_request(access_token, BITBUCKET_EMAIL_DATA_URL)
    response = self.fetch_response(request)
    try:
        # ...then pick the primary email address, falling back to the
        # last active address seen in the reversed scan.
        for entry in reversed(simplejson.loads(response)):
            if entry['active']:
                email = entry['email']
                if entry['primary']:
                    break
        # Finally fetch the public user record keyed on that email.
        details = simplejson.load(
            dsa_urlopen(BITBUCKET_USER_DATA_URL + email))['user']
        details['email'] = email
        return details
    except ValueError:
        return None
    return None
def document(request, url):
    """Render a Sphinx JSON doc page for *url* from the built docs tree."""
    docroot = os.path.join(settings.DOC_SOURCE, '_build', 'json')
    doc_path = get_doc_path(docroot, url)
    template_names = [
        'docs/doc.html',
    ]
    # NOTE: the old code also loaded index.fjson into an unused local
    # (`global_toc`); that dead file read has been removed.
    # Close the JSON files deterministically instead of leaking handles.
    with open(doc_path, 'rb') as doc_file:
        doc = simplejson.load(doc_file)
    with open(os.path.join(docroot, 'globalcontext.json'), 'rb') as env_file:
        env = simplejson.load(env_file)
    return render_to_response(template_names, RequestContext(request, {
        'doc': doc,
        'env': env,
        'docurl': url,
        # 'update_date': datetime.datetime.fromtimestamp(docroot.child('last_build').mtime()),
        # 'home': urlresolvers.reverse('document-index', kwargs={'lang':lang, 'version':version}),
        'redirect_from': request.GET.get('from', None),
    }))
def slice(request, query, bag):
    """
    performs a tag based slice.

    ## Other options/methods avaliable ##
    * identity: sub-field(submitter,curator, owner, signer)
    * any_tags: matches documents with value found in the keys field
    * from: matches documents submitted on date from (format: YYYY-MM-DD, 1-day granularity)
    * from, until: matches documents submitted between from value (inclusive) and until value (non-inclusive).
    """
    is_service_avaliable()
    url = "%sslice?any_tags=%s" % (NODE_URL, query)
    req = urllib2.Request(url)
    opener = urllib2.build_opener()
    data = opener.open(req)
    try:
        results = simplejson.load(data)
    except ValueError:
        # Narrowed from a bare `except:` — only a malformed JSON payload
        # should fall back to an empty result set; network errors already
        # raised on the open() above and should propagate.
        results = []
    bag['slice_results'] = results
    bag['slice_query'] = query
    bag['url'] = url
    return bag
def user_data(self, access_token):
    """Loads user data from service"""
    # BUG FIX: the query string must be separated from the endpoint with
    # '?'; the old code concatenated them directly, producing an invalid
    # URL (compare the other GitHub backends in this codebase).
    url = GITHUB_USER_DATA_URL + '?' + urlencode({'access_token': access_token})
    try:
        return simplejson.load(urlopen(url))
    except ValueError:
        # Malformed JSON from the service: treat as "no data".
        return None
def translate(phrase, src="uk", to="en", debug=False): """ https://code.google.com/apis/language/translate/overview.html based on: http://code.google.com/p/py-gtranslate/source/browse/trunk/gtrans.py """ phrase = phrase.replace("\r\n", "<br />").replace("\r", "<br />").replace("\n", "<br />") data = urllib.urlencode({'v': '1.0', 'langpair': '%s|%s' % (src, to), 'q': phrase.encode('utf-8')}) url_opener = UrlOpener() url = '%s?%s' % (BASE_URL, data) if debug: print "Request %r" % url f = url_opener.open(url) resp = simplejson.load(f) if debug: print "Response: %s" % repr(resp) content = "" error = None if resp["responseStatus"] != 200: error = resp["responseDetails"] else: try: content = resp['responseData']['translatedText'] except KeyError, err: error = str(err) else:
def flow_pcap_details(request, flow_pcap_md5):
    """Render the details page for one captured pcap flow, exporting its
    protocol breakdown to a temp JSON file the frontend can fetch."""
    log = Logger("Pcap file details", "DEBUG")
    flow = Flow.objects.get(hash_value=flow_pcap_md5)
    url = "".join([settings.BASE_URL,
                   "/api/rest/all_protocols_by_hash/?format=json",
                   "&parent_hash_value=", flow_pcap_md5])
    log.message("URL: %s" % (url))
    opener = urllib2.build_opener()
    json_response = json.load(opener.open(urllib2.Request(url, None)))
    json_data = json.dumps(json_response)
    # Persist the payload where the web server can serve it later;
    # delete=False because the file outlives this request.
    json_dir = os.path.join(settings.PROJECT_ROOT, "json_files")
    json_file = tempfile.NamedTemporaryFile(mode="w", dir=json_dir, delete=False)
    file_name = os.path.basename(json_file.name)
    # save the json data to the temporary file
    json_file.write(json_data)
    json_file.close()
    context = {
        'page_title': " ".join([flow.file_name, "Details"]),
        'flow': flow,
        'pcap_operation': "file_details",
        'json_file_url': os.path.join(settings.ALTERNATE_BASE_URL,
                                      "json_media", file_name),
        'json_response': json_response,
        'hash_value': flow_pcap_md5,
        'select_update_li': ["file_details", "file_summary"],
    }
    return render_to_response(
        "pcap/file_details.html",
        context_instance=RequestContext(request, context))
def check_language_name(version, ignore_statistic=False):
    """Sample ~400 chars of subtitle text, run Google language detection,
    and alert when the result disagrees with the labeled language."""
    if not ignore_statistic:
        # Only check videos fetched often enough today to be worth it.
        fetch_count = SubtitleFetchCounters.objects.filter(
            video=version.language.video, date=date.today()) \
            .aggregate(fetch_count=Count('count'))['fetch_count']
        if fetch_count < 100:
            return
    # Accumulate subtitle text until we have at least 400 characters.
    text = ''
    for subtitle in version.subtitles():
        text += (' ' + subtitle.text)
        if len(text) >= 400:
            break
    url = (u'https://ajax.googleapis.com/ajax/services/language/detect'
           u'?v=1.0&q=%s') % urlquote_plus(text)
    response = urllib2.urlopen(urllib2.Request(url, None))
    results = simplejson.load(response)
    lang = version.language
    if results['responseStatus'] == 200:
        detected = results['responseData']
        if not detected['isReliable'] or (
                lang.language and lang.language != detected['language']):
            send_alarm_email(version, u'Text does not look like language labeled')
def __init__(self, model_cls):
    """Load the model's master-data JSON and index every record by pk."""
    self.model_cls = model_cls
    master_json_path = getattr(model_cls, 'MASTER_DATA_JSON_PATH', None)
    if not master_json_path:
        # Guards e.g. compile-time use of the JsonMasterModel base itself.
        return
    if isinstance(master_json_path, (list, tuple)):
        master_json_path = os.path.join(*master_json_path)
    if not os.path.isabs(master_json_path):
        # Relative paths are resolved against settings.MASTER_JSON_DIR.
        if not hasattr(settings, 'MASTER_JSON_DIR'):
            # Relative MASTER_DATA_JSON_PATH with no base dir configured.
            raise model_cls.JsonMasterModelError(
                '{} MASTER_DATA_JSON_PATH is not absolute path, '
                'But settings.MASTER_JSON_DIR is not defined.'
                .format(model_cls.__name__))
        master_json_path = os.path.join(settings.MASTER_JSON_DIR,
                                        master_json_path)
    self._all = []
    self._id_dict = {}
    with open(master_json_path) as fp:
        for record in simplejson.load(fp):
            instance = self.model_cls(record['fields'], pk=record['pk'])
            self._all.append(instance)
            self._id_dict[record['pk']] = instance
def LoginHandler(request):
    """Facebook OAuth login view: redirect to the authorize dialog, or on
    callback exchange the code for a token and echo the profile fields."""
    if request.method == 'GET':
        verification_code = request.REQUEST.get("code", "")
        args = dict(client_id=FACEBOOK_APP_ID,
                    redirect_uri=request.build_absolute_uri())
        if not request.REQUEST.get("code"):
            # First leg: send the user to Facebook's authorize dialog.
            return HttpResponseRedirect(
                "https://graph.facebook.com/oauth/authorize?" +
                urllib.urlencode(args))
        # Second leg: swap the verification code for an access token.
        args["client_secret"] = FACEBOOK_APP_SECRET
        args["code"] = request.REQUEST.get("code", "")
        token_response = cgi.parse_qs(
            urllib.urlopen(
                "https://graph.facebook.com/oauth/access_token?" +
                urllib.urlencode(args)).read())
        access_token = token_response["access_token"][-1]
        # Download the user profile and cache a local instance of the
        # basic profile info
        profile = json.load(
            urllib.urlopen("https://graph.facebook.com/me?" +
                           urllib.urlencode(dict(
                               access_token=access_token))))
        user = User(key_name=str(profile["id"]),
                    id=str(profile["id"]),
                    name=profile["name"],
                    access_token=access_token,
                    profile_url=profile["link"])
        # user.put()
        # set_cookie(response, "fb_user", str(profile["id"]),
        #            expires=time.time() + 30 * 86400)
        # return HttpResponseRedirect('/')
        return HttpResponse(
            str(profile["id"]) + '-' + str(profile["id"]) + '-' +
            profile["name"] + '-' + access_token + '-' + profile["link"])
def get(self):
    """Facebook OAuth handler: redirect to the authorize dialog, or on
    callback exchange the code, store the user, set the cookie, go home."""
    verification_code = self.request.get("code")
    args = dict(client_id=FACEBOOK_APP_ID, redirect_uri=self.request.path_url)
    if not self.request.get("code"):
        # First leg: send the user to Facebook's authorize dialog.
        self.redirect("https://graph.facebook.com/oauth/authorize?" +
                      urllib.urlencode(args))
        return
    # Second leg: swap the verification code for an access token.
    args["client_secret"] = FACEBOOK_APP_SECRET
    args["code"] = self.request.get("code")
    token_response = cgi.parse_qs(
        urllib.urlopen("https://graph.facebook.com/oauth/access_token?" +
                       urllib.urlencode(args)).read())
    access_token = token_response["access_token"][-1]
    # Download the user profile and cache a local instance of the
    # basic profile info
    profile = json.load(
        urllib.urlopen("https://graph.facebook.com/me?" +
                       urllib.urlencode(dict(access_token=access_token))))
    user = FBUser(key_name=str(profile["id"]),
                  id=str(profile["id"]),
                  name=profile["name"],
                  access_token=access_token,
                  profile_url=profile["link"])
    user.put()
    # 30-day cookie identifying the Facebook user.
    set_cookie(self.response, "fb_user", str(profile["id"]),
               expires=time.time() + 30 * 86400)
    self.redirect("/")
def auth_complete(self, *args, **kwargs):
    """Completes loging process, must return user instance"""
    if not 'assertion' in self.data:
        raise AuthMissingParameter(self, 'assertion')
    data = urlencode({
        'assertion': self.data['assertion'],
        'audience': self.request.get_host()
    })
    try:
        response = simplejson.load(
            dsa_urlopen(BROWSER_ID_SERVER, data=data))
    except ValueError:
        # BUG FIX: previously execution fell through to the success path
        # with `response` unbound, raising UnboundLocalError.  A payload
        # we cannot parse is a failed authentication.
        log('error', 'Could not load user data from BrowserID.',
            exc_info=True)
        raise AuthFailed(self)
    if response.get('status') == 'failure':
        log('debug', 'Authentication failed.')
        raise AuthFailed(self)
    kwargs.update({
        'auth': self,
        'response': response,
        self.AUTH_BACKEND.name: True
    })
    return authenticate(*args, **kwargs)
def user_data(self, access_token, *args, **kwargs):
    """Loads user data from service"""
    params = urlencode({"access_token": access_token})
    try:
        return simplejson.load(urlopen(LIVE_USER_DATA_URL + "?" + params))
    except ValueError:
        # Malformed JSON from the service: treat as "no data".
        return None
def handle(self, *fixture_labels, **options):
    """Translate every object in the given fixture files and emit results."""
    if not len(fixture_labels):
        self.stderr.write(
            "No database fixture specified. Please provide the path of at least one fixture in the command line.\n"
        )
        return
    self.verbosity = int(options.get('verbosity'))
    for fixture_label in fixture_labels:
        translator = JsonTranslator(fixture_label)
        self.setup_name_compression_dir(fixture_label.split('.'))
        fixture_files = self.find_fixtures(fixture_label, self.fixture_dirs)
        if not fixture_files:
            self.stderr.write("\nCould not find fixture: %s.\n" % fixture_label)
        for full_path, compression_format in fixture_files:
            try:
                fixture = open_fixture(full_path, compression_format)
            except IOError:
                if self.verbosity >= 2:
                    self.stdout.write("Error opening fixture %s.\n" % full_path)
                # BUG FIX: `fixture` is unbound here, so the old
                # `finally: fixture.close()` raised UnboundLocalError on
                # the first failure; just move on to the next file.
                continue
            try:
                for obj_json in simplejson.load(fixture):
                    translator.translate_object(obj_json)
            finally:
                fixture.close()
        translator.out()
def read_news(f, source):
    """Index news articles from the JSON stream *f* into Elasticsearch,
    normalizing article bodies to use single <br> line breaks."""
    es = ElasticSearchClass()
    data = json.load(f)
    try:
        for article in data:
            content = article['content']
            content = content.replace('\n', '<br>')
            content = content.replace('\u3000', "")
            content = replaceAll('<br><br>', '<br>', content)
            try:
                if content.index('<br>') == 0:
                    # BUG FIX: was content[4:-1], which also chopped the
                    # final character; only the leading '<br>' should go.
                    content = content[4:]
            except Exception:
                print("None extra <br>")
            es.index(
                "news", {
                    "name": article['title'],
                    "label": article['label'],
                    "my_time": article['time'],
                    "source": source,
                    "link": article['url'],
                    "content": content
                })
    except Exception:
        print('File add error')
def user_data(self, access_token, *args, **kwargs):
    """Loads user data from service"""
    token_qs = '?' + urlencode({'access_token': access_token})
    try:
        data = simplejson.load(dsa_urlopen(GITHUB_USER_DATA_URL + token_qs))
    except ValueError:
        data = None
    # if we have a github organization defined, test that the current users
    # is a member of that organization.
    if data and self.GITHUB_ORGANIZATION:
        member_url = GITHUB_ORGANIZATION_MEMBER_OF_URL.format(
            org=self.GITHUB_ORGANIZATION,
            username=data.get('login')) + token_qs
        try:
            membership = dsa_urlopen(member_url)
        except HTTPError:
            data = None
        else:
            # if the user is a member of the organization, response code
            # will be 204, see http://bit.ly/ZS6vFl
            if membership.code != 204:
                raise AuthFailed('User doesn\'t belong to the '
                                 'organization')
    return data
def user_data(self, access_token, *args, **kwargs):
    """Loads user data from service.

    Returns the decoded Live Connect profile, or raises
    AuthUnknownError when the service is unreachable or returns
    malformed JSON.
    """
    query = urlencode({"access_token": access_token})
    try:
        return simplejson.load(dsa_urlopen(LIVE_USER_DATA_URL + "?" + query))
    except (ValueError, IOError):
        raise AuthUnknownError("Error during profile retrieval, "
                               "please, try again later")
def get_all_pages(url):
    """
    Retrieves all pages.

    The url must return a JSON list and must not already contain a
    per_page parameter; per_page is forced to 100. Pagination follows
    the RFC 5988 ``Link`` response header until no ``next`` rel remains.

    Note: if there are lots of items, this may take very long.
    """
    assert url.find("per_page") == -1
    # BUG FIX: the original always appended '&', which produces a
    # malformed URL when the base has no query string yet.
    url = url + ("&" if "?" in url else "?") + "per_page=100"
    l = []
    while True:
        r = urlopen(url)
        data = load(r)
        assert isinstance(data, list)
        l.extend(data)
        link = r.headers.get("Link")
        if not link:
            break
        url = link2dict(link).get("next")
        if not url:
            break
    return l
def user_data(self, access_token, *args, **kwargs):
    """Loads user data from service.

    Fetch the GitHub profile; when GITHUB_ORGANIZATION is configured,
    return None unless the user is a member of that organization.
    """
    query = urlencode({'access_token': access_token})
    try:
        data = simplejson.load(dsa_urlopen(GITHUB_USER_DATA_URL + '?' + query))
    except ValueError:
        data = None

    # No organization gate configured (or no profile): done.
    if not data or not self.GITHUB_ORGANIZATION:
        return data

    member_url = GITHUB_ORGANIZATION_MEMBER_OF_URL.format(
        org=self.GITHUB_ORGANIZATION,
        username=data.get('login')) + '?' + query
    try:
        response = dsa_urlopen(member_url)
    except HTTPError:
        return None
    # A member of the organization yields a 204 response, see:
    # http://developer.github.com/v3/orgs/members/#response-if-requester-is-an-organization-member-and-user-is-a-member
    return data if response.code == 204 else None
def new_items(self, item_file):
    """Import item records from a CSV export.

    item_file: path to a CSV whose columns are
    RECORD#(ITEM), CREATED(ITEM), LOCATION, 999 acquisition note,
    followed by one or more bib record numbers.  Each data row creates
    (or fetches) an Item and updates its created date, location, format
    and acquisition method.  Python 2 only (items.next, print >>).
    """
    import csv
    from tech_services_reports.utility_code import convert_date, AcquisitionMethod
    from tech_services_reports.models import Item
    items = csv.reader(open(item_file))
    items.next()  # skip the CSV header row
    #Get mappings from service
    location_format_map = simplejson.load(
        urllib.urlopen(settings_app.LOCATION_FORMAT_URL))
    # service payload nests the actual mapping under result.items
    location_format_map = location_format_map['result']['items']
    #RECORD #(ITEM) CREATED(ITEM) LOCATION 999 RECORD #(BIBLIO)
    item_count = 0
    for row in items:
        if len(row) > 1:
            item_count += 1
            number = row[0]
            create_date = convert_date(row[1])
            location = row[2].strip()
            acq_note = row[3]
            bibs = row[4:]  # NOTE(review): collected but never used below
            item, obj_created = Item.objects.get_or_create(number=number)
            item.created = create_date
            print >> sys.stderr, item, obj_created
            #Normalize to lower case
            item.location = location.lower()
            # raises KeyError if the location is absent from the service map
            item.format = location_format_map[location]['format']
            item.acquisition_method = AcquisitionMethod(acq_note).note
            item.save()
def get(self):
    """Facebook OAuth2 redirect handler.

    Without a ``code`` parameter, redirect the browser to Facebook's
    authorize endpoint.  With one, exchange it for an access token,
    fetch the user's profile, persist a User entity, set the login
    cookie and redirect home.
    """
    # FIX: the original assigned verification_code and then re-queried
    # self.request.get("code") twice; use the local consistently.
    verification_code = self.request.get("code")
    args = dict(client_id=FACEBOOK_APP_ID, redirect_uri=self.request.path_url)
    if verification_code:
        # Exchange the verification code for an access token.
        args["client_secret"] = FACEBOOK_APP_SECRET
        args["code"] = verification_code
        response = cgi.parse_qs(urllib.urlopen(
            "https://graph.facebook.com/oauth/access_token?" +
            urllib.urlencode(args)).read())
        # parse_qs yields lists; take the last value for the key.
        access_token = response["access_token"][-1]

        # Download the user profile and cache a local instance of the
        # basic profile info
        profile = json.load(urllib.urlopen(
            "https://graph.facebook.com/me?" +
            urllib.urlencode(dict(access_token=access_token))))

        user = User(key_name=str(profile["id"]), id=str(profile["id"]),
                    name=profile["name"], access_token=access_token,
                    profile_url=profile["link"])
        user.put()
        # Cookie lives for 30 days.
        set_cookie(self.response, "fb_user", str(profile["id"]),
                   expires=time.time() + 30 * 86400)
        self.redirect("/")
    else:
        self.redirect(
            "https://graph.facebook.com/oauth/authorize?" +
            urllib.urlencode(args))
def geocode(self, address, sensor="false", **geo_args):
    """Geocode *address* against the configured Google endpoint and
    return a pretty-printed JSON array of formatted addresses."""
    params = dict(geo_args)
    params.update({'address': address, 'sensor': sensor})
    request_url = self.geocode_url + '?' + urllib.urlencode(params)
    payload = simplejson.load(urllib.urlopen(request_url))
    formatted = [entry['formatted_address'] for entry in payload['results']]
    return simplejson.dumps(formatted, indent=2)
def user_data(self, access_token, *args, **kwargs):
    """Loads user data from service.

    Fetch the Facebook /me profile; malformed JSON is logged and yields
    None, while an HTTP error raises AuthTokenError.
    """
    data = None
    params = backend_setting(self, self.EXTRA_PARAMS_VAR_NAME, {})
    params['access_token'] = access_token
    try:
        data = simplejson.load(dsa_urlopen(FACEBOOK_ME + urlencode(params)))
    except ValueError:
        log('error', 'Could not load user data from Facebook.',
            exc_info=True,
            extra={'access_token': sanitize_log_data(access_token)})
    except HTTPError:
        log('error', 'Error validating access token.',
            exc_info=True,
            extra={'access_token': sanitize_log_data(access_token)})
        raise AuthTokenError(self)
    else:
        log('debug', 'Found user data for token %s',
            sanitize_log_data(access_token), extra={'data': data})
    return data
def post(self):
    """Create or update an Area from a JSON request body.

    Responds 400 on malformed JSON or a missing "code" field, 201 (with
    a Location header) when a new area is created, 204 when an existing
    one is modified.
    """
    try:
        putArea = simplejson.load(self.request.body_file)
        code = putArea["code"]
    except (ValueError, TypeError, IndexError, KeyError):
        # FIX: KeyError added — a body without "code" previously escaped
        # the 400 guard and surfaced as an unhandled 500.
        self.response.set_status(400)
        return
    logging.info("posting area[%s]" % code)
    area = Area.getByCode(putArea["code"])
    (area, created) = area and (area, False) or (Area(), True)
    for field in area.properties().keys():
        newVal = putArea.get(field, None)
        if newVal:
            if field == "alias":
                # aliases arrive whitespace-separated; store as a list
                setattr(area, field, newVal.split())
            else:
                setattr(area, field, newVal)
    area.put()
    if created:
        logging.info("area[%s] is created " % code)
        self.response.set_status(201)
        self.response.headers["Location"] = str("/areas/%s" % code)
    else:
        logging.info("area[%s] is modified" % code)
        self.response.set_status(204)
def adventure_index(request):
    """Render the adventure index page with data from the adventure API."""
    api_response = urllib2.urlopen(APIConfig.ADVENTURE_API_URL)
    context_data = {'adventures': simplejson.load(api_response)}
    return render_to_response('bfunweb/adventures_index.html',
                              context_data,
                              context_instance=RequestContext(request))
def post(self):
    """Pet the user's dragon: +1 happiness, capped at 20, then return
    the pet state plus a random acknowledgement message as JSON.

    Responds 500 when the Facebook profile lookup fails or when no
    stored user matches the profile id.
    """
    access_token = self.request.get("access_token")

    # Download the user profile and cache a local instance of the
    # basic profile info
    profile = json.load(urllib.urlopen(
        "https://graph.facebook.com/me?" +
        urllib.urlencode(dict(access_token=access_token))))
    if profile.get("error"):
        self.error(500)
        return

    user_id = profile["id"]
    current_user = User.get(db.Key.from_path('User', user_id))
    if current_user is None:
        # FIX: an unknown user previously crashed with AttributeError on
        # the happiness increment; fail explicitly instead.
        self.error(500)
        return

    # Happiness is capped at 20.
    current_user.happiness = min(current_user.happiness + 1, 20)
    current_user.put()
    random_pet_sayings = ["Thanks for petting me!",
                          "One day I'll be a big dragon!",
                          "You're the best!",
                          "Will I be in this phone forever?"
                          ]
    self.response.out.write(json.dumps(
        petToDict(current_user, msg=random.choice(random_pet_sayings))))
def parse(self, json_file):
    """Import measure properties from a GeoJSON-style file.

    Each feature's "code"-prefixed property is matched against an
    existing Measure's short_name; matches get their name, type, traject
    and rounded km range updated and saved. Unmatched codes are skipped.
    """
    payload = json.load(open(json_file, "rb"))
    for feature in payload["features"]:
        props = feature["properties"]
        # The code key name varies per dataset (Code_IVM, Code_QS, ...):
        # take the first property whose name starts with "code".
        code_key = [k for k in props.keys()
                    if k.lower().startswith("code")][0]
        try:
            measure = models.Measure.objects.get(
                short_name=props[code_key])
        except models.Measure.DoesNotExist:
            continue  # no Measure registered under this code
        measure.name = props["titel"]
        measure.measure_type = props["type"]
        measure.traject = props["traject"]
        # XXX: Be aware that the River Reach must be coupled with
        # the measure
        measure.km_from = int(round(props["km_van"]))
        measure.km_to = int(round(props["km_tot"]))
        measure.save()
def post(self):
    """Resolve the Facebook profile for the posted access_token; create
    a brand-new pet/user record on first sight, otherwise refresh the
    stored token. Echoes the raw profile back as JSON."""
    access_token = self.request.get("access_token")

    # Download the user profile and cache a local instance of the
    # basic profile info
    profile = json.load(urllib.urlopen(
        "https://graph.facebook.com/me?" +
        urllib.urlencode(dict(access_token=access_token))))
    if profile.get("error"):
        self.error(500)
        return

    existing = User.get(db.Key.from_path('User', profile["id"]))
    if existing is None:
        # First visit: create the user with a fresh default pet.
        new_user = User(key_name=str(profile["id"]),
                        id=str(profile["id"]),
                        name=profile["name"],
                        access_token=access_token,
                        profile_url=profile["link"],
                        pet_name=profile["first_name"],
                        pet_type="dragon",
                        bathroom=1, age=0, happiness=20, hunger=10,
                        accessory="none",
                        last_fed=datetime.datetime.now(),
                        last_bathroom=datetime.datetime.now(),
                        last_checkin=datetime.datetime.now())
        new_user.put()
    else:
        existing.access_token = access_token
        existing.put()
    self.response.out.write(json.dumps(profile))
def parse(request, doc_id):
    """
    Parse the 'resource_data' xml of a given resource (by id)
    NOTE only works on dc documents atm
    """
    is_service_avaliable()
    bag = {}
    url = "%sobtain?request_ID=%s&by_doc_ID=true" % (NODE_URL, doc_id)
    req = urllib2.Request(url)
    opener = urllib2.build_opener()
    data = opener.open(req)
    result = simplejson.load(data)
    data = result['documents'][0]['document'][0]['resource_data']
    soup = BeautifulStoneSoup(data)
    parsed_data = {}
    fields = ['title', 'identifier', 'creator', 'publisher', 'date',
              'description', 'rights']
    for field in fields:
        try:
            parsed_data[field] = soup.find('dc:' + field).text
        except AttributeError:
            # Field absent: find() returned None. FIX: was a bare
            # except that silently hid every other failure too.
            pass
    parsed_data['subject'] = ''.join([s.text
                                      for s in soup.findAll('dc:subject')])
    bag['parsed_data'] = parsed_data
    return shortcuts.render_to_response(
        "parse.html", bag,
        context_instance=context.RequestContext(request))
def load(request):
    """Harvest current trends from Twitter and Google's hot-trends feed,
    persisting each keyword as a Trend entity."""
    ###load twitter ####
    tw_request = urllib2.Request("http://api.twitter.com/1/trends/2450022.json")
    tw_request.add_header('User-Agent', 'Safari 3.2')
    trends_payload = json.load(urllib2.urlopen(tw_request))
    for entry in trends_payload:
        for trend in entry['trends']:
            Trend.get_or_insert(trend['name'], title=trend['name'],
                                source="twitter")

    ###laod Google ####
    feed = urllib2.urlopen("http://www.google.com/trends/hottrends/atom/hourly")
    root = ET.parse(feed).getroot()
    for child in root:
        for grandchild in child.getchildren():
            if grandchild.text:
                # Each entry embeds an HTML fragment; pull the <li> links.
                soup = BeautifulSoup(grandchild.text)
                for item in soup('li'):
                    gkeyword = item.a.string
                    Trend.get_or_insert(gkeyword, title=str(gkeyword),
                                        source="google")
    return HttpResponse("loaded twitter and google")
def obtain(request, query, bag):
    """
    performs a tag obtain (get) based on id or URL.

    Fills bag with 'obtain_results' (decoded JSON, or [] on parse
    failure), 'obtain_query' and 'url', and returns bag.
    """
    is_service_avaliable()
    if 'http' in query:
        # search by URL
        url = "%sobtain?request_ID=%s&by_resource_ID=true" % (NODE_URL, query)
    else:
        # search by ID
        url = "%sobtain?request_ID=%s&by_doc_ID=true" % (NODE_URL, query)
    req = urllib2.Request(url)
    opener = urllib2.build_opener()
    data = opener.open(req)
    try:
        results = simplejson.load(data)
    except ValueError:
        # FIX: was a bare except; only a JSON decode failure should fall
        # back to an empty result set.
        results = []
    bag['obtain_results'] = results
    bag['obtain_query'] = query
    bag['url'] = url
    return bag
def handle(self, file='', *args, **options): help = 'Loads the features from the file, or the default if none is provided.' if file == '': if hasattr(settings, 'FEATURE_FLIPPER_FEATURES_FILE'): file = settings.FEATURE_FLIPPER_FEATURES_FILE else: print "settings.FEATURE_FLIPPER_FEATURES_FILE is not set." return verbosity = int(options.get('verbosity', 1)) stream = open(file) features = simplejson.load(stream) for json_feature in features: name = json_feature['name'] try: feature = Feature.objects.get(name=name) except Feature.DoesNotExist: feature = Feature() feature.name = name feature.description = json_feature['description'] # Django will convert to a boolean for us feature.enabled = json_feature['enabled'] feature.save() if verbosity > 0: print "Loaded %d features." % len(features)
def auth_complete(self, *args, **kwargs):
    """Complete the login process; must return a user instance."""
    # The BrowserID client posts a signed assertion.
    if not 'assertion' in self.data:
        raise AuthMissingParameter(self, 'assertion')
    data = urlencode({
        'assertion': self.data['assertion'],
        # audience must match the host the browser addressed
        'audience': self.request.get_host()
    })
    try:
        # POST the assertion to the BrowserID verification server.
        response = simplejson.load(dsa_urlopen(BROWSER_ID_SERVER,
                                               data=data))
    except ValueError:
        # Malformed JSON from the verifier: log and fall through,
        # implicitly returning None (authentication fails).
        log('error', 'Could not load user data from BrowserID.',
            exc_info=True)
    else:
        if response.get('status') == 'failure':
            # Verifier explicitly rejected the assertion.
            log('debug', 'Authentication failed.')
            raise AuthFailed(self)
        kwargs.update({
            'auth': self,
            'response': response,
            self.AUTH_BACKEND.name: True
        })
        return authenticate(*args, **kwargs)
def vk_api(method, data, is_app=False):
    """Calls VK OpenAPI method
    https://vk.com/apiclub,
    https://vk.com/pages.php?o=-1&p=%C2%FB%EF%EE%EB%ED%E5%ED%E8%E5%20%E7%E0%EF%F0%EE%F1%EE%E2%20%EA%20API
    """
    if 'access_token' in data:
        # Client-side token available: call the API endpoint directly.
        url = VK_API_URL + method
    else:
        # We need to perform server-side call if no access_token
        if 'v' not in data:
            data['v'] = VK_API_VERSION
        if 'api_id' not in data:
            data['api_id'] = setting('VKAPP_APP_ID' if is_app
                                     else 'VK_APP_ID')
        data['method'] = method
        data['format'] = 'json'
        url = VK_SERVER_API_URL
        secret = setting('VKAPP_API_SECRET' if is_app else 'VK_API_SECRET')
        # Request signature: md5 over the sorted "key=value" pairs
        # concatenated with the secret.
        pairs = sorted(name + '=' + data[name] for name in data)
        data['sig'] = md5(''.join(pairs) + secret).hexdigest()
    url += '?' + urlencode(data)
    try:
        return simplejson.load(dsa_urlopen(url))
    except (TypeError, KeyError, IOError, ValueError, IndexError):
        log('error', 'Could not load data from vk.com',
            exc_info=True, extra=dict(data=data))
        return None
def geocode(lat, lng, **geo_args):
    """Reverse-geocode (lat, lng) through the Google geocoding service.

    Returns a (city, country) tuple; each element degrades to '' when it
    cannot be read out of the response.
    """
    geo_args.update({
        'key': settings.GOOGLE_KEY,
        'output': 'json',
        'q': str(lat) + ',' + str(lng),
        # NOTE(review): urlencode renders this as "False", not the
        # lowercase "false" the API documents — confirm it is accepted.
        'sensor': False
    })
    url = GEOCODE_BASE_URL + urllib.urlencode(geo_args)
    result = json.load(urllib.urlopen(url))
    # FIX: the bare excepts below are narrowed to the lookup failures
    # actually expected while walking the response structure.
    try:
        city = result['Placemark'][0]['AddressDetails']['Country'][
            'AdministrativeArea']['Locality']['LocalityName']
    except (KeyError, IndexError, TypeError):
        try:
            # Some responses nest the locality one level deeper.
            city = result['Placemark'][0]['AddressDetails']['Country'][
                'AdministrativeArea']['SubAdministrativeArea']['Locality'][
                'LocalityName']
        except (KeyError, IndexError, TypeError):
            city = ''  # problems reading the city
    try:
        country = result['Placemark'][0]['AddressDetails']['Country'][
            'CountryName']
        return (city, country)
    except (KeyError, IndexError, TypeError):
        country = ''  # problems reading the country
        return (city, country)
def refresh_data(self, save=False):
    """Geocode self.address via the Google Maps API, updating the
    formatted address and coordinates; optionally persist the model."""
    query = urllib.urlencode({
        'sensor': 'false',
        'address': self.address.encode('utf-8'),
    })
    endpoint = 'http://maps.googleapis.com/maps/api/geocode/json'
    request = urllib2.Request('%s?%s' % (endpoint, query))
    # 2-second timeout so a slow geocoder cannot block the caller.
    self._data = simplejson.load(urllib2.urlopen(request, timeout=2))
    try:
        best = self._data['results'][0]  # use first result
        location = best['geometry']['location']
        self.formatted_address = best['formatted_address']
        self.latitude = location['lat']
        self.longitude = location['lng']
    except IndexError:
        # No results at all: record the failure explicitly.
        self.formatted_address = u'geocoding failed'
        self.latitude = None
        self.longitude = None
    if save:
        self.save()