def wrapped(*args, **kwargs):
    """Decorator inner: call f, emit its result as JSON.

    Falls back to serializing str(r) under an 'error' key when the raw
    result is not JSON-serializable.
    """
    web.header('Content-Type', 'application/json')
    r = f(*args, **kwargs)
    try:
        return json.dumps(r)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` — json.dumps raises TypeError
        # (and ValueError for some inputs) on unserializable objects;
        # anything else should propagate.
        return json.dumps({'error': str(r)})
def GET(self, path):
    """Serve an ODP presentation file as a download attachment."""
    path = common.Mapper.d2s(safepath(path))
    web.header("Content-Type", "application/vnd.oasis.opendocument.presentation")
    web.header("Content-disposition",
               "attachment; filename=\"%s\"" % path.rsplit("/", 1)[1])
    # ODP files are zip archives: open in binary mode so the bytes are not
    # corrupted by newline translation / text decoding.
    return open(path, "rb")
def layout_processor(handler):
    """Processor to wrap the output in site template."""
    out = handler()
    path = web.ctx.path[1:]

    if out is None:
        out = RawText("")

    if isinstance(out, basestring):
        out = web.template.TemplateResult(__body__=out)

    if 'title' not in out:
        out.title = path

    # overwrite the Content-Type header if content_type is specified in the template
    if 'content_type' in out:
        web.ctx.headers = [
            h for h in web.ctx.headers if h[0].lower() != 'content-type'
        ]
        web.header('Content-Type', out.content_type)

    if hasattr(out, 'rawtext'):
        html = out.rawtext
    else:
        html = view.render_site(config.site, out)

    # cleanup references to avoid memory leaks
    web.ctx.site._cache.clear()
    web.ctx.pop('site', None)
    web.ctx.env = {}
    context.clear()

    return html
def GET(self, start):
    """OPDS feed of recently updated LuraTech-PDF books, paginated by `start`."""
    start = int(start) if start else 0

    # TODO: add Image PDFs to this query
    solrUrl = ('http://se.us.archive.org:8983/solr/select'
               '?q=mediatype%3Atexts+AND+format%3A(LuraTech+PDF)'
               '&fl=identifier,title,creator,oai_updatedate,date,contributor,publisher,subject,language'
               '&sort=updatedate+desc&rows=' + str(numRows)
               + '&start=' + str(start * numRows) + '&wt=json')
    f = urllib.urlopen(solrUrl)
    try:
        contents = f.read()
    finally:
        # guarantee the socket is released even if read() fails
        f.close()
    obj = json.loads(contents)

    numFound = int(obj['response']['numFound'])

    titleFragment = 'books sorted by update date'
    title = 'Internet Archive - %d to %d of %d %s.' % (
        start * numRows, min((start + 1) * numRows, numFound),
        numFound, titleFragment)
    opds = createOpdsRoot(title, 'opds:new:%d' % (start),
                          '/new/%d' % (start), getDateString())

    urlFragment = '/new/'
    createNavLinks(opds, titleFragment, urlFragment, start, numFound)

    # NOTE: the original also extracted item['description'] into a local
    # that was never used; that dead code is removed.
    for item in obj['response']['docs']:
        createOpdsEntryBook(opds, item)

    web.header('Content-Type', pubInfo['mimetype'])
    return prettyPrintET(opds)
def POST(self):
    """Accept an ODP upload into `parent`; reject anything unauthorized or invalid."""
    web.header("Content-Type", "text/html")
    i = web.input(myfile={})
    (parent, child) = (safepath(i["path"]), i["myfile"].filename)
    if not auth.can_write(parent):
        return json.dumps({"success": False, "msg": "Unauthorized"})
    srcp = common.Mapper.d2s(parent)
    if not srcp.startswith(config["juno-home"] + "/"):
        return json.dumps({"success": False, "msg": "Unauthorized"})
    # no hidden files and no path separators smuggled into the filename
    if not os.path.isdir(srcp) or not child or child[0] == "." \
            or "/" in child:
        return json.dumps({"success": False, "msg": "Unauthorized"})
    srcp = os.path.join(srcp, child)
    if os.path.exists(srcp):
        return json.dumps({"success": False, "msg": "Unauthorized"})
    # BUG FIX: ODP files are zip containers — write in binary mode ("w"
    # corrupts the archive on platforms that translate newlines). `with`
    # also guarantees the handle is closed.
    with open(srcp, "wb") as f:
        f.write(i["myfile"].value)
    if not odptools.odf.Odp.is_odp(srcp):
        os.unlink(srcp)
        return json.dumps({"success": False, "msg": "Unauthorized"})
    if subprocess.call(("./index.py", "-p", srcp)):
        os.unlink(srcp)
        return json.dumps({"success": False, "msg": "Unauthorized"})
    return json.dumps({"success": True})
def GET(self):
    """Render the diary view for the FOAF agent named in the request headers."""
    web.header('Content-type', 'application/xhtml+xml')
    agent = URIRef(web.ctx.environ['HTTP_X_FOAF_AGENT'])
    # todo- look up available bot uris for this agent
    store = RdfStore(bot)

    # newest posts first, per the ORDER BY in the query
    entries = [
        (created, creator, content)
        for created, creator, content in store.queryData(
            """ SELECT ?created ?creator ?content WHERE { [ a sioc:Post; dc:creator ?creator; dc:created ?created; sioc:content ?content ] } ORDER BY desc(?created)""")
    ]

    def prettyDate(iso):
        # ISO-8601 timestamp -> "YYYY-MM-DD Dow"
        t = xml.utils.iso8601.parse(str(iso))
        d = datetime.date.fromtimestamp(t)
        return d.strftime("%Y-%m-%d %a")

    return render.diaryview(bot=bot, agent=agent, entries=entries,
                            prettyDate=prettyDate, loginBar=getLoginBar())
def GET(self, account, name): """ Return all rules of a given subscription id. HTTP Success: 200 OK HTTP Error: 401 Unauthorized 404 Not Found :param scope: The scope name. """ header('Content-Type', 'application/x-json-stream') state = None if ctx.query: params = parse_qs(ctx.query[1:]) if 'state' in params: state = params['state'][0] try: subscriptions = [subscription['id'] for subscription in list_subscriptions(name=name, account=account)] if len(subscriptions) > 0: if state == 'OK': state = RuleState.OK if state == 'Replicating': state = RuleState.REPLICATING if state == 'Stuck': state = RuleState.STUCK for rule in list_replication_rules({'subscription_id': subscriptions[0], 'state': state}): yield dumps(rule, cls=APIEncoder) + '\n' except RuleNotFound, e: raise generate_http_error(404, 'RuleNotFound', e.args[0][0])
def GET(self): i = web.input(key=None) changes = db.get_recent_changes(key=i.key, limit=50) site = web.ctx.home def diff(key, revision): b = db.get_version(key, revision) rev_a = revision -1 if rev_a is 0: a = web.ctx.site.new(key, {}) a.revision = 0 else: a = db.get_version(key, revision=rev_a) diff = render.diff(a, b) #@@ dirty hack to extract diff table from diff import re rx = re.compile(r"^.*(<table.*<\/table>).*$", re.S) return rx.sub(r'\1', str(diff)) web.header('Content-Type', 'application/rss+xml') for c in changes: c.diff = diff(c.key, c.revision) c.created = self._format_date(c.created) print render.feed(site, changes)
def POST(self):
    """Echo trivially transformed form input back as JSON."""
    input = web.input()
    web.header('Content-Type', 'application/json')
    # Do trivial operations:
    lowered = input.mod.lower()
    formatted = "%.3f" % float(input.num)
    return json.dumps({'txt': lowered, 'dat': formatted})
def GET(self):
    """Serve static/robots.txt as plain text, or 404 if it is missing."""
    web.header('Content-Type', 'text/plain')
    try:
        # `with` guarantees the handle is closed (the original leaked it).
        with open('static/robots.txt') as f:
            data = f.read()
    except IOError:
        raise web.notfound()
    # Raising HTTPError outside the try preserves behavior: it is not an
    # IOError, so the original never caught it either.
    raise web.HTTPError("200 OK", {}, data)
def GET(self):
    """Return the build record for the given task_id as a JSON string."""
    i = web.input()
    buildString = ''
    # SECURITY FIX: the original concatenated task_id straight into the
    # WHERE clause (SQL-injectable); bind it via `vars` instead.
    selectedBuilds = db.select('builds', where='task_id = $task_id',
                               vars={'task_id': str(i.task_id)})
    if selectedBuilds:
        for x in selectedBuilds:
            # NOTE: if several rows match, only the last one survives —
            # preserved from the original implementation.
            buildString = json.JSONEncoder().encode({
                'task_id': i.task_id,
                'repos': x.repos,
                'branch': x.branch,
                'version': x.version,
                'author': x.author,
                'latin': x.latin,
                'demo_data': x.demo_data,
                'styleguide_repo': x.styleguide_repo,
                'styleguide_branch': x.styleguide_branch,
                'sidecar_repo': x.sidecar_repo,
                'sidecar_branch': x.sidecar_branch,
                'package_list': x.package_list,
                'upgrade_package': x.upgrade_package,
                'expired_tag': x.expired_tag,
            })
    web.header('Content-type', 'application/json')
    return buildString
def GET(self, req_path):
    """Dispatch the special "~" wiki paths to their page handlers."""
    inputs = web.input()
    FIRST_PAGE = 0
    offset = int(inputs.get("offset", FIRST_PAGE))
    page_limit = config_agent.config.getint("pagination", "page_limit")
    limit = int(inputs.get("limit", page_limit))

    # guard-clause dispatch; anything unrecognized is a bad request
    if req_path == "~recent":
        return page.wp_get_recent_changes_from_cache(
            config_agent=config_agent, tpl_render=tpl_render,
            req_path=req_path, limit=limit, offset=offset)
    if req_path == "~all":
        return page.wp_get_all_pages(
            config_agent=config_agent, tpl_render=tpl_render,
            req_path=req_path, limit=limit, offset=offset)
    if req_path == "~settings":
        return page.wp_view_settings(config_agent=config_agent,
                                     tpl_render=tpl_render, req_path=req_path)
    if req_path == "~stat":
        return page.wp_stat(config_agent=config_agent,
                            tpl_render=tpl_render, req_path=req_path)
    if req_path == "~new":
        return page.wp_new(config_agent=config_agent,
                           tpl_render=tpl_render, req_path=req_path)
    if req_path == "~atom":
        buf = atom_output.generate_feed(config_agent=config_agent,
                                        req_path=req_path,
                                        tpl_render=tpl_render)
        web.header("Content-Type", "text/xml; charset=utf-8")
        return buf
    return web.BadRequest()
def GET(self):
    """Serve robots.txt from the wiki pages folder as plain text."""
    pages_dir = config_agent.get_full_path("paths", "pages_path")
    robots_path = os.path.join(pages_dir, "robots.txt")
    body = commons.shutils.cat(robots_path)
    web.header("Content-Type", "text/plain")
    return body
def GET(self, sitename, offset): i = web.input(timestamp=None, limit=1000) if not config.writelog: raise web.notfound("") else: log = self.get_log(offset, i) limit = min(1000, common.safeint(i.limit, 1000)) try: # read the first line line = log.readline(do_update=False) # first line can be incomplete if the offset is wrong. Assert valid. self.assert_valid_json(line) web.header('Content-Type', 'application/json') yield '{"data": [\n' yield line.strip() for i in range(1, limit): line = log.readline(do_update=False) if line: yield ",\n" + line.strip() else: break yield '], \n' yield '"offset": ' + simplejson.dumps(log.tell()) + "\n}\n" except Exception, e: print 'ERROR:', str(e)
def auth_user(global_config, desired_path='/home'):
    """HTTP Basic-auth gate: redirect to `desired_path` on success,
    otherwise demand credentials with a 401."""
    auth = web.ctx.env.get('HTTP_AUTHORIZATION')
    authreq = False
    if auth is None:
        authreq = True
    else:
        auth = re.sub('^Basic ', '', auth)
        username, password = base64.decodestring(auth).split(':')
        # `dict.has_key` is deprecated (and gone in py3); `in` is equivalent.
        if username in logged_out_users:
            # a previously logged-out user must re-authenticate once
            del logged_out_users[username]
        else:
            session = DbSession.open_db_session(
                global_config['users_db_name'] + global_config['this_season'])
            user = UsersDataModel.getUser(session, username)
            session.remove()
            if user:
                if user.state == 'Disabled':
                    raise web.seeother('/accountdisabled')
                # if (username,password) in allowed:
                if user.check_password(password) == True:
                    raise web.seeother(desired_path)
        # credentials present but not accepted: challenge again
        authreq = True
    if authreq:
        web.header('WWW-Authenticate',
                   'Basic realm="FRC1073 ScoutingAppCentral"')
        web.ctx.status = '401 Unauthorized'
        return
def SaveToInstapaper(self, user, action, orgUrl):
    """Save the article at orgUrl to the user's Instapaper account and
    return a small HTML status page."""
    web.header('Content-type', "text/html; charset=utf-8")
    T_INFO = u"""<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<title>%s</title></head><body><p style="text-align:center;font-size:1.5em;">%s</p></body></html>"""
    if not user.instapaper_username or not user.instapaper_password:
        # message typos fixed ("have to provided fistly")
        info = T_INFO % ('No authorize info',
                         'Instapaper username and password have to be provided first!<br/>Please fill them in your KindleEar application.')
        return info.encode('utf-8')

    title = web.input().get('t', '')
    name = web.input().get("n", '')
    if user.instapaper_username != name:
        info = T_INFO % ('Action rejected',
                         'Username not match!<br/>KindleEar refuse to execute your command.')
        return info.encode('utf-8')

    opener = URLOpener()
    password = ke_decrypt(user.instapaper_password, user.secret_key or '')
    apiParameters = {'username': user.instapaper_username,
                     'password': password,
                     'title': title.encode('utf-8'),
                     'selection': 'KindleEar',
                     'url': orgUrl}
    ret = opener.open(INSTAPAPER_API_ADD_URL, data=apiParameters)
    if ret.status_code in (200, 201):
        info = _("'%s'<br/><br/>Saved to your Instapaper account.") % title
        info += '<br/><p style="text-align:right;color:red;">by KindleEar </p>'
        info = T_INFO % ('Saved to Instapaper', info)
    elif ret.status_code == 403:
        info = _("Failed save to Instapaper<br/>'%s'<br/><br/>Reason : Invalid username or password.") % title
        info += '<br/><p style="text-align:right;color:red;">by KindleEar </p>'
        info = T_INFO % ('Failed to save', info)
    else:
        info = _("Failed save to Instapaper<br/>'%s'<br/><br/>Reason : Unknown(%d).") % (title, ret.status_code)
        info += '<br/><p style="text-align:right;color:red;">by KindleEar </p>'
        info = T_INFO % ('Failed to save', info)
    return info.encode('utf-8')
def POST(self):
    """Plot `parameter` vs time between two timestamps, save a JPEG and
    return its filename as JSON."""
    data = web.data()
    query_data = json.loads(data)
    start_time = query_data["start_time"]
    end_time = query_data["end_time"]
    parameter = query_data["parameter"]
    # SECURITY FIX: the original concatenated the column name AND the raw
    # timestamps into the SQL string. The column name cannot be bound as a
    # query variable, so whitelist it against the known `units` columns;
    # bind the timestamps via `vars`.
    if parameter not in units:
        raise web.badrequest()
    query = ("SELECT " + parameter + ",timestamp FROM jplug_data"
             " WHERE timestamp BETWEEN $start AND $end")
    retrieved_data = list(db.query(query, vars={'start': start_time,
                                                'end': end_time}))
    LEN = len(retrieved_data)
    x = [0] * LEN
    y = [0] * LEN
    X = [None] * LEN
    for i in range(0, LEN):
        x[i] = retrieved_data[i]["timestamp"]
        y[i] = retrieved_data[i][parameter]
        X[i] = datetime.datetime.fromtimestamp(x[i], pytz.timezone(TIMEZONE))
    # matplotlib's pyplot state is global: serialize access with the lock
    with lock:
        figure = plt.gcf()  # get current figure
        plt.axes().relim()
        plt.title(parameter + " vs Time")
        plt.xlabel("Time")
        plt.ylabel(units[parameter])
        plt.axes().autoscale_view(True, True, True)
        figure.autofmt_xdate()
        plt.plot(X, y)
        filename = randomword(12) + ".jpg"
        plt.savefig("/home/muc/Desktop/Deployment/jplug_view_data/static/images/" + filename,
                    bbox_inches=0, dpi=100)
        plt.close()
    web.header('Content-Type', 'application/json')
    return json.dumps({"filename": filename})
def POST(self):
    """Authenticate against Keystone and return the token id as JSON."""
    creds = json.loads(web.data())
    username = None
    password = None
    passwd_creds = creds.get('passwordCredentials', None)
    if passwd_creds is not None:  # `!= None` replaced with identity test
        # reuse the dict we already fetched instead of re-looking it up
        username = passwd_creds.get('username', None)
        password = passwd_creds.get('password', None)
    tenant_id = creds.get('tenantID', None)
    tenant_name = creds.get('tenantName', None)
    token_id = creds.get('tokenID', None)
    keystone = identity.auth(usr=username, passwd=password,
                             token_id=token_id, tenant_id=tenant_id,
                             tenant_name=tenant_name, url=_endpoint,
                             api="NATIVE")
    token = identity.get_token(keystone, usr=username, passwd=password,
                               api="NATIVE", tenant_id=tenant_id,
                               tenant_name=tenant_name, token_id=token_id)
    if token == -1:
        return web.webUnauthorized()
    auth_token = {'token': token.id}
    body = json.dumps(auth_token)
    web.header("Content-Type", "application/json")
    return body
def SaveToPocket(self, user, action, orgUrl):
    """Save the article at orgUrl to the user's Pocket account and return
    a small HTML status page."""
    # BUG FIX: removed the unused local INSTAPAPER_API_ADD_URL, which
    # shadowed the module-level constant of the same name and had nothing
    # to do with Pocket.
    web.header('Content-type', "text/html; charset=utf-8")
    T_INFO = u"""<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<title>%s</title></head><body><p style="text-align:center;font-size:1.5em;">%s</p></body></html>"""
    if not user.pocket_access_token:
        info = T_INFO % ('Pocket unauthorized',
                         'Unauthorized Pocket!<br/>Please authorize your KindleEar application firstly.')
        return info.encode('utf-8')

    title = web.input().get('t', '')
    tkHash = web.input().get("h", '')
    if hashlib.md5(user.pocket_acc_token_hash).hexdigest() != tkHash:
        info = T_INFO % ('Action rejected',
                         'Hash not match!<br/>KindleEar refuse to execute your command.')
        return info.encode('utf-8')

    pocket = Pocket(POCKET_CONSUMER_KEY)
    pocket.set_access_token(user.pocket_access_token)
    try:
        item = pocket.add(url=orgUrl, title=title, tags='KindleEar')
    except Exception as e:
        info = T_INFO % ('Failed to save',
                         _('Failed save to Pocket.<br/>') + str(e))
    else:
        info = _("'%s'<br/><br/>Saved to your Pocket account.") % title
        info += u'''<br/><p style="text-align:right;color:red;">by KindleEar </p>
<br/><hr/><p style="color:silver;">'''
        info += _('See details below:<br/><br/>%s') % repr(item)
        info = T_INFO % ('Saved to pocket', info)
    return info.encode('utf-8')
def GET(self):
    """Serve the WPAD proxy auto-configuration script."""
    # note view_wpad does not return a string
    body = str(view_wpad())
    web.header('Content-Type', 'application/x-ns-proxy-autoconfig')
    web.header('Content-Length', len(body))
    return body
def GET(self):
    """List docker machines; 'fast' mode reads the machines directory
    instead of invoking docker-machine."""
    web.header('Access-Control-Allow-Origin', self.allow_origin)
    data = web.input()
    machine_array = []
    try:
        if 'fast' in data and data['fast'] == 'True':
            # !! TODO parse out the config.json file for the label
            # !! TODO use current users home directory instead of /root
            if os.path.isdir('/root/.docker/machine/machines'):
                # os.listdir replaces shelling out to `ls -1` and parsing
                # its stdout; sorted() keeps ls's alphabetical order.
                machine_array = sorted(
                    os.listdir('/root/.docker/machine/machines'))
        else:
            out = subprocess.check_output(
                "/usr/local/bin/docker-machine ls --filter label=vcontrol_managed=yes",
                shell=True)
            out = str(out)
            # skip the header row and the trailing empty line
            for machine in out.split("\n")[1:-1]:
                machine_array.append(machine.split(" ")[0])
    except Exception:
        # narrowed from a bare except:; still reported the same way
        print(sys.exc_info())
    return str(machine_array)
def GET(self):
    """Render the index page with all rows from the msgs table."""
    # removed unused locals `s` and `rec` from the original
    sdb = sqldb()
    sdb.cu.execute("""select * from msgs""")
    dbre = sdb.cu.fetchall()
    web.header("Content-Type", "text/html; charset=utf-8")
    return render.index(dbre)
def GET(self):
    """Serve the map page, the access-point list, or a computed route.

    The request shape decides the branch: no params -> HTML map page,
    `points` -> access-point list, `json_par` -> route search.
    """
    # removed the dead `json_par = 0; points = 0` locals that were
    # immediately shadowed by web.input(), plus the commented-out code
    i = web.input(json_par=None, points=None)

    # plain map page request (without a route)
    if i.json_par is None and i.points is None:
        web.header('Content-Type', 'text/html')
        return render.map()

    # access-point list request
    if i.points is not None:
        data = build_list2(libc.get_access_size())
        return json.dumps({"array": data})

    # route request: run the native search and serialize the result
    ll = json.loads(i.json_par)
    libc.mysearch(ll["lon1"], ll["lat1"], ll["lon2"], ll["lat2"])
    data = build_list(libc.get_size())
    libc.free_mem()
    return json.dumps({"array": data})
def GET(self):
    """Index page: a list of links to the example feeds."""
    web.header('Content-Type', mime['html'])
    items = (
        '<li><a href="/lastfm.xml">last.fm example</a></li>'
        '<li><a href="/group.xml">tributaries-project "group" xml</a></li>'
        '<li><a href="/group.json">tributaries-project "group" json</a></li>'
    )
    return '<ul>' + items + '</ul>'
def GET(self):
    """Return the current `automatic` and `threshold` settings as JSON(P)."""
    web.header('Access-Control-Allow-Origin', '*')
    web.header('Access-Control-Allow-Credentials', 'true')
    global automatic
    global threshold
    payload = {'automatic': automatic,
               'threshold': threshold,
               'success': True}
    return json_p(payload)
def POST(self): # disable nginx buffering web.header('X-Accel-Buffering', 'no') i = web.input(fast=False) #get app config if not exist will create it servers = get_servers(i.app_name) if not servers: servers = ['deploy'] save_app_option(i.app_name, 'deploy_servers', 'deploy') yield "%d:%s" % (logging.INFO, render_ok("Application allowed to deploy those servers")) yield "%d:%s" % (logging.INFO, render_ok(','.join(servers))) servers = escape_servers(servers) result = {} data = {'app_name': i.app_name, 'app_url': i.app_url} for server in servers: url = SUFFIX % server try: opener = FancyURLopener() f = opener.open(url, urlencode(data)) line = '' # to avoid NameError for line if f has no output at all. for line in iter(f.readline, ''): logger.info(line) yield line if not any(word in line for word in ['succeeded', 'failed']): result[server] = 'Failed' else: result[server] = 'Succeeded' except Exception, e: yield "%d:%s" % (logging.ERROR, render_err(str(e))) result[server] = 'Failed'
def POST(self, blog_id=None):
    """ Handle creating and editing blog posts """
    inp = web.input()
    logging.debug(inp)
    logging.debug(blog_id)
    if blog_id:
        blogpost = BlogPost.get(BlogPost.id == blog_id)
    else:
        blogpost = BlogPost()
    blogpost.title = inp.blog_title
    blogpost.content = inp.blog_content
    blogpost.slug = slugify(blogpost.title)
    blogpost.online = 1 if 'blog_online' in inp else 0
    blogpost.save()
    # BUG FIX: the header name used an underscore ("Content_Type") and the
    # charset was misspelled ("utf=8") — clients ignored both.
    web.header("Content-Type", "application/json; charset=utf-8")
    return json.dumps(
        {"blog_id": blogpost.id, "blog_slug": blogpost.slug},
        sort_keys=True, indent=4, separators=(",", ": "))
def GET(self):
    """List saved files as HTML or JSON.

    Local variable names are kept as-is because `locals()` is handed to the
    template as its globals.
    """
    # theme options: light|dark
    theme = web.input().get("theme", "light")
    # output options: html|json
    output = web.input().get("output", "html")
    # type options: model|automate
    t = web.input().get("type", "model")
    import os
    l = os.listdir(cwd('static', 'files'))
    ext = ".json"
    if t == "automate":
        ext = ".txt"
    l = filter(lambda x: x.endswith(ext), l)
    import base64, urllib2
    # base64-decode the stored file names
    l = map(lambda x: to_unicode(base64.b64decode(x[:-len(ext)])), l)
    # keep both raw and URL-quoted forms for the html and json views
    l = map(lambda x: (x, urllib2.quote(x.encode("utf-8"))), l)
    if output == 'html':
        static = cwd("static")
        render = web.template.render(cwd('templates'), globals=locals())
        return render.list()
    elif output == 'json':
        import json
        web.header('Content-Type', 'application/json')
        return json.dumps(l)
def GET(self, scope, name): """ get locks for a given scope, name. HTTP Success: 200 OK HTTP Error: 404 Not Found 500 InternalError :returns: JSON dict containing informations about the requested user. """ header('Content-Type', 'application/x-json-stream') did_type = None if ctx.query: params = parse_qs(ctx.query[1:]) if 'did_type' in params: did_type = params['did_type'][0] try: if did_type == 'dataset': for lock in get_dataset_locks(scope, name): yield render_json(**lock) + '\n' else: raise InternalError('Wrong did_type specified') except RucioException, e: raise generate_http_error(500, e.__class__.__name__, e.args[0])
def POST(self): web.header('Content-Type', 'text/html') f = login_form() if not f.validates(): raise web.seeother('/') authOptions = [am for am in web.config.auth.methods if am.can_handle_user(f.d.username)] if len(authOptions) == 0: raise web.internalerror("No appropriate login method available") for ao in authOptions: try: success, res = ao.login(f.d.username, f.d.password, web.config) if success == True: web.config.session.loggedin = True web.config.session.userid = res['userid'] web.config.session.userfullname = res['userfullname'] web.config.session.userrights = res['rights'] raise web.seeother("/") except RequireRegistrationException, info: web.config.session.showIdentifierRegistration = True web.config.session.userid = info.username web.config.session.userfullname = info.userfullname web.config.session.userrights = "none" raise web.seeother('/register')
def handle_content_encoding(self, ext):
    """Set the Content-Encoding header for the given file extension.

    The payload is expected to be in a binary encoded format.
    """
    if ext in self.encodings:
        web.header("Content-Encoding", self.encodings[ext])
def POST(self):
    """Return all class records wrapped in a success Response, as JSON."""
    web.header("Access-Control-Allow-Origin", "*")
    class_info = Class_model.getByArgs()
    response = util.Response(Status.__success__, body=class_info)
    return util.objtojson(response)
def POST(self):
    """Import a student roster from an uploaded Excel file into class `cl_id`."""
    web.header("Access-Control-Allow-Origin", "*")
    x = web.input(myfile={})  # uploaded file stream
    must_params = ('cl_id',)
    file_range = ['xls', 'xlsx', ]
    if util.paramsok(must_params, x) == Status.__params_not_ok__:
        return util.objtojson({"error": "参数错误"})
    # excel_save_site = "../examTransplant1.7/source/excel"
    excel_save_site = student_source
    current_site = os.getcwd()
    print(current_site)
    current_site = current_site.replace('\\', '/')
    real_site = current_site.split('/')
    save_site = excel_save_site.split('/')
    # compare the last three path components to decide whether we are
    # already inside the target excel directory
    flag = 1
    i = -1
    while i > -4:
        if real_site[i] != save_site[i]:
            flag = 0
            break
        i = i - 1
    print(flag)
    if flag == 0:
        try:
            os.chdir(excel_save_site)
        except:
            print('os.chdir error')
            os.chdir(current_site)
            return 0
    if 'myfile' in x:
        filepath = x.myfile.filename.replace('\\', '/')
        filename = filepath.split('/')[-1]
        file_suffix = filename.split('.')[-1]
        # accept only the whitelisted Excel extensions
        file_truth = 0
        for idx in range(len(file_range)):
            if file_suffix == file_range[idx]:
                file_truth = 1
        if file_truth == 1:
            print(filename)
            origin_site = os.getcwd()  # current working directory
            try:
                print(origin_site + '/' + "%s" % filename)
                # create the file where the uploaded data will be stored
                with open(origin_site + '/' + "%s" % filename, 'wb+') as fout:
                    print('opened')
                    fout.write(x.myfile.file.read())
                    print('writed')
                file_site = origin_site + '/' + filename
            except IOError as err:
                os.chdir(current_site)
                return util.objtojson({"error": "文件上传失败!"})
            try:
                data = xlrd.open_workbook(file_site)
                table = data.sheets()[0]
                nrows = table.nrows
                params = dict.fromkeys(Student_model.__attr__)
                for i in range(5, nrows - 1):  # data rows start at row 5
                    if i > 0:
                        params['st_id'] = int(table.row_values(i)[1])
                        params['st_name'] = table.row_values(i)[2]
                        params['st_sex'] = table.row_values(i)[3]
                        params['st_specialty'] = table.row_values(i)[4]
                        params['st_phone'] = table.row_values(i)[5]
                        params['st_picture'] = table.row_values(i)[6]
                        params = Student_model(**params)
                        try:
                            params.insert()
                            db.insert("student_has_class",
                                      student_st_id=params['st_id'],
                                      class_cl_id=x.cl_id)
                        except Exception as e:
                            # student already exists: still link it to the class
                            db.insert("student_has_class",
                                      student_st_id=params['st_id'],
                                      class_cl_id=x.cl_id)
                os.chdir(current_site)
                return 1
            except:
                r = {"success": 0, "error": "导入数据库失败"}
                os.chdir(current_site)
                return util.objtojson(r)
        else:
            os.chdir(current_site)
            return 0
    else:
        os.chdir(current_site)
        return 0
def OPTIONS(self):
    """CORS preflight: allow POST/PUT/OPTIONS from any origin."""
    web.header("Access-Control-Allow-Origin", "*")
    web.header("Access-Control-Allow-Methods", "POST, PUT, OPTIONS")
    return True
def POST(self):
    """Look up a page by its id or short code and return its metadata as JSON."""
    web.header('Content-Type', 'application/json')
    param = web.input(page_id_or_code='')
    code_or_id = param['page_id_or_code']
    if code_or_id == '':
        return json.dumps({'ret': -4, 'msg': '无效的page_id_or_code'})
    if len(code_or_id) < 20:  # a Mongo _id is 24 chars; shorter means a page code
        r1 = db.pages.find_one(
            {'page_code': code_or_id.upper(), 'available': 1},
            {'history': 0})
        if r1 is None:
            # retry with a '#1' suffix appended to the code
            r1 = db.pages.find_one(
                {'page_code': code_or_id.upper() + '#1', 'available': 1},
                {'history': 0})
            if r1 is None:
                return json.dumps({'ret': -7, 'msg': '页面不可用!'})
    else:
        real_page_id = code_or_id
        if real_page_id is None:
            return json.dumps({'ret': -4, 'msg': '无效的page_id'})
        r1 = db.pages.find_one({'_id': ObjectId(real_page_id)}, {'history': 0})
        if r1 is None:
            return json.dumps({'ret': -7, 'msg': '页面不可用!'})
    r1['_id'] = str(r1['_id'])
    if 'start_node' not in r1:
        return json.dumps({'ret': -5, 'msg': '页面类型错误!'})
    r2 = db.nodes.find_one({'_id': ObjectId(r1['start_node'])})
    if r2 is None:
        return json.dumps({'ret': -6, 'msg': '页面数据错误!'})
    # a start node with children renders as a rule tree, otherwise plain text
    r1['type'] = 'map' if len(r2.get('child', [])) > 0 else 'text'
    app_helper.log_app_api('external', 'document', param)
    return json.dumps({
        'ret': 0,
        'data': {
            'gid': str(r1['_id']),
            'type': r1['type'],
            'title': r1['page_name'],
            'page_code': r1['page_code'],
            'start_node': str(r1['start_node']),
            'text': r1['rich_text'],
        }
    })
def GET(self, url_string):
    """Return the current game state as JSON, CORS-enabled and uncached."""
    web.header('Access-Control-Allow-Origin', '*')
    web.header('Cache-control', 'no-cache')
    state = GameManager.current_game.prepare()
    return json.dumps(GameResponse(state, 0).prepare())
def send(self):
    """Advertise the allowed methods, then delegate to APIError.send."""
    web.header('Allow', ",".join(self.methods))
    return APIError.send(self)
def GET(self):
    """Distribution points page: handles edit lookup, deletion and listing.

    NOTE: local variable names are significant here — `locals()` is passed
    straight to the template at the end, so they are kept verbatim.
    """
    params = web.input(page=1, ed="", d_id="", caller="web")
    edit_val = params.ed
    session = get_session()
    districts = db.query(
        "SELECT id, name FROM locations WHERE type_id = "
        "(SELECT id FROM locationtype WHERE name = 'district') ORDER by name"
    )
    allow_edit = False
    try:
        edit_val = int(params.ed)
        allow_edit = True
    except ValueError:
        pass
    if params.ed and allow_edit:
        res = db.query(
            "SELECT id, name, subcounty, get_location_name(subcounty) subcounty_name , "
            " get_location_name(district_id) district FROM distribution_points "
            " WHERE id = $id", {'id': edit_val})
        if res:
            r = res[0]
            district = r.district
            subcounty = r.subcounty
            subcounty_name = r.subcounty_name
            name = r.name
        villages = db.query(
            "SELECT id, name FROM locations WHERE id IN "
            "(SELECT village_id FROM distribution_point_villages "
            " WHERE distribution_point = $id)", {'id': params.ed})
    allow_del = False
    try:
        del_val = int(params.d_id)
        allow_del = True
    except ValueError:
        pass
    if params.d_id and allow_del:
        if session.role in ('Micro Planning', 'Administrator'):
            db.query(
                "DELETE FROM distribution_point_villages WHERE distribution_point=$id",
                {'id': params.d_id})
            db.query("DELETE FROM distribution_points WHERE id=$id",
                     {'id': params.d_id})
        if params.caller == "api":
            # return json if API call
            web.header("Content-Type", "application/json; charset=utf-8")
            return json.dumps({'message': "success"})
    if session.role == 'Administrator':
        dpoints_SQL = (
            "SELECT id, name, get_location_name(subcounty) as subcounty, "
            " get_location_name(district_id) as district, "
            " get_distribution_point_locations(id) villages FROM distribution_points "
            " ORDER by id DESC")
    else:
        dpoints_SQL = (
            "SELECT id, name, get_location_name(subcounty) as subcounty, "
            " get_location_name(district_id) as district, "
            " get_distribution_point_locations(id) villages FROM distribution_points"
            " WHERE created_by = $user ORDER BY id DESC")
    dpoints = db.query(dpoints_SQL, {'user': session.sesid})
    l = locals()
    del l['self']
    return render.dpoints(**l)
def msgpackData(data):
    """Serialize `data` as msgpack, setting the matching content type."""
    web.header('Content-Type', 'application/msgpack')
    return msgpack.packb(data)
def GET(self):
    """Instagram feed endpoint: serve a user's timeline or tagged posts as
    JSON or RSS, with layered caching."""
    try:
        s = settings()
        s.ReadSettings()
        if s.instagramRSSSecrets and not verifySign(s.instagramRSSSecrets):
            web.HTTPError('401 Unauthorized')
            return ''
        db = InstaDatabase()
        i = InstaAPI(db, s.instagramUsername, s.instagramPassword)
        # each query parameter has a short and a long alias
        user = web.input().get("u")
        if user is None or user == '':
            user = web.input().get("user")
        typ = web.input().get("t")
        if typ is None or typ not in ['rss', 'json', 'atom']:
            typ = web.input().get("type")
        if typ is None or typ not in ['rss', 'json', 'atom']:
            typ = 'rss'
        contain_id = web.input().get("cid")
        if contain_id is None:
            contain_id = web.input().get("contain_id")
        contain_id = contain_id is not None
        tagged = web.input().get("tagged")
        tagged = True if tagged is not None else False
        fetch_post = web.input().get("fetch_post")
        fetch_post = True if fetch_post is not None else False
        proxy = web.input().get("proxy")
        proxy = True if proxy is not None else False
        cacheTime = s.instagramCacheTime
        if user is not None:
            # cached entries are (payload, timestamp-ns) tuples
            idd = f"user/{user}/init"
            r = db.get_cache(idd, cacheTime)
            new_cache = False
            if r is None:
                i._get_init_csrftoken()
                r = i.get_user_info(user)
                c = db.save_cache(idd, r)
                new_cache = True
            else:
                c = r[1]
                r = r[0]
            if tagged:
                idd2 = f"user/{user}/tagged"
                r2 = None
                new_cache = False
                r2 = db.get_cache(idd2, cacheTime)
                if r2 is not None:
                    c = r2[1]
                    r2 = r2[0]
                if r2 is None:
                    r2 = i.get_user_tagged(r['id'])
                    c = db.save_cache(idd2, r2)
                    new_cache = True
                sendCacheInfo(cacheTime * 60, c)
                if fetch_post:
                    # expand sidecar posts into full post data
                    edges = r2['edge_user_to_photos_of_you']['edges']
                    for e in edges:
                        if e['node']['__typename'] == 'GraphSidecar':
                            shortCode = e['node']['shortcode']
                            idd4 = f"post/{shortCode}"
                            r4 = db.get_cache(idd4, cacheTime)
                            if r4 is not None:
                                r4 = r4[0]
                            if r4 is None:
                                r4 = i.get_post(shortCode)
                                db.save_cache(idd4, r4)
                            e['node'] = r4
                if typ == "json":
                    web.header("Content-Type",
                               "application/json; charset=UTF-8")
                    return dumps(r2, ensure_ascii=False, separators=jsonsep)
                elif typ == "rss":
                    r3 = None
                    if s.isntagramCacheRSS and not new_cache:
                        d = {"contain_id": str(contain_id),
                             "proxy": str(proxy)}
                        idd3 = f"user/{user}/tagged/rss?" + urlencode(d)
                        r3 = db.get_cache(idd3, cacheTime)
                        if r3 is not None:
                            r3 = r3[0]
                    if r3 is None:
                        from RSSGenerator import RSSGen, RSS2_TYPE
                        from instaHTMLGen import genItemList
                        g = RSSGen(RSS2_TYPE)
                        if not contain_id:
                            ti = f"Instagram Tagged {r['full_name']}(@{r['username']})"  # noqa: E501
                        else:
                            ti = f"Instagram Tagged {r['full_name']}(@{r['username']}, {r['id']})"  # noqa: E501
                        g.meta.title = ti
                        url = f"https://www.instagram.com/{r['username']}/"
                        if 'external_url' in r:
                            te = r['external_url']
                            if te is not None and isinstance(
                                    te, str) and len(te):  # noqa: E501
                                url = te
                        g.meta.link = url
                        g.meta.description = r['biography']
                        image = r['profile_pic_url_hd']
                        if proxy:
                            from instaRSSP import genUrl
                            image = genUrl(image, s.RSSProxySerects)
                        g.meta.image = image
                        g.meta.lastBuildDate = c / 1E9
                        g.meta.ttl = cacheTime
                        g.list = genItemList(r2, RSS2_TYPE, proxy=proxy)
                        r3 = g.generate()
                        if s.isntagramCacheRSS:
                            db.save_cache(idd3, r3)
                    web.header("Content-Type",
                               "application/xml; charset=UTF-8")
                    return r3
                return
            sendCacheInfo(cacheTime * 60, c)
            if typ == 'json':
                web.header("Content-Type", "application/json; charset=UTF-8")
                return dumps(r, ensure_ascii=False, separators=jsonsep)
            elif typ == "rss":
                r2 = None
                if s.isntagramCacheRSS and not new_cache:
                    d = {"contain_id": str(contain_id), "proxy": str(proxy)}
                    idd2 = f"user/{user}/rss?" + urlencode(d)
                    r2 = db.get_cache(idd2, cacheTime)
                    if r2 is not None:
                        r2 = r2[0]
                if r2 is None:
                    from RSSGenerator import RSSGen, RSS2_TYPE
                    from instaHTMLGen import genItemList
                    g = RSSGen(RSS2_TYPE)
                    if not contain_id:
                        ti = f"Instagram {r['full_name']}(@{r['username']})"  # noqa: E501
                    else:
                        ti = f"Instagram {r['full_name']}(@{r['username']}, {r['id']})"  # noqa: E501
                    g.meta.title = ti
                    url = f"https://www.instagram.com/{r['username']}/"
                    if 'external_url' in r:
                        te = r['external_url']
                        if te is not None and isinstance(
                                te, str) and len(te):  # noqa: E501
                            url = te
                    g.meta.link = url
                    g.meta.description = r['biography']
                    image = r['profile_pic_url_hd']
                    if proxy:
                        from instaRSSP import genUrl
                        image = genUrl(image, s.RSSProxySerects)
                    g.meta.image = image
                    g.meta.lastBuildDate = c / 1E9
                    g.meta.ttl = cacheTime
                    g.list = genItemList(r, RSS2_TYPE, proxy=proxy)
                    r2 = g.generate()
                    if s.isntagramCacheRSS:
                        db.save_cache(idd2, r2)
                web.header("Content-Type", "application/xml; charset=UTF-8")
                return r2
    except NeedVerifyError as e:
        z = [('gourl', web.ctx.path), ('nc', e.sign)]
        web.HTTPError('302 Found')
        web.header("Location", "/instaVerify?" + urlencode(z))
        return ''
    except:
        web.HTTPError('500 Internal Server Error')
        try:
            s = settings()
            s.ReadSettings()
            if s.debug:
                return format_exc()
        except:
            pass
        return ''
def POST(self):
    """Grade an EDX XQueue submission.

    Decodes the XQueue POST body, locates the task named in
    ``grader_payload``, runs it through the synchronous job manager and
    returns the EDX grader JSON response:
    ``{"correct": bool|None, "score": number, "msg": html}``.
    """
    web.header('Content-Type', 'application/json')

    def error(msg):
        # Every failure path returns the same EDX-shaped error document.
        return json.dumps({"correct": None, "score": 0, "msg": msg})

    post_input = web.data()
    try:
        decoded_input = json.loads(post_input)
    # FIX: was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt.
    except Exception:
        return error("<p>Internal grader error: cannot decode POST</p>")

    if "xqueue_body" not in decoded_input:
        return error("<p>Internal grader error: no xqueue_body in POST</p>")

    try:
        edx_input = json.loads(decoded_input["xqueue_body"])
        taskid = json.loads(edx_input["grader_payload"])["tid"]
    except Exception:
        return error("<p>Internal grader error: cannot decode JSON</p>")

    try:
        task = course.get_task(taskid)
    except Exception:
        return error(
            "<p>Internal grader error: unknown task {}</p>".format(taskid))

    if not task.input_is_consistent(
            edx_input,
            self.default_allowed_file_extensions,
            self.default_max_file_size):
        return error(
            "<p>Internal grader error: input not consistent with task</p>")

    try:
        job_return = job_manager_sync.new_job(task, edx_input, "Plugin - EDX")
    except Exception:
        return error(
            "<p>Internal grader error: error while grading submission</p>")

    try:
        # Assemble the feedback HTML: global text plus one section per problem.
        text = ""
        if "text" in job_return:
            text = job_return["text"]
        if "problems" in job_return:
            for prob in job_return["problems"]:
                text += "<br/><h4>" + job_return["task"].get_problems(
                )[prob].get_name() + "</h4>" + job_return["problems"][prob]

        # "score" overrides the binary success/failure score when present.
        score = (1 if job_return["result"] == "success" else 0)
        if "score" in job_return:
            score = job_return["score"]

        # Tidy the fragment so EDX receives well-formed XHTML.
        import tidylib
        out, dummy = tidylib.tidy_fragment(text, options={
            'output-xhtml': 1,
            'enclose-block-text': 1,
            'enclose-text': 1
        })
        return json.dumps({
            "correct": (True if (job_return["result"] == "success") else None),
            "score": score,
            "msg": out
        })
    except Exception:
        return error(
            "<p>Internal grader error: error converting submission result</p>")
def POST(self):
    """Import an edition into Open Library from an archive.org item.

    Handles two shapes of ``identifier``:
      * bulk MARC: ``ocaid/filename:offset:length`` (when bulk_marc=true),
        loading one binary MARC record and reporting the next record's
        offset/length in the response;
      * a plain archive.org identifier, resolved via IA metadata and,
        when available, its MARC record.
    Returns a JSON document (import result or structured error).
    """
    web.header('Content-Type', 'application/json')
    if not can_write():
        raise web.HTTPError('403 Forbidden')
    i = web.input()
    # require_marc defaults to True unless explicitly passed as 'false'.
    require_marc = not (i.get('require_marc') == 'false')
    bulk_marc = i.get('bulk_marc') == 'true'
    if 'identifier' not in i:
        return self.error('bad-input', 'identifier not provided')
    identifier = i.identifier

    # First check whether this is a non-book, bulk-marc item
    if bulk_marc:
        # Get binary MARC by identifier = ocaid/filename:offset:length
        re_bulk_identifier = re.compile(r"([^/]*)/([^:]*):(\d*):(\d*)")
        # FIX: initialise next_data up front. Previously it was only bound
        # mid-try, so a MarcException raised before that line made the
        # handler's `**next_data` fail with NameError.
        next_data = {}
        try:
            ocaid, filename, offset, length = re_bulk_identifier.match(
                identifier).groups()
            data, next_offset, next_length = get_from_archive_bulk(identifier)
            next_data = {
                'next_record_offset': next_offset,
                'next_record_length': next_length
            }
            rec = MarcBinary(data)
            edition = read_edition(rec)
        except MarcException as e:
            details = "%s: %s" % (identifier, str(e))
            logger.error("failed to read from bulk MARC record %s", details)
            return self.error('invalid-marc-record', details, **next_data)

        actual_length = int(rec.leader()[:MARC_LENGTH_POS])
        edition['source_records'] = 'marc:%s/%s:%s:%d' % (
            ocaid, filename, offset, actual_length)

        local_id = i.get('local_id')
        if local_id:
            local_id_type = web.ctx.site.get('/local_ids/' + local_id)
            prefix = local_id_type.urn_prefix
            id_field, id_subfield = local_id_type.id_location.split('$')

            def get_subfield(field, id_subfield):
                if isinstance(field, str):
                    return field
                subfields = field[1].get_subfield_values(id_subfield)
                return subfields[0] if subfields else None

            _ids = [
                get_subfield(f, id_subfield)
                for f in rec.read_fields([id_field])
                if f and get_subfield(f, id_subfield)
            ]
            edition['local_id'] = [
                'urn:%s:%s' % (prefix, _id) for _id in _ids
            ]

        # Don't add the book if the MARC record is a non-book item
        self.reject_non_book_marc(rec, **next_data)
        result = add_book.load(edition)
        # Add next_data to the response as location of next record:
        result.update(next_data)
        return json.dumps(result)

    # Case 1 - Is this a valid Archive.org item?
    metadata = ia.get_metadata(identifier)
    if not metadata:
        return self.error('invalid-ia-identifier', '%s not found' % identifier)

    # Case 2 - Does the item have an openlibrary field specified?
    # The scan operators search OL before loading the book and add the
    # OL key if a match is found. We can trust them and attach the item
    # to that edition.
    if metadata.get('mediatype') == 'texts' and metadata.get('openlibrary'):
        edition_data = self.get_ia_record(metadata)
        edition_data['openlibrary'] = metadata['openlibrary']
        edition_data = self.populate_edition_data(edition_data, identifier)
        return self.load_book(edition_data)

    # Case 3 - Can the item be loaded into Open Library?
    status = ia.get_item_status(identifier, metadata)
    if status != 'ok':
        return self.error(status, 'Prohibited Item %s' % identifier)

    # Case 4 - Does this item have a marc record?
    marc_record = get_marc_record_from_ia(identifier)
    if marc_record:
        self.reject_non_book_marc(marc_record)
        try:
            edition_data = read_edition(marc_record)
        except MarcException as e:
            logger.error('failed to read from MARC record %s: %s',
                         identifier, str(e))
            return self.error('invalid-marc-record')
    elif require_marc:
        return self.error('no-marc-record')
    else:
        try:
            edition_data = self.get_ia_record(metadata)
        except KeyError:
            return self.error("invalid-ia-metadata")

    # Add IA specific fields: ocaid, source_records, and cover
    edition_data = self.populate_edition_data(edition_data, identifier)
    return self.load_book(edition_data)
def GET(self, machine):
    """Placeholder endpoint for per-machine queries (not implemented)."""
    # Allow cross-origin callers as configured on this handler instance.
    web.header('Access-Control-Allow-Origin', self.allow_origin)
    # TODO: implement the actual machine lookup.
    return "not implemented yet"
def jsonData(data):
    """Serialise *data* to JSON and mark the response as application/json."""
    web.header('Content-Type', 'application/json')
    body = json.dumps(data)
    return body
def GET(self):
    """Return a random integer in [60, 100] (CORS-enabled)."""
    web.header("Access-Control-Allow-Origin", "*")
    c = random.randint(60, 100)
    # FIX: removed leftover debug statement `print c`.
    return c
def GET(self):
    """Reject GET with a plain-text hint that this API is POST-only."""
    web.header('Content-Type', 'text/plain')
    notice = 'Import API only supports POST requests.'
    return notice
def GET(self):
    # Serve the platform's landing page as UTF-8 HTML.
    web.header("Content-Type", "text/html; charset=utf-8")
    # NOTE(review): the triple-quoted HTML body appears truncated in this
    # view (no closing quotes visible) — confirm against the full file.
    return """<html><head><title>RHadoop短文本分类平台</title></head><body>
def POST(self, sub):
    """Return the module-level `tables` structure as a JSON document."""
    web.header('Content-Type', 'application/json')
    payload = json.dumps(tables)
    return payload
def POST(self):
    """Return the cached "codes" entry from memcache as JSON."""
    cached = utils.mcache.get("codes")
    # FIX: removed leftover debug print (was counting handler invocations).
    web.header('Content-Type', 'application/json')
    return json.dumps(cached)
def handler(self, path): action, subaction = path.split('/') web.header('Content-type', 'application/json') args = web.input() print "Calling {0}/{1} with args {2}".format(action, subaction, args) return self.methods[action][subaction](self, args)
def json_app_resp(cls):
    """Define the response format for web user: attach CORS headers and
    a JSON content-type, in that order."""
    for header_name, header_value in (
        ('Access-Control-Allow-Origin', WebHttpThread._allow_origin),
        ('Access-Control-Allow-Methods', WebHttpThread._allow_methods),
        ('Content-Type', 'application/json; charset=utf-8'),
    ):
        web.header(header_name, header_value)
def global_headers(handler):
    """web.py processor: force a JSON content-type, then run *handler*."""
    web.header('Content-Type', 'application/json')
    result = handler()
    return result
def POST(self):
    """Proxy-management API: delete a stored proxy entry by id.

    Expects signed parameters (verified via ``verifySign``): ``t`` (caller's
    current unix time, must be within +/-300s), ``id`` (proxy id) and
    ``a``/``action`` which must be ``'delete'``. Supports JSONP via
    ``callback`` on GET and CORS via ``cors``. Returns a JSON document
    ``{"code": ..., ...}`` where code 0 means success.
    """
    if self._method is None:
        self._method = 'POST'
    web.header("Content-Type", "application/json; charset=utf-8")
    try:
        callback = None
        if self._method == 'GET':
            callback = web.input().get("callback")
            if callback is not None and callback != '':
                web.header("Content-Type",
                           "application/javascript; charset=utf-8")
            else:
                callback = None
        cors = web.input().get("cors")
        if cors is not None:
            web.header('Access-Control-Allow-Origin', '*')

        def wrap(d):
            # JSONP-wrap the payload when a callback was requested.
            return d if callback is None else f'{callback}({d})'

        s = settings()
        s.ReadSettings()
        sg = s.proxyAPISecrets
        if sg is None:
            d = dumps({"code": -500, "msg": "proxyAPISecrets must be set in settings."}, ensure_ascii=False, separators=jsonsep)  # noqa: E501
            return wrap(d)
        if not verifySign(sg):
            d = dumps({"code": -401, "msg": "Unauthorized"},
                      ensure_ascii=False, separators=jsonsep)
            return wrap(d)
        t = web.input().get("t")
        if t is None:
            d = dumps({"code": -1, "msg": "current time(t) is needed."},
                      ensure_ascii=False, separators=jsonsep)
            return wrap(d)
        try:
            t = int(t)
        except Exception:
            d = dumps({"code": -2, "msg": "current time(t) must be a integer."}, ensure_ascii=False, separators=jsonsep)  # noqa: E501
            return wrap(d)
        nt = round(time())
        # FIX: the lower bound used to be `t < (t - 300)`, which compares t
        # with itself and is always False — requests arbitrarily far in the
        # future were accepted. The skew check must compare server time `nt`
        # against the client-supplied `t` on both sides.
        if nt > (t + 300) or nt < (t - 300):
            d = dumps({"code": -3, "msg": "Emm. Seems the current time is not right."}, ensure_ascii=False, separators=jsonsep)  # noqa: E501
            return wrap(d)
        idd = web.input().get("id")
        if idd is None:
            d = dumps({"code": -4, "msg": "id is needed."},
                      ensure_ascii=False, separators=jsonsep)
            return wrap(d)
        act = web.input().get("a")
        if act is None or act == '':
            act = web.input().get("action")
            if act is None or act == '':
                d = dumps({"code": -5, "msg": "action type (a/action) is needed."}, ensure_ascii=False, separators=jsonsep)  # noqa: E501
                return wrap(d)
        if act != 'delete':
            d = dumps({"code": -6, "msg": "action type (a/action) must be 'delete'."}, ensure_ascii=False, separators=jsonsep)  # noqa: E501
            return wrap(d)
        db = ProxyDb()
        r = db.delete_proxy(idd)
        d = dumps({"code": 0, "result": r},
                  ensure_ascii=False, separators=jsonsep)
        return wrap(d)
    # Narrowed from bare `except:`; top-level boundary returns a 500 payload.
    except Exception:
        t = ''
        try:
            # Best effort: include the traceback only when debug is enabled.
            s = settings()
            s.ReadSettings()
            if s.debug:
                t = format_exc()
        except Exception:
            pass
        return dumps({"code": -500, "msg": t},
                     ensure_ascii=False, separators=jsonsep)
def GET(self):
    """Fetch archive.org's unique-IP statistics and relay them as JSON."""
    web.header('Content-Type', 'application/json')
    # NOTE(review): no timeout is set on this request — confirm that a
    # hanging upstream is acceptable here.
    stats_body = requests.get(
        "http://www.archive.org/download/stats/numUniqueIPsOL.json").text
    return delegate.RawText(stats_body)
def GET(self):
    """Read captured data from the AXI2S RAM block and return it as JSON."""
    ram = axi2s_u.axi2s_u(_g.AXI2S_IBASE, _g.AXI2S_ISIZE)
    try:
        data = ram.rfdata()
    finally:
        # FIX: deinit() was previously skipped if rfdata() raised, leaking
        # the hardware handle.
        ram.deinit()
    web.header('Content-Type', 'text/json')
    return json.dumps(data)
def GET(self, *args, **kwargs):
    # Generic API dispatcher (Python 2 syntax): resolves the implementation
    # class (optionally overridden via `_force_api_impl`), picks the target
    # method from the URL args with create/update aliasing, invokes it with
    # the query-string parameters, and emits the result as JSON — or with a
    # custom content-type when the method returns a (body, content_type)
    # tuple. Raises BadRequest on bad input, NoMethod when no handler fits.

    # prepare parameters
    params = self._get_query_string_params()
    params.update(kwargs)

    # override implementation class if needed
    if params.get('_force_api_impl'):
        impl_code = params['_force_api_impl']
        file_name, class_name = self.file_and_class.rsplit('.', 1)
        implementations = dict(
            self.config.implementations.implementation_list)
        try:
            base_module_path = implementations[impl_code]
        except KeyError:
            raise BadRequest('Implementation code "%s" does not exist'
                             % impl_code)
        try:
            module = __import__('%s.%s' % (base_module_path, file_name),
                                globals(), locals(), [class_name])
        except ImportError:
            raise BadRequest(
                "Unable to import %s.%s.%s (implementation code is %s)" %
                (base_module_path, file_name, class_name, impl_code))
        instance = getattr(module, class_name)(config=self.config,
                                               all_services=self.all_services)
    else:
        # default implementation class configured on this handler
        instance = self.cls(config=self.config,
                            all_services=self.all_services)

    # find the method to call
    default_method = kwargs.pop('default_method', 'get')
    if default_method not in ('get', 'post', 'put', 'delete'):
        raise ValueError('%s not a recognized method' % default_method)

    method_name = default_method
    # NOTE(review): only the first positional arg is used as a method name,
    # and only when more than one arg is present — confirm this matches the
    # URL routing that feeds *args.
    if len(args) > 1:
        method_name = args[0]
    try:
        method = getattr(instance, method_name)
    except AttributeError:
        # Fall back through the aliasing rules before giving up.
        try:
            if (method_name == 'post' and
                    getattr(instance, 'create', None)):
                # use the `create` alias
                method = instance.create
            elif (method_name == 'put' and
                    getattr(instance, 'update', None)):
                # use the `update` alias
                method = instance.update
            elif (default_method == 'post' and
                    getattr(instance, 'create_%s' % method_name, None)):
                # use `create_<method>`
                method = getattr(instance, 'create_%s' % method_name)
            elif (default_method == 'put' and
                    getattr(instance, 'update_%s' % method_name, None)):
                # use `update_<method>`
                method = getattr(instance, 'update_%s' % method_name)
            else:
                # avoid doubling the prefix (e.g. `get_get_x`)
                if method_name.startswith(default_method):
                    raise AttributeError
                method = getattr(instance,
                                 '%s_%s' % (default_method, method_name))
        except AttributeError:
            self.config.logger.warning(
                'The method %r does not exist on %r' %
                (method_name, instance))
            raise web.webapi.NoMethod(instance)
    try:
        result = method(**params)
        # A tuple result carries its own content type: (body, content_type).
        if isinstance(result, tuple):
            web.header('Content-Type', result[1])
            return result[0]
        web.header('Content-Type', 'application/json')
        dumped = ujson.dumps(result)
        web.header('Content-Length', len(dumped))
        return dumped
    except (MissingArgumentError, BadArgumentError), msg:
        # Argument validation failures surface as a 400 with a JSON error.
        raise BadRequest({'error': {'message': str(msg)}})
def GET(self):
    """Return a small fixed demo dictionary as JSON."""
    sample = {'one': 1, 'two': 2}
    web.header('Content-Type', 'application/json')
    return json.dumps(sample)
def emit_json(self, data):
    """Serialise *data* to JSON and wrap it in a RawText response."""
    web.header('Content-Type', 'application/json')
    payload = simplejson.dumps(data)
    return infogami_delegate.RawText(payload)
def GET(self):
    """Render the index page listing the log directory and known commands."""
    web.header('Content-Type', 'text/html; charset=UTF-8')
    log_entries = os.listdir(LOGDIR)
    return templates.index(log_entries, Cmd.CMDS)
def POST(self):
    """Consume the posted form and acknowledge with a JSON 'ok'."""
    # Parse the request body (web.input() consumes the POST data); the
    # parsed values are not otherwise used.
    i = web.input()
    # FIX: removed leftover debug statement `print i`.
    web.header('Content-Type', 'text/json')
    return json.dumps({'ret': 'ok'})