def test_url_unescape_unicode(self):
    tests = [
        ('%C3%A9', u('\u00e9'), 'utf8'),
        ('%C3%A9', u('\u00c3\u00a9'), 'latin1'),
        ('%C3%A9', utf8(u('\u00e9')), None),
    ]
    for escaped, unescaped, encoding in tests:
        # input strings to url_unescape should only contain ascii
        # characters, but make sure the function accepts both byte
        # and unicode strings.
        self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
        self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
def test_url_escape_quote_plus(self):
    unescaped = '+ #%'
    plus_escaped = '%2B+%23%25'
    escaped = '%2B%20%23%25'
    self.assertEqual(url_escape(unescaped), plus_escaped)
    self.assertEqual(url_escape(unescaped, plus=False), escaped)
    self.assertEqual(url_unescape(plus_escaped), unescaped)
    self.assertEqual(url_unescape(escaped, plus=False), unescaped)
    self.assertEqual(url_unescape(plus_escaped, encoding=None), utf8(unescaped))
    self.assertEqual(url_unescape(escaped, encoding=None, plus=False), utf8(unescaped))
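
A minimal round-trip sketch of the plus/encoding semantics the two tests above exercise, using only tornado.escape; the sample string is illustrative:

from tornado.escape import url_escape, url_unescape

s = 'a b+c'
q = url_escape(s)                      # 'a+b%2Bc': plus=True is the default, space -> '+'
assert url_unescape(q) == s            # round-trips with the matching plus=True default
p = url_escape(s, plus=False)          # 'a%20b%2Bc': path-style escaping, space -> '%20'
assert url_unescape(p, plus=False) == s
assert url_unescape(q, encoding=None) == s.encode('utf-8')  # encoding=None returns bytes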
def get_link(self, link):
    try:
        data = yield http_client(link, c_try=5, c_delay=self.delay)
        try:
            data = escape.url_unescape(
                data.split('proxy.link=', 1)[1].split('"', 1)[0].split('&', 1)[0].strip())
        except:
            data = data.split('<param name="movie" value="', 1)[1].split('"', 1)[0]
            data = escape.url_unescape(data)
            if 'youtube' in data:
                data = data.split('?', 1)[0].strip()
        print(link, data)
        return data
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        return None
def query(self, option, q):
    # url_unescape returns the decoded string; the result must be kept
    q = url_unescape(q, 'utf-8')
    # note: interpolating q directly into SQL is unsafe (SQL injection);
    # kept as in the original, which builds the query by string formatting
    if option == 'option1':
        info = self.db.query(
            "SELECT U.UID,U.USERNAME,U.CHINAME,D.DETECTTIME,D.STATUS FROM USER U "
            "LEFT OUTER JOIN DETECT D ON U.UID = D.OWNER WHERE U.CHINAME='%s'" % q)
    elif option == 'option2':
        info = self.db.query(
            "SELECT U.UID,U.USERNAME,U.CHINAME,D.DETECTTIME,D.STATUS FROM USER U "
            "LEFT OUTER JOIN DETECT D ON U.UID = D.OWNER WHERE U.UID=%s" % q)
    elif option == 'option3':
        info = self.db.query(
            "SELECT U.UID,U.USERNAME,U.CHINAME,D.DETECTTIME,D.STATUS FROM USER U "
            "LEFT OUTER JOIN DETECT D ON U.UID = D.OWNER WHERE U.USERNAME='%s'" % q)
    for item in info:
        item['DETECTTIME'] = datetime_handler(item['DETECTTIME'])
    return info
def post(self):
    next_page = escape.url_unescape(self.get_argument("next"))
    id = int(self.get_argument("id"))
    type = self.get_argument("type")
    if type == "entry":
        meidodb.delete_entry(id)
        meidodb.delete_comment_by_entry(id)
    elif type == "comment":
        meidodb.delete_comment_by_id(id)
    # if next page is the blog just removed, jump to /
    if type == "entry" and next_page.find("/blog/") != -1:
        self.redirect("/")
    else:
        self.redirect(escape.url_unescape(next_page))
def parse_request(self, request):
    self._log.debug("Path: %s", request.path)
    self._log.debug("Query: %s", request.query)
    # If a request object exists then it contains the method type
    self._command_type = request.method
    # Parse the command name
    self.parse_path(request.path)
    # If a request object exists then it should contain useful trace information
    # Check for the remote IP
    if request.remote_ip:
        self._trace[CommandTrace.origin_address] = request.remote_ip
    # Check for the username of the client
    if 'User' in request.headers:
        self._trace[CommandTrace.user] = request.headers['User']
    # Check for the creation time of the request
    if 'Creation-Time' in request.headers:
        self._trace[CommandTrace.creation_time] = request.headers['Creation-Time']
    # Check for the user agent (client application)
    if 'User-Agent' in request.headers:
        self._trace[CommandTrace.origin_type] = request.headers['User-Agent']
    self._log.debug("Parsed request [%s], trace: %s", self._command_type, self._trace)
    # Parse any parameters
    self.parse_parameters(request.query)
    # Check request body to see if we can parse it
    if request.body:
        try:
            self._parameters.update(json.loads(request.body))
        except:
            # self.parse_parameters(str(request.body.encode('ascii')))
            self.parse_parameters(str(escape.url_unescape(request.body)))  # .decode("utf-8")))
def environ(request):
    """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
    """
    hostport = request.host.split(":")
    if len(hostport) == 2:
        host = hostport[0]
        port = int(hostport[1])
    else:
        host = request.host
        port = 443 if request.protocol == "https" else 80
    environ = {
        "REQUEST_METHOD": request.method,
        "SCRIPT_NAME": "",
        "PATH_INFO": to_wsgi_str(escape.url_unescape(
            request.path, encoding=None, plus=False)),
        "QUERY_STRING": request.query,
        "REMOTE_ADDR": request.remote_ip,
        "SERVER_NAME": host,
        "SERVER_PORT": str(port),
        "SERVER_PROTOCOL": request.version,
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": request.protocol,
        "wsgi.input": BytesIO(escape.utf8(request.body)),
        "wsgi.errors": sys.stderr,
        "wsgi.multithread": False,
        "wsgi.multiprocess": True,
        "wsgi.run_once": False,
    }
    if "Content-Type" in request.headers:
        environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
    if "Content-Length" in request.headers:
        environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
    for key, value in request.headers.items():
        environ["HTTP_" + key.replace("-", "_").upper()] = value
    return environ
def get(self, leaderboard_type, leaderboard_object="",
        slash_separated_players=None, conjunction=None):
    if slash_separated_players:
        players = filter(None, slash_separated_players.split("/"))
    else:
        players = []
    if leaderboard_object in ["", "players/"]:
        unique_players = leaderboard_object == "players/"
        all_high_scores = get_all_high_scores(10, leaderboard_type, players, conjunction,
                                              unique_players=unique_players)
    elif leaderboard_object in ["games/"]:
        all_high_scores = get_all_high_games(10, leaderboard_type, players, conjunction)
    try:
        time_offset = int(url_unescape(self.get_cookie("time_offset")))
    except:
        time_offset = 0
    self.render(
        "leaderboard.html",
        players=players,
        all_high_scores=all_high_scores,
        leaderboard_types=[('alltime', 'All Time'), ('thisweek', 'This Week'), ('today', 'Today')],
        selected_leaderboard_type=leaderboard_type,
        leaderboard_object=leaderboard_object,
        time_offset=time_offset,
        conjunction=conjunction,
        game_type_info=GAME_TYPE_INFO)
def get_blink_cookie(self, name):
    """Gets a blink cookie value"""
    value = self.get_cookie(name)
    if value is not None:
        self.clear_cookie(name)
        return escape.url_unescape(value)
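
A plausible counterpart to the blink-cookie reader above; set_blink_cookie is a hypothetical name not present in the original, shown only to illustrate the url_escape/url_unescape pairing:

from tornado import escape, web

class FlashMixin(web.RequestHandler):
    def set_blink_cookie(self, name, value):
        # Hypothetical writer: store the value url-escaped so the reader's
        # url_unescape round-trips it; the reader clears the cookie on
        # first access, hence "blink".
        self.set_cookie(name, escape.url_escape(value))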
def post(self, pathname):
    rhs = self.get_argument('rhs', default=None)
    vtype = self.get_argument('type', default=None)
    if rhs and vtype:
        obj, dot, var = escape.url_unescape(pathname).partition('.')
        if vtype == 'str':
            command = '%s.set(%r, %r)' % (obj, var, rhs)
        else:
            command = '%s.set(%r, %s)' % (obj, var, rhs)
        result = ''
        try:
            cserver = self.get_server()
            result = cserver.onecmd(command)
        except Exception as exc:
            print >>sys.stderr, "VariableHandler: Error issuing command %s: %s" \
                % (command, str(exc) or repr(exc))
            result += str(sys.exc_info())
        if result:
            result += '\n'
        self.content_type = 'text/html'
        self.write(result)
    else:
        self.send_error(400)  # bad request
def get(self, task, status_list):
    status_list = escape.url_unescape(status_list.lower()).split(',')
    self.write(pd.io.json.dumps(
        {status: list(map(tansform_bson_id,
                          self.application.mongo.tasks.find({'status': status, 'task': task})))
         for status in status_list}))
def post(self, path=None):
    try:
        result = {}
        root = os.path.join(self.root, path) if path else self.root
        self._check_dir_(root)
        for key in self.request.files:
            for fileinfo in self.request.files[key]:
                fname = url_unescape(fileinfo['filename'])
                fNo = 0
                fActual, fExt = os.path.splitext(fname)
                files = os.listdir(root)
                # avoid clobbering an existing file by appending a counter
                while fname in files:
                    fNo = fNo + 1
                    fname = "%s_%s%s" % (fActual, fNo, fExt)
                file_path = os.path.join(root, fname)
                logging.info("uploading %s", file_path)
                with open(file_path, 'wb') as fh:
                    fh.write(fileinfo['body'])
                info = {}
                result[key] = {"name": fname, "info": info}
        self.write({"result": result})
    except Exception as ex:
        logging.exception(path)
        self.write({"error": str(ex)})
def initialize(self):
    super(RequestHandler, self).initialize()
    self._force_rollback = False
    self._session = None
    self._force_redirect = None
    flash_cookie = self.get_cookie('flash')
    if flash_cookie:
        self.flash = Flash.load(url_unescape(flash_cookie))
    else:
        self.flash = Flash()
    host = self.request.host.lower()
    self.production = (host == 'graffspotting.com'
                       or host.endswith('.graffspotting.com'))
    mobile = None
    mobile_cookie = self.get_cookie('m', None)
    if mobile_cookie is None:
        if detect_mobile(self.request.headers.get('User-Agent')):
            self.set_cookie('m', '1')
            mobile = True
            if host == 'graffspotting.com':
                self._force_redirect = 'm.graffspotting.com'
                return
        else:
            mobile = False
            self.set_cookie('m', '0')
    elif host == 'm.graffspotting.com' and mobile_cookie == '0':
        self._force_redirect = 'graffspotting.com'
        return
    elif host == 'graffspotting.com' and mobile_cookie == '1':
        self._force_redirect = 'm.graffspotting.com'
        return
    if mobile is None:
        mobile = mobile_cookie == '1'
    user_id = self.get_secure_cookie('s')
    if user_id:
        user_id, = struct.unpack('<I', user_id)
        self.user = db.User.by_id(self.session, user_id)
    else:
        self.user = None
    # ensure the user has a unique visitor cookie
    if self.get_secure_cookie('v', None) is None:
        self.set_secure_cookie('v', os.urandom(10))
    self.env = {
        'config': config,
        'debug': self.settings['debug'],
        'esc': url_escape,
        'flash': self.flash,
        'gmaps_api_key': config.get('gmaps_api_key', 'AIzaSyCTd_7j6ZeXATLOfTvpAqaqCkxM0zFP5Oc'),
        'is_error': False,
        'mobile': mobile,
        'today': datetime.date.today(),
        'user': self.user
    }
def get(self):
    wechat_uuid = None
    cli = AsyncHTTPClient()
    uri = url_unescape(self.get_argument('redirect_uri', None))
    with db_session:
        user = User.get(uuid=self.current_user)
        wechat_uuid = user.wechat_uuid
    yield cli.fetch(uri, self.callback)
def parse(data):
    """Parse url-encoded data from jQuery.param()"""
    ret = {}
    for part in data.split('&'):
        if part:
            key, value = url_unescape(part).split('=', 1)
            value = get_type(value)
            parse_one(key, value, ret)
    return ret
def post(self):
    username = self.get_argument("username")
    next_url = escape.url_unescape(self.get_argument("next"))
    sha256_password = hashlib.sha256(self.get_argument("password")).hexdigest()
    if (username == meidodb.get_siteinfo("username")
            and sha256_password == meidodb.get_siteinfo("password")):
        self.set_secure_cookie("user", "ling0322")
        self.redirect(next_url)
    else:
        self.redirect("/message?m=用户名密码错了呢> <")
def get_link(self, link):
    try:
        data = yield http_client(link, c_try=5, c_delay=self.delay)
        data = escape.url_unescape(
            data.split('&proxy.link=', 1)[1].split('&', 1)[0].strip())
        print(link, data)
        return data
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        return None
def get(self, *args):
    realm = url_unescape(self.request.query)
    # note: the attribute is `headers` (the original read `self.request.header`)
    if self.request.headers["PHP_AUTH_USER"]:
        self.set_status(401)
        self.set_header("WWW-Authenticate", "basic realm=\"%s\"" % realm)
        self.write("If you are seeing this text, authentication did not work.")
    else:
        self.write("If you are seeing this text, authentication worked.")
def post(self):
    global data, columns_name
    val = dict(map(lambda x: x.split('='), self.get_argument('val').split('&')))
    logging.info(val)
    if val['action'] == 'change_col':
        j = int(val['j'])
        columns_name[j] = url_unescape(val['ch'])
    if val['action'] == 'change':
        i = int(val['i'])
        j = int(val['j'])
        data.iloc[i, j] = url_unescape(val['ch'])
    if val['action'] == 'new_column':
        columns_name.append('New column')
        data[str(len(data.columns))] = ''
    if val['action'] == 'del_column':
        j = int(val['j'])
        del columns_name[j]
        data = data.drop(data.columns[j], axis=1)
    if val['action'] == 'new_row':
        data.loc[len(data)] = [''] * len(data.columns)
    if val['action'] == 'del_row':
        i = int(val['i'])
        data.drop(i, inplace=True)
        # reset_index returns a copy unless inplace=True; the original
        # discarded the result, so the index was never actually reset
        data.reset_index(drop=True, inplace=True)
    if val['action'] == 'save_csv':
        data2 = data.copy()
        data2.columns = columns_name
        data2.to_csv('data.csv', index=False)
    if val['action'] == 'reload_csv':
        read_data()
    self.finish("{}")
def get(self, secure, netloc, url):
    proto = 'http' + secure
    netloc = url_unescape(netloc)
    if '/?' in url:
        url, query = url.rsplit('/?', 1)
    else:
        query = None
    remote_url = u"{}://{}/{}".format(proto, netloc, quote(url))
    if query:
        remote_url = remote_url + '?' + query
    if not url.endswith('.ipynb'):
        # this is how we handle relative links (files/ URLs) in notebooks
        # if it's not a .ipynb URL and it is a link from a notebook,
        # redirect to the original URL rather than trying to render it as a notebook
        refer_url = self.request.headers.get('Referer', '').split('://')[-1]
        if refer_url.startswith(self.request.host + '/url'):
            self.redirect(remote_url)
            return
    parse_result = urlparse(remote_url)
    robots_url = parse_result.scheme + "://" + parse_result.netloc + "/robots.txt"
    public = False  # Assume non-public
    try:
        robots_response = yield self.fetch(robots_url)
        robotstxt = response_text(robots_response)
        rfp = robotparser.RobotFileParser()
        rfp.set_url(robots_url)
        rfp.parse(robotstxt.splitlines())
        public = rfp.can_fetch('*', remote_url)
    except httpclient.HTTPError as e:
        app_log.debug("Robots.txt not available for {}".format(remote_url),
                      exc_info=True)
        public = True
    except Exception as e:
        app_log.error(e)
    response = yield self.fetch(remote_url)
    try:
        nbjson = response_text(response, encoding='utf-8')
    except UnicodeDecodeError:
        app_log.error("Notebook is not utf8: %s", remote_url, exc_info=True)
        raise web.HTTPError(400)
    yield self.finish_notebook(nbjson, download_url=remote_url,
                               msg="file from url: %s" % remote_url,
                               public=public,
                               request=self.request,
                               format=self.format)
def get_previous_queries(self):
    previous_queries = self.get_cookie("queries", None)
    if previous_queries:
        try:
            previous_queries = json.loads(url_unescape(previous_queries))
        except Exception:
            previous_queries = []
    else:
        previous_queries = []
    return previous_queries
def get(self, player):
    try:
        time_offset = int(url_unescape(self.get_cookie("time_offset")))
    except:
        time_offset = 0
    self.render(
        "graph.html",
        player=player,
        graph_data=get_graph_data(player),
        time_offset=time_offset)
def get(self):
    self._context.title = "Submit project"
    website = self.request.arguments.get("url", [""])[0]
    if website:
        http = AsyncHTTPClient()
        http.fetch(escape.url_unescape(website), self._on_fetch)
    else:
        self._context.metainfos = {}
        self.render(self._submit_template)
def _unquote_or_none(s: Optional[str]) -> Optional[bytes]:  # noqa: F811
    """None-safe wrapper around url_unescape to handle
    unmatched optional groups correctly.

    Note that args are passed as bytes so the handler can
    decide what encoding to use.
    """
    if s is None:
        return s
    return url_unescape(s, encoding=None, plus=False)
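
A small illustration of why the wrapper above passes encoding=None and plus=False, using only tornado.escape; the sample path segment is made up:

from tornado.escape import url_unescape

# encoding=None returns raw bytes, deferring the decode to the handler;
# plus=False keeps a literal '+' in a path segment instead of turning it into a space.
assert url_unescape('a+b%2Fc', encoding=None, plus=False) == b'a+b/c'
assert url_unescape('a+b', plus=True) == 'a b'  # query-string semantics, for contrast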
def post(self):
    fm = ProfileForm(self)
    self._context.openid_name = const.OpenID.NAME[self.current_user.openid_api]
    next = escape.url_unescape(fm._parmas.get("next", ""))
    if not next:
        next = "/"
    self._context.next = next
    if fm.validate():
        self.redirect(self._context.next)
    else:
        fm.render("user/profile.html", const=const)
def put(self):
    pic_content = requests.get(url_unescape(self.arg.url)).content
    # hand-built multipart body; the closing boundary must match the opening
    # one (the original used a different number in the trailer)
    h = (b'-----------------------------311092004222736\r\n'
         b'Content-Disposition: form-data; name="file"; filename="logo1.png"\r\n'
         b'Content-Type: image/png\r\n\r\n')
    f = b'\r\n-----------------------------311092004222736--\r\n'
    try:
        response = requests.post(CONFIG.FD_CHAT_SERVER + '/file/uploader',
                                 data=h + pic_content + f)
        j = json.loads(response.text)
        img = j['md5'] + '.png'
        r = DBPersonal().modifyPersonalInfo(self.arg.user_id, 2, None, img, None, None)
        self.write({'is_success': r, 'url': img})
    except Exception as e:
        self.write({'is_success': False, 'err': str(e)})
def get(self, score_id):
    score = get_score(score_id)
    if score is None:
        raise tornado.web.HTTPError(404)
    try:
        time_offset = int(url_unescape(self.get_cookie("time_offset")))
    except:
        time_offset = 0
    try:
        deck = list(reversed(map(tuple, score.game.deck)))
    except:
        deck = []
    need_deck = not deck
    taus = []
    num_taus = []
    tau_times = []
    targets = []
    wrong_properties = []
    players = []
    total_elapsed_time = 0
    for state in score.game.states:
        players.append(state.player.name)
        tau_times.append(state.elapsed_time - total_elapsed_time)
        total_elapsed_time = state.elapsed_time
        space = fingeo.get_space(score.game_type)
        target = space.sum_cards(state.cards)
        targets.append(target)
        wrong_property = get_wrong_property(space, state.cards)
        wrong_properties.append(wrong_property)
        taus.append(state.cards)
        game = Game(score.game_type,
                    deck=reversed(filter(None, state.board)),
                    targets=[target],
                    wrong_properties=[wrong_property])
        num_taus.append(game.count_taus())
        if need_deck:
            for card in state.board:
                if card and not tuple(card) in deck:
                    deck.append(tuple(card))
    (percentile, rank) = get_rank(score.elapsed_time, "alltime", score.num_players,
                                  score.game_type, "all", "exact", None)
    self.render(
        "recap.html",
        players=map(lambda x: x.name, score.players),
        num_taus=zip(tau_times, map(str, num_taus), players),
        avg_taus=sum(num_taus) / float(len(num_taus)),
        score=score,
        time_offset=time_offset,
        game_type_info=dict(GAME_TYPE_INFO),
        percentile=percentile,
        time=score.elapsed_time,
        rank=rank)
def open(self, game_id):
    self.opened = False
    game_id = int(game_id)
    self.name = url_unescape(self.get_secure_cookie("name"))
    try:
        self.game = lobby.game_id_to_game[game_id]
    except KeyError:
        self.close()
        return
    self.game.open_game_socket(self)
    self.opened = True
def post(self):
    session = url_unescape(self.get_argument("session", None))
    sessions_lock.acquire()
    if (not session) or (session not in sessions):
        print "ignoring ping from non-session"
        self.write("Error")
        sessions_lock.release()
        return
    else:
        t = sessions[session]
        sessions_timer[session] = time.time()
        sessions_lock.release()
        print "handled ping"
def get(self, error=None, message=None):
    if self.get_argument("action", None) == 'logout':
        self.clear_cookie(self.cookie_name)
        self.redirect('/')
        return
    email = self.get_argument("email", None)
    if email:
        email = url_unescape(email)
    self.render(resource_filename("kew", "pe/templates/login.html"),
                email=email, error=error, message=message)
def get(self, task):
    task = escape.url_unescape(task)
    if task not in self.server.tasks:
        self.send_error(404)
        return
    with log_errors():
        self.render(
            "task.html",
            title="Task: " + task,
            Task=task,
            scheduler=self.server,
            **merge(self.server.__dict__, ns, self.extra, rel_path_statics),
        )
def post(self):
    params = json_decode(url_unescape(self.request.body))
    source = urlparse(self.request.headers['Referer']).path
    set = params['set']
    resize = params['resize']
    uid = uuid.uuid4().hex
    for raw in params['links']:
        DownList.create(source=source, raw=raw, set=set, uid=uid, resize=resize)
    w = json_encode('{}'.format(len(params['links'])))
    self.write(w)
async def get(self, key):
    with log_errors():
        key = escape.url_unescape(key)
        call_stack = await self.server.get_call_stack(keys=[key])
        if not call_stack:
            self.write("<p>Task not actively running. "
                       "It may be finished or not yet started</p>")
        else:
            self.render(
                "call-stack.html",
                title="Call Stack: " + key,
                call_stack=call_stack,
                **merge(self.extra, rel_path_statics),
            )
def environ(request):
    """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
    """
    # build an environ dict for the WSGI application
    hostport = request.host.split(":")
    if len(hostport) == 2:
        host = hostport[0]
        port = int(hostport[1])
    else:
        host = request.host
        port = 443 if request.protocol == "https" else 80
    environ = {
        "REQUEST_METHOD": request.method,
        "SCRIPT_NAME": "",
        "PATH_INFO": to_wsgi_str(
            escape.url_unescape(request.path, encoding=None, plus=False)),
        "QUERY_STRING": request.query,
        "REMOTE_ADDR": request.remote_ip,
        "SERVER_NAME": host,
        "SERVER_PORT": str(port),
        "SERVER_PROTOCOL": request.version,
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": request.protocol,
        "wsgi.input": BytesIO(escape.utf8(request.body)),
        "wsgi.errors": sys.stderr,
        "wsgi.multithread": False,
        "wsgi.multiprocess": True,
        "wsgi.run_once": False,
    }
    if "Content-Type" in request.headers:
        environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
    if "Content-Length" in request.headers:
        environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
    for key, value in request.headers.items():
        environ["HTTP_" + key.replace("-", "_").upper()] = value
    return environ
def get(self, worker):
    worker = escape.url_unescape(worker)
    if worker not in self.server.workers:
        self.send_error(404)
        return
    with log_errors():
        self.render(
            "worker.html",
            title="Worker: " + worker,
            scheduler=self.server,
            Worker=worker,
            **merge(self.server.__dict__, ns, self.extra, rel_path_statics),
        )
def redirect(self, url, *args, **kwargs):
    purl = urlparse(url)
    eurl = urlunparse((
        purl.scheme,
        purl.netloc,
        "/".join([
            url_escape(url_unescape(p), plus=False)
            for p in purl.path.split("/")
        ]),
        purl.params,
        purl.query,
        purl.fragment,
    ))
    return super().redirect(eurl, *args, **kwargs)
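
A quick illustration of what the redirect override above does to a path, assuming only tornado.escape and urllib.parse; the sample URL is made up:

from urllib.parse import urlparse, urlunparse
from tornado.escape import url_escape, url_unescape

url = "http://example.com/a b/c%20d?q=1"
purl = urlparse(url)
# each segment is decoded then re-encoded, so raw and pre-escaped
# segments both come out consistently percent-encoded
path = "/".join(url_escape(url_unescape(p), plus=False) for p in purl.path.split("/"))
print(urlunparse((purl.scheme, purl.netloc, path, purl.params, purl.query, purl.fragment)))
# -> http://example.com/a%20b/c%20d?q=1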
def post(self):
    session = url_unescape(self.get_argument("session", None))
    sessions_lock.acquire()
    if (not session) or (session not in sessions):
        print "ignoring input from non-session"
        self.write("Error")
        self.finish()
        sessions_lock.release()
        return
    else:
        print "getting input"
        t = sessions[session]
        sessions_timer[session] = time.time()
        sessions_lock.release()
    command = self.get_argument("command", default="")
    t.game_context.io.receive_input(str(command))
    self.write("received")
def post(self): tag = self.get_argument("tag") if not tag: tag = None projects = [url_unescape(p).encode('utf-8') for p in self.get_arguments("projects")] num_swabs = map(int, self.get_arguments("swabs")) num_kits = map(int, self.get_arguments("kits")) kits = [] fields = "" try: kits = db.create_ag_kits(zip(num_swabs, num_kits), tag, projects) fields = ','.join(kits[0]._fields) except Exception as e: raise HTTPError(500, "ERROR: %s" % e.message.encode('utf-8')) self.write({'kitinfo': kits, 'fields': fields})
async def get_notebook_data(self, secure, netloc, url):
    proto = 'http' + secure
    netloc = url_unescape(netloc)
    if '/?' in url:
        url, query = url.rsplit('/?', 1)
    else:
        query = None
    remote_url = u"{}://{}/{}".format(proto, netloc, quote(url))
    if query:
        remote_url = remote_url + '?' + query
    if not url.endswith('.ipynb'):
        # this is how we handle relative links (files/ URLs) in notebooks
        # if it's not a .ipynb URL and it is a link from a notebook,
        # redirect to the original URL rather than trying to render it as a notebook
        refer_url = self.request.headers.get('Referer', '').split('://')[-1]
        if refer_url.startswith(self.request.host + '/url'):
            self.redirect(remote_url)
            return
    parse_result = urlparse(remote_url)
    robots_url = parse_result.scheme + "://" + parse_result.netloc + "/robots.txt"
    public = False  # Assume non-public
    try:
        robots_response = await self.fetch(robots_url)
        robotstxt = response_text(robots_response)
        rfp = robotparser.RobotFileParser()
        rfp.set_url(robots_url)
        rfp.parse(robotstxt.splitlines())
        public = rfp.can_fetch('*', remote_url)
    except httpclient.HTTPError as e:
        self.log.debug("Robots.txt not available for {}".format(remote_url),
                       exc_info=True)
        public = True
    except Exception as e:
        self.log.error(e)
    return remote_url, public
def post(self):
    # create barcodes
    msg = ""
    newbc = []
    assignedbc = []
    projects = []
    action = self.get_argument("action")
    num_barcodes = int(self.get_argument('numbarcodes'))
    if action == "create":
        newbc = db.create_barcodes(num_barcodes)
        msg = ("%d Barcodes created! Please wait for barcode download"
               % num_barcodes)
    elif action == "assign":
        projects = [url_unescape(p).encode('utf-8')
                    for p in self.get_arguments('projects')]
        new_project = self.get_argument('newproject').strip()
        try:
            if new_project:
                db.create_project(new_project)
                projects.append(new_project)
            assignedbc = db.assign_barcodes(num_barcodes, projects)
        except ValueError as e:
            msg = u"ERROR! %s" % e.message
        else:
            projects = [p.decode('utf-8') for p in projects]
            tmp = u"%d barcodes assigned to %s, please wait for download."
            msg = tmp % (num_barcodes, ", ".join(projects))
    else:
        raise HTTPError(400, 'Unknown action: %s' % action)
    project_names = db.getProjectNames()
    remaining = len(db.get_unassigned_barcodes())
    self.render("ag_new_barcode.html", currentuser=self.current_user,
                projects=project_names, remaining=remaining,
                msg=msg, newbc=newbc, assignedbc=assignedbc,
                assign_projects=", ".join(projects))
def open(self, token):
    global game_tokens
    token = url_unescape(token)
    if token not in game_tokens:
        if "default" in game_tokens:
            token = "default"
        else:
            self.write_message(json.dumps({
                'type': 'error',
                'error': {
                    'type': 'Invalid Token',
                    'msg': 'Invalid Token'
                }
            }))
            print(game_tokens)
            self.close()
            return
    game = game_tokens[token]
    self.game = game
    self.player = game.get_player_from_token(token)
    self.player.connection = self
    self.write_message(json.dumps({
        'type': 'assign_player',
        'player': self.player.as_dict(),
    }))
    self.write_message(json.dumps({
        'type': 'game',
        'game': game.as_dict(),
    }))
    game.recv_move(self.player, {'type': 'reconnect'})
    self.write_message(json.dumps({
        'type': 'can_trade',
        'can_trade': game.can_trade,
    }))
    log.debug(self.player.name + " joined " + game.name)
def get(self, slug=None, method=None):
    """Handle get request."""
    if slug is None:
        self.list()
        return
    slug = url_unescape(slug)
    if slug == 'create':
        self.create()
    else:
        if method is None:
            self.show(slug)
        elif method == 'edit':
            self.edit(slug)
        elif method == 'create':
            self.create(slug)
        else:
            self.render("404.html")
def post(self):
    body = url_unescape(self.request.body)
    key_values = dict(key_value.split("=")
                      for key_value in body.split("&"))
    for whole_key, value in key_values.items():
        split = whole_key.split("-")
        if len(split) == 2:
            continue
        day, period, thing = split
        DATA[days.index(day)]["periods"][periods.index(period)][thing] = value
    with open(DATA_FILE_NAME, "w") as fw:
        json.dump(DATA, fw, indent=4)
    self._render_time_template()
def post(self):
    skid = self.current_user
    tl = text_locale['handlers']
    ag_login_id = ag_data.get_user_for_kit(skid)
    survey_id = self.get_argument('survey_id', None)
    survey_type = self.get_argument('type')
    participant_name = url_unescape(self.get_argument('participant_name'))
    sitebase = media_locale['SITEBASE']
    if not survey_id:
        survey_id = binascii.hexlify(os.urandom(8))
    sec_survey = self.sec_surveys[survey_type]
    survey_class = make_survey_class(sec_survey.groups[0],
                                     survey_type='SecondarySurvey')
    form = survey_class()
    form.process(data=self.request.arguments)
    data = {'questions': form.data}
    consent = {
        'login_id': ag_login_id,
        'participant_name': participant_name,
        'survey_id': survey_id,
        'secondary': True
    }
    redis.hset(survey_id, 'consent', dumps(consent))
    redis.hset(survey_id, 0, dumps(data))
    redis.expire(survey_id, 86400)
    store_survey(sec_survey, survey_id)
    if survey_id:
        message = urlencode([
            ('errmsg', tl['SUCCESSFULLY_EDITED'] % participant_name)
        ])
    else:
        message = urlencode([
            ('errmsg', tl['SUCCESSFULLY_ADDED'] % participant_name)
        ])
    url = '%s/authed/portal/?%s' % (sitebase, message)
    self.redirect(url)
def on_headers_recd(self, line: str) -> None:
    if not self.need_content_length and not self.need_content_disposition:
        return
    line = line.strip()
    rc_match = re.match(r"HTTP/\d.?\d? (\d+)", line)
    if rc_match is not None:
        self.request_ok = rc_match.group(1) == "200"
        return
    if not self.request_ok:
        return
    parts = line.split(":", 1)
    if len(parts) < 2:
        return
    hname = parts[0].strip().lower()
    hval = parts[1].strip()
    if hname == "content-length" and self.need_content_length:
        self.download_size = int(hval)
        self.need_content_length = False
        logging.debug(f"Content-Length header received: "
                      f"size = {self.download_size}")
    elif (hname == "content-disposition" and
          self.need_content_disposition):
        fnr = r"filename[^;\n=]*=(['\"])?(utf-8\'\')?([^\n;]*)(?(1)\1|)"
        matches: List[Tuple[str, str, str]] = re.findall(fnr, hval)
        is_utf8 = False
        for (_, encoding, fname) in matches:
            if encoding.startswith("utf-8"):
                # Prefer the utf8 filename if included
                self.filename = url_unescape(fname, encoding="utf-8", plus=False)
                is_utf8 = True
                break
            self.filename = fname
        self.need_content_disposition = False
        # Use the filename extracted from the content-disposition header
        self.dest_file = self.dest_file.parent.joinpath(self.filename)
        logging.debug("Content-Disposition header received: filename = "
                      f"{self.filename}, utf8: {is_utf8}")
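
For reference, the shape of header value the regex above targets, with url_unescape doing the RFC 5987 percent-decoding; the filename is illustrative:

from tornado.escape import url_unescape

# An RFC 5987 extended parameter: filename*=utf-8''<percent-encoded bytes>
hval = "attachment; filename*=utf-8''na%C3%AFve%20name.txt"
encoded = hval.split("utf-8''", 1)[1]
print(url_unescape(encoded, encoding="utf-8", plus=False))  # -> naïve name.txt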
def get(self):
    url = url_unescape(self.request.path)
    template_data = TEMPLATE.replace("FOO", url)
    t = tornado.template.Template(template_data)
    self.write(t.generate())
def get(self, topic):
    cserver = self.get_server()
    cserver.add_subscriber(escape.url_unescape(topic), True)
run_complete_regression_test()
sys.exit()

try:
    node_list = json.loads(get_nodes())
except:
    print("Error accessing api.openwifimap.net")
    node_list = None

for nodename in os.listdir('/var/opt/ffmapdata/'):
    if nodename.endswith(".json"):
        try:
            nodefile = '/var/opt/ffmapdata/' + nodename
            with open(nodefile, 'r') as myfile:
                data = myfile.read()
            nodename = nodename.replace(".json", "")
            nodename = url_unescape(nodename)
            process_node_json(nodename, data)
        except Exception as e:
            print("Error processing node %s (%s), skipping" % (nodename, str(e)))

timestamp = datetime.datetime.utcnow().isoformat()
if node_list is not None:
    http_client = httpclient.AsyncHTTPClient()
    for row in node_list["rows"]:
        url = "https://api.openwifimap.net/db/" + row["id"].strip()
        nodejson = cache.get(url, None)
        if nodejson is None:
            i += 1
            http_client.fetch(
async def get(self):
    url = url_unescape(self.get_query_argument("clone_from"))
    if not url.endswith(".ipynb"):
        raise web.HTTPError(415)
    # Try to find kernelspec at designated source location
    # This is the root of the git repository if notebook is on GitHub
    try:
        kernelspec_source = self.get_query_argument("kernelspec_source")
        kernelspec = await self.fetch_utf8_file(
            os.path.join(kernelspec_source, "kernel.json")
        )
    except Exception as e:
        global_kernelspec_error = e
    else:
        global_kernelspec_error = None
    # Try to find kernelspec in same directory as notebook
    # If it exists, overwrite any existing kernelspec
    dirname = os.path.dirname(url)
    try:
        kernelspec = await self.fetch_utf8_file(
            os.path.join(dirname, "kernel.json")
        )
    except Exception as e:
        local_kernelspec_error = e
    else:
        local_kernelspec_error = None
    # If kernelspec can't be found at either location, report warning
    if (
        global_kernelspec_error is not None
        and local_kernelspec_error is not None
    ):
        self.log.warning("Failed to load kernel.json")
        self.log.warning(global_kernelspec_error)
        self.log.warning(local_kernelspec_error)
        kernelspec = None
    try:
        kernel_name = self.get_query_argument("kernel_name")
    except web.MissingArgumentError:
        kernel_name = os.path.basename(dirname)
    else:
        # If kernel_name is specified and kernelspec found locally
        # Avoid overwriting any global kernelspecs with same name
        if local_kernelspec_error is None:
            # Deal with edge case where global and local kernelspec are the same
            if kernelspec_source != dirname:
                kernel_name += "-{}".format(
                    dirname.replace("/", "_").replace(".", "_")
                )
    # Try to install the kernelspec, but even if this fails clone notebook anyway
    try:
        self.clone_kernelspec(kernelspec, kernel_name)
    except Exception as e:
        self.log.warning("Failed to install kernelspec.")
        self.log.warning(e)
    clone_to = self.get_query_argument("clone_to", default="/")
    self.log.info("Cloning notebook from URL: %s", url)
    nb = await self.fetch_utf8_file(url)
    self.clone_to_directory(nb, url, clone_to)
def delete(self, topic):
    cserver = self.get_server()
    cserver.add_subscriber(escape.url_unescape(topic), False)
def get_movie_link_cache(self, link, cache_time=3600):
    try:
        # youtube
        if 'youtube.com' in link[:28] or 'youtu.be' in link[:16]:
            if 'youtu.be/' in link[:17]:
                vd_id = link.split('youtu.be/', 2)
                if len(vd_id) > 1:
                    return "https://www.youtube.com/watch?v=%s" % vd_id[1]
            return link
        # picasaweb
        elif 'picasaweb' in link[:18]:
            # find cache
            cache_result = yield self.site.cache.get(source=link, lock=True, lock_count=30)
            if cache_result and 'data' in cache_result:
                return cache_result['data']
            source = yield function.http_client(link, c_delay=0)
            source = escape.json_decode(source)
            json = source['feed']['media']['content']
            expire = 0
            video = []
            for v in json:
                if not expire and 'expire' in v['url']:
                    expire = int(v['url'].split('expire=', 1)[1].split('&', 1)[0])
                if 'type' in v and (v['type'].startswith('video/')
                                    or v['type'].startswith('application/')):
                    video.append(v)
            # store cache
            cache_time = cache_time + int(time())
            if expire > cache_time:
                self.set_movie_link_cache(link, video, expire - cache_time)
            return video
        elif 'docs.google.com' in link[:25]:
            source = yield function.http_client(link, c_delay=0)
            s_format = source.split('["fmt_list","', 1)[1].split('"]', 1)[0].split(',')
            v_format = {}
            for f in s_format:
                f = f.split('/', 2)
                size = f[1].split('x')
                v_format[f[0]] = size
            video = []
            s_video = source.split('["url_encoded_fmt_stream_map","', 1)[1].split('"]', 1)[0]
            s_video = escape.json_decode('["%s"]' % s_video)[0].split(',itag=')
            for v in s_video:
                itag = v.split('&url=', 1)[0]
                if '=' in itag:
                    itag = itag.rsplit('=', 1)[1]
                if itag in v_format:
                    iurl = v.split('&url=', 1)[1].split('&', 1)[0]
                    itype = escape.url_unescape(
                        v.split('&type=', 1)[1]).split('&', 1)[0].split(';', 1)[0]
                    video.append({
                        "url": escape.url_unescape(iurl),
                        "width": int(v_format[itag][0]),
                        "height": int(v_format[itag][1]),
                        "type": itype
                    })
            # no cache here: the page's own link data is already valid for ~3600s
            return video
        # https://plus.google.com/_/photos/lightbox/photo/117469308172362315957/6068501600309862578?soc-app=2&cid=0&soc-platform=1&ozv=es_oz_20141009.12_p1&avw=phst%3A31&f.sid=-7178491292173930655&_reqid=235130&rt=j
        # https://plus.google.com/photos/103168733314236184036/albums/6063013498393185633/6063426189374545202?pid=6063426189374545202&oid=103168733314236184036
        # https://plus.google.com/_/photos/lightbox/photo/117224281821248286118/6072825042706372306
        elif 'plus.google.com' in link[:25]:
            # find cache
            cache_result = yield self.site.cache.get(source=link, lock=True, lock_count=30)
            if cache_result and 'data' in cache_result:
                return cache_result['data']
            source = yield function.http_client(link, c_delay=0)
            # get photoid
            source = source.split(',"6063426189374545202",', 1)[1] \
                           .split('video.googleusercontent.com', 1)[0] \
                           .split('redirector.googlevideo.com')
            expire = 0
            video = []
            for i, v in enumerate(source):
                if i > 0:
                    size = source[i - 1].rsplit('[', 1)[1].split(',')
                    url = "https://redirector.googlevideo.com" + v.split('"]', 1)[0]
                    url = escape.url_unescape(escape.json_decode('["%s"]' % url)[0])
                    if not expire and 'expire' in url:
                        expire = int(url.split('expire=', 1)[1].split('&', 1)[0])
                    video.append({
                        "url": url,
                        "width": int(size[1]),
                        "height": int(size[2]),
                        "type": "video/mpeg4"
                    })
            # store cache
            cache_time = cache_time + int(time())
            if expire > cache_time:
                self.set_movie_link_cache(link, video, expire - cache_time)
            return video
    except:
        traceback.print_exc(file=sys.stdout)
def get(self, worker):
    worker = escape.url_unescape(worker)
    with log_errors():
        self.render('worker.html',
                    title='Worker: ' + worker,
                    Worker=worker,
                    **toolz.merge(self.server.__dict__, ns, self.extra))
def put(self, path, request):  # pylint: disable=W0613
    """
    Implementation of the HTTP PUT verb for OdinDataAdapter

    :param path: URI path of the PUT request
    :param request: Tornado HTTP request object
    :return: ApiAdapterResponse object to be returned to the client
    """
    status_code = 200
    response = {}
    logging.debug("PUT path: %s", path)
    logging.debug("PUT request: %s", request)
    logging.debug("PUT request.body: %s", str(escape.url_unescape(request.body)))

    request_command = path.strip('/')

    # Request should either be a config file or else start with config/
    if request_command == 'config/config_file':
        # Special case when we have been asked to load a config file to submit to clients
        # The config file should contain a JSON representation of a list of config dicts,
        # one for each client
        self._config_file = str(escape.url_unescape(request.body)).strip('"')
        logging.error("Loading configuration file {}".format(self._config_file))
        try:
            with open(self._config_file) as config_file:
                config_obj = json.load(config_file)
                # Verify the number of items in the config file match the length of clients
                if len(self._clients) != len(config_obj):
                    logging.error(
                        "Mismatch between config items [{}] and number of clients [{}]"
                        .format(len(config_obj), len(self._clients)))
                    status_code = 503
                    response = {'error': OdinDataAdapter.ERROR_PUT_MISMATCH}
                else:
                    for client, config_item in zip(self._clients, config_obj):
                        try:
                            logging.error("Sending configuration {} to client"
                                          .format(str(config_item)))
                            client.send_configuration(config_item)
                        except Exception as err:
                            logging.debug(OdinDataAdapter.ERROR_FAILED_TO_SEND)
                            logging.error("Error: %s", err)
                            status_code = 503
                            response = {'error': OdinDataAdapter.ERROR_FAILED_TO_SEND}
        except IOError as io_error:
            logging.error("Failed to open configuration file: {}".format(io_error))
            status_code = 503
            response = {'error': "Failed to open configuration file: {}".format(io_error)}
        except ValueError as value_error:
            logging.error("Failed to parse json config: {}".format(value_error))
            status_code = 503
            response = {'error': "Failed to parse json config: {}".format(value_error)}
    elif request_command.startswith("config/"):
        request_command = remove_prefix(request_command, "config/")  # Take the rest of the URI
        logging.debug("Configure URI: %s", request_command)
        client_index = -1
        # Check to see if the URI finishes with an index
        # eg hdf5/frames/0
        # If it does then we are only interested in setting that single client
        uri_items = request_command.split('/')
        # Check for an integer
        try:
            index = int(uri_items[-1])
            if index >= 0:
                # This is a valid index so remove the value from the URI
                request_command = remove_suffix(request_command, "/" + uri_items[-1])
                # Set the client index for submitting config to
                client_index = index
                logging.debug("URI without index: %s", request_command)
        except ValueError:
            # This is OK, there is simply no index provided
            pass
        try:
            parameters = json.loads(str(escape.url_unescape(request.body)))
        except ValueError:
            # If the body could not be parsed into an object it may be a simple string
            parameters = str(escape.url_unescape(request.body))

        # Check if the parameters object is a list
        if isinstance(parameters, list):
            logging.debug("List of parameters provided: %s", parameters)
            # Check the length of the list matches the number of clients
            if len(parameters) != len(self._clients):
                status_code = 503
                response['error'] = OdinDataAdapter.ERROR_PUT_MISMATCH
            elif client_index != -1:
                # A list of items has been supplied but also an index has been specified
                logging.error("URI contains an index but parameters supplied as a list")
                status_code = 503
                response['error'] = OdinDataAdapter.ERROR_PUT_MISMATCH
            else:
                # Loop over the clients and parameters, sending each one
                for client, param_set in zip(self._clients, parameters):
                    if param_set:
                        try:
                            command, parameters = OdinDataAdapter.uri_params_to_dictionary(
                                request_command, param_set)
                            client.send_configuration(parameters, command)
                        except Exception as err:
                            logging.debug(OdinDataAdapter.ERROR_FAILED_TO_SEND)
                            logging.error("Error: %s", err)
                            status_code = 503
                            response = {'error': OdinDataAdapter.ERROR_FAILED_TO_SEND}
        else:
            logging.debug("Single parameter set provided: %s", parameters)
            if client_index == -1:
                # We are sending the value to all clients
                command, parameters = OdinDataAdapter.uri_params_to_dictionary(
                    request_command, parameters)
                for client in self._clients:
                    try:
                        client.send_configuration(parameters, command)
                    except Exception as err:
                        logging.debug(OdinDataAdapter.ERROR_FAILED_TO_SEND)
                        logging.error("Error: %s", err)
                        status_code = 503
                        response = {'error': OdinDataAdapter.ERROR_FAILED_TO_SEND}
            else:
                # A client index has been specified
                try:
                    command, parameters = OdinDataAdapter.uri_params_to_dictionary(
                        request_command, parameters)
                    self._clients[client_index].send_configuration(parameters, command)
                except Exception as err:
                    logging.debug(OdinDataAdapter.ERROR_FAILED_TO_SEND)
                    logging.error("Error: %s", err)
                    status_code = 503
                    response = {'error': OdinDataAdapter.ERROR_FAILED_TO_SEND}

    return ApiAdapterResponse(response, status_code=status_code)
def get(self, user, repo, ref, path):
    raw_url = u"https://raw.githubusercontent.com/{user}/{repo}/{ref}/{path}".format(
        user=user, repo=repo, ref=ref, path=quote(path))
    blob_url = u"{github_url}{user}/{repo}/blob/{ref}/{path}".format(
        user=user, repo=repo, ref=ref, path=quote(path), github_url=_github_url())
    with self.catch_client_error():
        tree_entry = yield self.github_client.get_tree_entry(
            user, repo, path=url_unescape(path), ref=ref)
    if tree_entry['type'] == 'tree':
        tree_url = "/github/{user}/{repo}/tree/{ref}/{path}/".format(
            user=user, repo=repo, ref=ref, path=quote(path),
        )
        app_log.info("%s is a directory, redirecting to %s",
                     self.request.path, tree_url)
        self.redirect(tree_url)
        return
    # fetch file data from the blobs API
    with self.catch_client_error():
        response = yield self.github_client.fetch(tree_entry['url'])
    data = json.loads(response_text(response))
    contents = data['content']
    if data['encoding'] == 'base64':
        # filedata will be bytes
        filedata = base64_decode(contents)
    else:
        # filedata will be unicode
        filedata = contents
    if path.endswith('.ipynb'):
        dir_path = path.rsplit('/', 1)[0]
        base_url = "/github/{user}/{repo}/tree/{ref}".format(
            user=user, repo=repo, ref=ref,
        )
        breadcrumbs = [{
            'url': base_url,
            'name': repo,
        }]
        breadcrumbs.extend(self.breadcrumbs(dir_path, base_url))
        try:
            # filedata may be bytes, but we need text
            if isinstance(filedata, bytes):
                nbjson = filedata.decode('utf-8')
            else:
                nbjson = filedata
        except Exception as e:
            app_log.error("Failed to decode notebook: %s", raw_url, exc_info=True)
            raise web.HTTPError(400)
        yield self.finish_notebook(nbjson, raw_url,
                                   provider_url=blob_url,
                                   breadcrumbs=breadcrumbs,
                                   msg="file from GitHub: %s" % raw_url,
                                   public=True,
                                   format=self.format,
                                   request=self.request,
                                   **PROVIDER_CTX)
    else:
        mime, enc = mimetypes.guess_type(path)
        self.set_header("Content-Type", mime or 'text/plain')
        self.cache_and_finish(filedata)
def get(self, pathname):
    cserver = self.get_server()
    value = cserver.get_value(escape.url_unescape(pathname))
    self.content_type = 'application/javascript'
    self.write(value)
def put(self, path, request):  # pylint: disable=W0613
    """
    Implementation of the HTTP PUT verb for OdinDataAdapter

    :param path: URI path of the PUT request
    :param request: Tornado HTTP request object
    :return: ApiAdapterResponse object to be returned to the client
    """
    status_code = 200
    response = {}
    logging.debug("PUT path: %s", path)
    logging.debug("PUT request: %s", request)

    # First check if we are interested in the config items
    #
    # Store these parameters locally:
    # config/hdf/file/path
    # config/hdf/file/name
    # config/hdf/file/extension
    #
    # When this arrives write all params into a single IPC message
    # config/hdf/write
    try:
        self.clear_error()
        if path in self._param:
            logging.debug("Setting {} to {}".format(
                path, str(escape.url_unescape(request.body)).replace('"', '')))
            if path == 'config/hdf/frames':
                self._param[path] = int(
                    str(escape.url_unescape(request.body)).replace('"', ''))
            else:
                self._param[path] = str(
                    escape.url_unescape(request.body)).replace('"', '')
        # Merge with the configuration store
        elif path == self._command:
            write = bool_from_string(str(escape.url_unescape(request.body)))
            config = {'hdf': {'write': write}}
            logging.debug("Setting {} to {}".format(path, config))
            if write:
                # Before attempting to write files, make some simple error checks
                # Check the file path is valid
                if not os.path.isdir(str(self._param['config/hdf/file/path'])):
                    raise RuntimeError("Invalid path specified [{}]".format(
                        str(self._param['config/hdf/file/path'])))
                # Check the filename exists
                if str(self._param['config/hdf/file/name']) == '':
                    raise RuntimeError("File name must not be empty")
                # First setup the rank for the frameProcessor applications
                self.setup_rank()
                rank = 0
                for client in self._clients:
                    # Send the configuration required to setup the acquisition
                    # The file path is the same for all clients
                    parameters = {
                        'hdf': {
                            'frames': self._param['config/hdf/frames']
                        }
                    }
                    # Send the number of frames first
                    client.send_configuration(parameters)
                    parameters = {
                        'hdf': {
                            'acquisition_id': self._param['config/hdf/acquisition_id'],
                            'file': {
                                'path': str(self._param['config/hdf/file/path']),
                                'name': str(self._param['config/hdf/file/name']),
                                'extension': str(self._param['config/hdf/file/extension'])
                            }
                        }
                    }
                    client.send_configuration(parameters)
                    rank += 1
            for client in self._clients:
                # Send the configuration required to start the acquisition
                client.send_configuration(config)
        else:
            return super(FrameProcessorAdapter, self).put(path, request)
    except Exception as ex:
        logging.error("Error: %s", ex)
        self.set_error(str(ex))
        status_code = 503
        response = {'error': str(ex)}

    return ApiAdapterResponse(response, status_code=status_code)
def get(self):
    '''This handles GET requests.

    Returns the requested checkplot pickle's information as JSON.
    Requires a pre-shared secret `key` argument for the operation to
    complete successfully. This is obtained from a command-line argument.

    '''
    provided_key = self.get_argument('key', default=None)

    if not provided_key:
        LOGGER.error('standalone URL hit but no secret key provided')
        retdict = {
            'status': 'error',
            'message': ('standalone URL hit but '
                        'no secret key provided'),
            'result': None,
            'readonly': True
        }
        self.set_status(401)
        self.write(retdict)
        raise tornado.web.Finish()
    else:
        provided_key = xhtml_escape(provided_key)
        if not _time_independent_equals(provided_key, self.secret):
            LOGGER.error('secret key provided does not match known key')
            retdict = {
                'status': 'error',
                'message': ('secret key provided does not '
                            'match known key'),
                'result': None,
                'readonly': True
            }
            self.set_status(401)
            self.write(retdict)
            raise tornado.web.Finish()

    #
    # actually start work here
    #
    LOGGER.info('key auth OK')
    checkplotfname = self.get_argument('cp', default=None)

    if checkplotfname:

        try:
            # do the usual safing
            cpfpath = xhtml_escape(
                base64.b64decode(url_unescape(checkplotfname)))
        except Exception:
            msg = 'could not decode the incoming payload'
            LOGGER.error(msg)
            resultdict = {'status': 'error',
                          'message': msg,
                          'result': None,
                          'readonly': True}
            self.set_status(400)
            self.write(resultdict)
            raise tornado.web.Finish()

        LOGGER.info('loading %s...' % cpfpath)

        if not os.path.exists(cpfpath):
            msg = "couldn't find checkplot %s" % cpfpath
            LOGGER.error(msg)
            resultdict = {'status': 'error',
                          'message': msg,
                          'result': None,
                          'readonly': True}
            self.set_status(404)
            self.write(resultdict)
            raise tornado.web.Finish()

        #
        # load the checkplot
        #

        # this is the async call to the executor
        cpdict = yield self.executor.submit(_read_checkplot_picklefile, cpfpath)

        #####################################
        ## continue after we're good to go ##
        #####################################

        LOGGER.info('loaded %s' % cpfpath)

        # break out the initial info
        objectid = cpdict['objectid']
        objectinfo = cpdict['objectinfo']
        varinfo = cpdict['varinfo']

        if 'pfmethods' in cpdict:
            pfmethods = cpdict['pfmethods']
        else:
            pfmethods = []
            for pfm in PFMETHODS:
                if pfm in cpdict:
                    pfmethods.append(pfm)

        # handle neighbors for this object
        neighbors = []

        if ('neighbors' in cpdict and
                cpdict['neighbors'] is not None and
                len(cpdict['neighbors']) > 0):

            nbrlist = cpdict['neighbors']

            # get each neighbor, its info, and its phased LCs
            for nbr in nbrlist:

                if 'magdiffs' in nbr:
                    nbrmagdiffs = nbr['magdiffs']
                else:
                    nbrmagdiffs = None

                if 'colordiffs' in nbr:
                    nbrcolordiffs = nbr['colordiffs']
                else:
                    nbrcolordiffs = None

                thisnbrdict = {
                    'objectid': nbr['objectid'],
                    'objectinfo': {
                        'ra': nbr['ra'],
                        'decl': nbr['decl'],
                        'xpix': nbr['xpix'],
                        'ypix': nbr['ypix'],
                        'distarcsec': nbr['dist'],
                        'magdiffs': nbrmagdiffs,
                        'colordiffs': nbrcolordiffs
                    }
                }

                try:
                    nbr_magseries = nbr['magseries']['plot']
                    thisnbrdict['magseries'] = nbr_magseries
                except Exception:
                    LOGGER.error("could not load magseries plot for "
                                 "neighbor %s for object %s" %
                                 (nbr['objectid'], cpdict['objectid']))

                try:
                    for pfm in pfmethods:
                        if pfm in nbr:
                            thisnbrdict[pfm] = {
                                'plot': nbr[pfm][0]['plot'],
                                'period': nbr[pfm][0]['period'],
                                'epoch': nbr[pfm][0]['epoch']
                            }
                except Exception:
                    LOGGER.error("could not load phased LC plots for "
                                 "neighbor %s for object %s" %
                                 (nbr['objectid'], cpdict['objectid']))

                neighbors.append(thisnbrdict)

        # load object comments
        if 'comments' in cpdict:
            objectcomments = cpdict['comments']
        else:
            objectcomments = None

        # load the xmatch results, if any
        if 'xmatch' in cpdict:
            objectxmatch = cpdict['xmatch']
        else:
            objectxmatch = None

        # load the colormagdiagram object
        if 'colormagdiagram' in cpdict:
            colormagdiagram = cpdict['colormagdiagram']
        else:
            colormagdiagram = None

        # these are base64 which can be provided directly to JS to
        # generate images (neat!)
        if 'finderchart' in cpdict:
            finderchart = cpdict['finderchart']
        else:
            finderchart = None

        if ('magseries' in cpdict and
                isinstance(cpdict['magseries'], dict) and
                'plot' in cpdict['magseries']):
            magseries = cpdict['magseries']['plot']
            time0 = cpdict['magseries']['times'].min()
            magseries_ndet = cpdict['magseries']['times'].size
        else:
            magseries = None
            time0 = 0.0
            magseries_ndet = 0
            LOGGER.warning("no 'magseries' key present in this "
                           "checkplot, some plots may be broken...")

        if 'status' in cpdict:
            cpstatus = cpdict['status']
        else:
            cpstatus = 'unknown, possibly incomplete checkplot'

        # load the uifilters if present
        if 'uifilters' in cpdict:
            uifilters = cpdict['uifilters']
        else:
            uifilters = {'psearch_magfilters': None,
                         'psearch_sigclip': None,
                         'psearch_timefilters': None}

        # this is the initial dict
        resultdict = {
            'status': 'ok',
            'message': 'found checkplot %s' % os.path.basename(cpfpath),
            'readonly': True,
            'result': {
                'time0': '%.3f' % time0,
                'objectid': objectid,
                'objectinfo': objectinfo,
                'colormagdiagram': colormagdiagram,
                'objectcomments': objectcomments,
                'varinfo': varinfo,
                'uifilters': uifilters,
                'neighbors': neighbors,
                'xmatch': objectxmatch,
                'finderchart': finderchart,
                'magseries': magseries,
                # fallback in case objectinfo doesn't have ndet
                'magseries_ndet': magseries_ndet,
                'cpstatus': cpstatus,
                'pfmethods': pfmethods
            }
        }

        # now get the periodograms and phased LCs
        for key in pfmethods:

            # get the periodogram for this method
            periodogram = cpdict[key]['periodogram']

            # get the phased LC with best period
            if 0 in cpdict[key] and isinstance(cpdict[key][0], dict):
                phasedlc0plot = cpdict[key][0]['plot']
                phasedlc0period = float(cpdict[key][0]['period'])
                phasedlc0epoch = float(cpdict[key][0]['epoch'])
            else:
                phasedlc0plot = None
                phasedlc0period = None
                phasedlc0epoch = None

            # get the associated fitinfo for this period if it exists
            if (0 in cpdict[key] and
                    isinstance(cpdict[key][0], dict) and
                    'lcfit' in cpdict[key][0] and
                    isinstance(cpdict[key][0]['lcfit'], dict)):
                phasedlc0fit = {
                    'method': (cpdict[key][0]['lcfit']['fittype']),
                    'redchisq': (cpdict[key][0]['lcfit']['fitredchisq']),
                    'chisq': (cpdict[key][0]['lcfit']['fitchisq']),
                    'params': (cpdict[key][0]['lcfit']['fitinfo']['finalparams']
                               if 'finalparams' in cpdict[key][0]['lcfit']['fitinfo']
                               else None)
                }
            else:
                phasedlc0fit = None

            # get the phased LC with 2nd best period
            if 1 in cpdict[key] and isinstance(cpdict[key][1], dict):
                phasedlc1plot = cpdict[key][1]['plot']
                phasedlc1period = float(cpdict[key][1]['period'])
                phasedlc1epoch = float(cpdict[key][1]['epoch'])
            else:
                phasedlc1plot = None
                phasedlc1period = None
                phasedlc1epoch = None

            # get the associated fitinfo for this period if it exists
            if (1 in cpdict[key] and
                    isinstance(cpdict[key][1], dict) and
                    'lcfit' in cpdict[key][1] and
                    isinstance(cpdict[key][1]['lcfit'], dict)):
                phasedlc1fit = {
                    'method': (cpdict[key][1]['lcfit']['fittype']),
                    'redchisq': (cpdict[key][1]['lcfit']['fitredchisq']),
                    'chisq': (cpdict[key][1]['lcfit']['fitchisq']),
                    'params': (cpdict[key][1]['lcfit']['fitinfo']['finalparams']
                               if 'finalparams' in cpdict[key][1]['lcfit']['fitinfo']
                               else None)
                }
            else:
                phasedlc1fit = None

            # get the phased LC with 3rd best period
            if 2 in cpdict[key] and isinstance(cpdict[key][2], dict):
                phasedlc2plot = cpdict[key][2]['plot']
                phasedlc2period = float(cpdict[key][2]['period'])
                phasedlc2epoch = float(cpdict[key][2]['epoch'])
            else:
                phasedlc2plot = None
                phasedlc2period = None
                phasedlc2epoch = None

            # get the associated fitinfo for this period if it exists
            if (2 in cpdict[key] and
                    isinstance(cpdict[key][2], dict) and
                    'lcfit' in cpdict[key][2] and
                    isinstance(cpdict[key][2]['lcfit'], dict)):
                phasedlc2fit = {
                    'method': (cpdict[key][2]['lcfit']['fittype']),
                    'redchisq': (cpdict[key][2]['lcfit']['fitredchisq']),
                    'chisq': (cpdict[key][2]['lcfit']['fitchisq']),
                    'params': (cpdict[key][2]['lcfit']['fitinfo']['finalparams']
                               if 'finalparams' in cpdict[key][2]['lcfit']['fitinfo']
                               else None)
                }
            else:
                phasedlc2fit = None

            resultdict['result'][key] = {
                'nbestperiods': cpdict[key]['nbestperiods'],
                'periodogram': periodogram,
                'bestperiod': cpdict[key]['bestperiod'],
                'phasedlc0': {
                    'plot': phasedlc0plot,
                    'period': phasedlc0period,
                    'epoch': phasedlc0epoch,
                    'lcfit': phasedlc0fit,
                },
                'phasedlc1': {
                    'plot': phasedlc1plot,
                    'period': phasedlc1period,
                    'epoch': phasedlc1epoch,
                    'lcfit': phasedlc1fit,
                },
                'phasedlc2': {
                    'plot': phasedlc2plot,
                    'period': phasedlc2period,
                    'epoch': phasedlc2epoch,
                    'lcfit': phasedlc2fit,
                },
            }

        #
        # end of processing per pfmethod
        #
        self.set_header('Content-Type', 'application/json; charset=UTF-8')
        self.write(resultdict)
        self.finish()

    else:
        LOGGER.error('no checkplot file requested')
        resultdict = {'status': 'error',
                      'message': "This checkplot doesn't exist.",
                      'readonly': True,
                      'result': None}
        # note: the original called self.status(400); RequestHandler's
        # method is set_status
        self.set_status(400)
        self.write(resultdict)
        self.finish()
def get_all_argument(self):
    data = {}
    for pair in self.request.body.decode().split('&'):
        key, value = pair.split("=")
        data[key] = escape.url_unescape(value)
    return data
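
For contrast, a sketch of the same form-body parsing done with the standard library; parse_qsl handles the '&'-splitting, '='-splitting, and percent-decoding in one step (this is an alternative, not the original's approach):

from urllib.parse import parse_qsl

def get_all_argument_stdlib(body: bytes) -> dict:
    # later duplicate keys overwrite earlier ones, matching the dict-based
    # behavior of the handler above
    return dict(parse_qsl(body.decode()))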
def _unquote_or_none(s):
    if s is None:
        return s
    return escape.url_unescape(s, encoding=None, plus=False)