def identifier_search(search, tree_name, needle, complete, fold_case):
    needle = re.sub(r'\\(.)', r'\1', needle)
    pieces = re.split(r'\.|::', needle)

    # If the last segment of the search needle is too short, return no results,
    # since such a short prefix would match far too many identifiers.
    if not complete and len(pieces[-1]) < 3:
        return {}

    # Fixup closure used by add_qualified_results to narrow a match's bounds
    # from the full bounds of the search result down to the prefix that was
    # actually in the search needle. (So if the search was "foo::bar" and we
    # matched "foo::bartab" and "foo::barhat", only the "bar" portion gets
    # highlighted, assuming the bounds previously covered "bartab" and
    # "barhat".)
    def line_modifier(line):
        if 'bounds' in line:
            (start, end) = line['bounds']
            end = start + len(pieces[-1])
            line['bounds'] = [start, end]

    ids = identifiers.lookup(tree_name, needle, complete, fold_case)
    for (i, (qualified, sym)) in enumerate(ids):
        if i > 500:
            break

        q = demangle(sym)
        if q == sym:
            q = qualified

        results = crossrefs.lookup(tree_name, sym)
        search.add_qualified_results(q, results, line_modifier)
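# A minimal, self-contained sketch (not part of the original module) showing
# how identifier_search treats its needle: backslash escapes are stripped, the
# needle is split on '.' or '::', and line_modifier narrows a hit's highlight
# bounds to just the final segment. The sample 'line' dict is hypothetical but
# mirrors the {'bounds': [start, end]} shape the code above expects.
import re

needle = re.sub(r'\\(.)', r'\1', r'foo::bar')   # unescape -> 'foo::bar'
pieces = re.split(r'\.|::', needle)             # -> ['foo', 'bar']

line = {'lno': 42, 'bounds': [10, 16]}          # pretend we matched 'bartab'
start, end = line['bounds']
line['bounds'] = [start, start + len(pieces[-1])]
print(pieces)           # ['foo', 'bar']
print(line['bounds'])   # [10, 13] -- only the 'bar' prefix stays highlighted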
def process_request(self):
    url = urlparse.urlparse(self.path)
    path_elts = url.path.split('/')

    # Strip any extra slashes.
    path_elts = [elt for elt in path_elts if elt != '']

    if not path_elts:
        filename = os.path.join(index_path('mozilla-central'), 'help.html')
        data = open(filename).read()
        self.generate(data, 'text/html')
    elif len(path_elts) >= 2 and path_elts[1] == 'source':
        tree_name = path_elts[0]
        filename = os.path.join(index_path(tree_name), 'file', '/'.join(path_elts[2:]))
        try:
            data = open(filename).read()
        except:
            filename = os.path.join(index_path(tree_name), 'dir',
                                    '/'.join(path_elts[2:]), 'index.html')
            try:
                data = open(filename).read()
            except:
                return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
        self.generate(data, 'text/html')
    elif len(path_elts) >= 2 and path_elts[1] == 'search':
        tree_name = path_elts[0]
        query = urlparse.parse_qs(url.query)
        j = get_json_search_results(tree_name, query)
        if 'json' in self.headers.getheader('Accept', ''):
            self.generate(j, 'application/json')
        else:
            j = j.replace("</", "<\\/").replace("<script", "<\\script").replace("<!", "<\\!")
            template = os.path.join(index_path(tree_name), 'templates/search.html')
            self.generateWithTemplate({'{{BODY}}': j, '{{TITLE}}': 'Search'}, template)
    elif len(path_elts) >= 2 and path_elts[1] == 'define':
        tree_name = path_elts[0]
        query = urlparse.parse_qs(url.query)
        symbol = query['q'][0]
        results = crossrefs.lookup(tree_name, symbol)
        definition = results['Definitions'][0]
        filename = definition['path']
        lineno = definition['lines'][0]['lno']
        url = '/' + tree_name + '/source/' + filename + '#' + str(lineno)
        self.send_response(301)
        self.send_header("Location", url)
        self.end_headers()
    else:
        return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def f(kind):
    # Nested helper (a fragment of an identifier-search routine): it relies on
    # tree_name, sym, and pieces from an enclosing scope. It fetches the
    # cross-reference hits of the given kind and narrows each hit's highlight
    # bounds to the last segment of the search needle.
    results = crossrefs.lookup(tree_name, sym)
    results = results.get(kind, [])
    for path in results:
        for line in path['lines']:
            if 'bounds' in line:
                (start, end) = line['bounds']
                end = start + len(pieces[-1])
                line['bounds'] = [start, end]
    return results
def process_request(self):
    url = urlparse.urlparse(self.path)
    path_elts = url.path.split('/')

    # Strip any extra slashes.
    path_elts = [elt for elt in path_elts if elt != '']

    # Warning: many of the branches in this condition chain never actually
    # run, because nginx serves those paths directly. See
    # scripts/nginx-setup.py for which paths are proxied to this handler and
    # which nginx handles itself before you start mucking around in here.
    if not path_elts:
        filename = os.path.join(index_path(get_main_tree()), 'help.html')
        data = open(filename).read()
        self.generate(data, 'text/html')
    elif len(path_elts) >= 2 and path_elts[1] == 'source':
        tree_name = path_elts[0]
        filename = os.path.join(index_path(tree_name), 'file', '/'.join(path_elts[2:]))
        try:
            data = open(filename).read()
        except:
            filename = os.path.join(index_path(tree_name), 'dir',
                                    '/'.join(path_elts[2:]), 'index.html')
            try:
                data = open(filename).read()
            except:
                return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
        self.generate(data, 'text/html')
    elif len(path_elts) >= 2 and path_elts[1] == 'search':
        tree_name = path_elts[0]
        query = urlparse.parse_qs(url.query)
        j = get_json_search_results(tree_name, query)
        if 'json' in self.headers.getheader('Accept', ''):
            self.generate(j, 'application/json')
        else:
            j = j.replace("</", "<\\/").replace("<script", "<\\script").replace("<!", "<\\!")
            template = os.path.join(index_path(tree_name), 'templates/search.html')
            self.generateWithTemplate({'{{BODY}}': j, '{{TITLE}}': 'Search'}, template)
    elif len(path_elts) >= 2 and path_elts[1] == 'define':
        tree_name = path_elts[0]
        query = urlparse.parse_qs(url.query)
        symbol = query['q'][0]
        results = crossrefs.lookup(tree_name, symbol)
        definition = results['Definitions'][0]
        filename = definition['path']
        lineno = definition['lines'][0]['lno']
        url = '/' + tree_name + '/source/' + filename + '#' + str(lineno)
        self.send_response(301)
        self.send_header("Location", url)
        self.end_headers()
    else:
        return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
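# Hedged usage sketch (not from the original handler): how a request path like
# '/mozilla-central/search?q=Needle&case=false' decomposes into the path_elts
# and query values that process_request dispatches on. The tree name is purely
# illustrative; only standard-library calls are used.
try:
    from urlparse import urlparse, parse_qs        # Python 2, as used above
except ImportError:
    from urllib.parse import urlparse, parse_qs    # Python 3 fallback

parts = urlparse('/mozilla-central/search?q=Needle&case=false')
path_elts = [elt for elt in parts.path.split('/') if elt != '']
query = parse_qs(parts.query)
print(path_elts)   # ['mozilla-central', 'search'] -> tree_name, endpoint
print(query)       # {'q': ['Needle'], 'case': ['false']} -> every value is a list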
def process_request(self):
    url = urlparse.urlparse(self.path)
    path_elts = url.path.split('/')

    # Strip any extra slashes.
    path_elts = [elt for elt in path_elts if elt != '']

    if not path_elts:
        filename = os.path.join(index_path('mozilla-central'), 'help.html')
        data = open(filename).read()
        self.generate(data, 'text/html')
    elif len(path_elts) >= 2 and path_elts[1] == 'source':
        tree_name = path_elts[0]
        filename = os.path.join(index_path(tree_name), 'file', '/'.join(path_elts[2:]))
        try:
            data = open(filename).read()
        except:
            filename = os.path.join(index_path(tree_name), 'dir',
                                    '/'.join(path_elts[2:]), 'index.html')
            try:
                data = open(filename).read()
            except:
                return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
        self.generate(data, 'text/html')
    elif len(path_elts) >= 2 and path_elts[1] == 'search':
        tree_name = path_elts[0]
        query = urlparse.parse_qs(url.query)
        j = get_json_search_results(tree_name, query)
        if 'json' in self.headers.getheader('Accept', ''):
            self.generate(j, 'application/json')
        else:
            j = j.replace("</", "<\\/")
            template = os.path.join(index_path(tree_name), 'templates/search.html')
            self.generateWithTemplate({'{{BODY}}': j, '{{TITLE}}': 'Search'}, template)
    elif len(path_elts) >= 2 and path_elts[1] == 'define':
        tree_name = path_elts[0]
        query = urlparse.parse_qs(url.query)
        symbol = query['q'][0]
        results = crossrefs.lookup(tree_name, symbol)
        definition = results['Definitions'][0]
        filename = definition['path']
        lineno = definition['lines'][0]['lno']
        url = '/' + tree_name + '/source/' + filename + '#' + str(lineno)
        self.send_response(301)
        self.send_header("Location", url)
        self.end_headers()
    else:
        return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def get_json_search_results(tree_name, query):
    try:
        search_string = query['q'][0]
    except:
        search_string = ''

    try:
        fold_case = query['case'][0] != 'true'
    except:
        fold_case = True

    try:
        regexp = query['regexp'][0] == 'true'
    except:
        regexp = False

    try:
        path_filter = query['path'][0]
    except:
        path_filter = ''

    parsed = parse_search(search_string)

    # Should we just be leaving this in parsed?
    context_lines = 0
    if 'context_lines' in parsed:
        context_lines = parsed['context_lines']

    if path_filter:
        parsed['pathre'] = parse_path_filter(path_filter)

    if regexp:
        if 'default' in parsed:
            del parsed['default']
        if 're' in parsed:
            del parsed['re']
        parsed['re'] = search_string

    if 'default' in parsed and len(parsed['default']) == 0:
        del parsed['default']

    if is_trivial_search(parsed):
        results = {}
        return json.dumps(results)

    title = search_string
    if not title:
        title = 'Files ' + path_filter

    search = SearchResults()

    work_limit = False
    hit_timeout = False

    if 'symbol' in parsed:
        search.set_path_filter(parsed.get('pathre'))
        symbols = parsed['symbol']
        title = 'Symbol ' + symbols
        search.add_results(crossrefs.lookup(tree_name, symbols))
    elif 're' in parsed:
        path = parsed.get('pathre', '.*')
        (substr_results, timed_out) = codesearch.search(parsed['re'], fold_case, path, tree_name, context_lines)
        search.add_results({'Textual Occurrences': substr_results})
        hit_timeout |= timed_out
    elif 'id' in parsed:
        search.set_path_filter(parsed.get('pathre'))
        identifier_search(search, tree_name, parsed['id'], complete=True, fold_case=fold_case)
    elif 'default' in parsed:
        work_limit = True
        path = parsed.get('pathre', '.*')
        (substr_results, timed_out) = codesearch.search(parsed['default'], fold_case, path, tree_name, context_lines)
        search.add_results({'Textual Occurrences': substr_results})
        hit_timeout |= timed_out

        if 'pathre' not in parsed:
            file_results = search_files(tree_name, parsed['default'])
            search.add_results({'Files': file_results})

            identifier_search(search, tree_name, parsed['default'], complete=False, fold_case=fold_case)
    elif 'pathre' in parsed:
        path = parsed['pathre']
        search.add_results({'Files': search_files(tree_name, path)})
    else:
        assert False
        results = {}

    results = search.get(work_limit)
    results['*title*'] = title
    results['*timedout*'] = hit_timeout
    return json.dumps(results)
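# Hedged sketch (illustrative values only): the query argument to
# get_json_search_results is the parse_qs-style dict in which every value is a
# list, which is why each setting above is read as query[...][0] inside a
# try/except that falls back to a default when the key is missing.
query = {'q': ['Foo::Bar'], 'case': ['false'], 'path': ['dom/']}

search_string = query.get('q', [''])[0]            # 'Foo::Bar'
fold_case = query.get('case', [''])[0] != 'true'   # True: fold case unless case=true
regexp = query.get('regexp', [''])[0] == 'true'    # False: key absent
path_filter = query.get('path', [''])[0]           # 'dom/'
print([search_string, fold_case, regexp, path_filter])  # ['Foo::Bar', True, False, 'dom/']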
def get_json_search_results(tree_name, query):
    try:
        search_string = query['q'][0]
    except:
        search_string = ''

    try:
        fold_case = query['case'][0] != 'true'
    except:
        fold_case = True

    try:
        regexp = query['regexp'][0] == 'true'
    except:
        regexp = False

    try:
        path_filter = query['path'][0]
    except:
        path_filter = ''

    parsed = parse_search(search_string)

    if path_filter:
        parsed['pathre'] = parse_path_filter(path_filter)

    if regexp:
        if 'default' in parsed:
            del parsed['default']
        if 're' in parsed:
            del parsed['re']
        parsed['re'] = search_string

    if 'default' in parsed and len(parsed['default']) == 0:
        del parsed['default']

    if is_trivial_search(parsed):
        results = {}
        return json.dumps(results)

    title = search_string
    if not title:
        title = 'Files ' + path_filter

    search = SearchResults()

    if 'symbol' in parsed:
        search.set_path_filter(parsed.get('pathre'))
        symbols = parsed['symbol']
        title = 'Symbol ' + symbols
        search.add_results(crossrefs.lookup(tree_name, symbols))
    elif 're' in parsed:
        path = parsed.get('pathre', '.*')
        substr_results = codesearch.search(parsed['re'], fold_case, path, tree_name)
        search.add_results({'Textual Occurrences': substr_results})
    elif 'id' in parsed:
        search.set_path_filter(parsed.get('pathre'))
        identifier_search(search, tree_name, parsed['id'], complete=True, fold_case=fold_case, limit5=False)
    elif 'default' in parsed:
        path = parsed.get('pathre', '.*')
        substr_results = codesearch.search(parsed['default'], fold_case, path, tree_name)
        search.add_results({'Textual Occurrences': substr_results})

        if 'pathre' not in parsed:
            file_results = search_files(tree_name, parsed['default'])
            search.add_results({'Files': file_results})

            identifier_search(search, tree_name, parsed['default'], complete=False, fold_case=fold_case)
    elif 'pathre' in parsed:
        path = parsed['pathre']
        search.add_results({'Files': search_files(tree_name, path)})
    else:
        assert False
        results = {}

    results = search.get()
    results['*title*'] = title
    return json.dumps(results)