def wiki(self, query):
    """Search wiki pages by name and by content.

    :param query: Whitespace-separated search terms.
    :return: list -- dicts with ``name`` and ``content`` keys, one per
        matching page, without duplicates.
    """
    res = []
    for entry in g.current_wiki.get_index():
        name = filename_to_cname(entry['name'])
        # Collapse runs of slashes left over from namespace paths.
        name = re.sub(r"//+", '/', name)
        # Normalize every separator to '-' so the name splits into words.
        # BUG FIX: the original reassigned from `name` on each pass of the
        # loop, so only the last character ('_') was actually replaced;
        # the replacements must accumulate in `nametmp`.
        nametmp = name
        for char in '/-_':
            nametmp = nametmp.replace(char, '-')
        page = g.current_wiki.get_page(name)
        for tag in query.split():
            found = False
            # First try to match the tag against the page-name words.
            for word in nametmp.split('-'):
                if tag in word:
                    if dict(name=name, content=page.data) not in res:
                        res.append(dict(name=name, content=page.data))
                    found = True
                    break
            # Fall back to a full-text scan of the page body.
            if not found:
                for word in page.data.split():
                    if tag in word:
                        if dict(name=name, content=page.data) not in res:
                            res.append(dict(name=name, content=page.data))
                        found = True
                        break
    return res
def rebuild_index():
    """Drop and repopulate the wiki search index.

    Bails out early when the app is configured for 'simple' search,
    since only an elasticsearch backend keeps a real index.
    """
    app = create_app()
    if app.config.get('SEARCH_TYPE') == 'simple':
        click.echo("Search type is simple, try using elasticsearch.")
        return
    with app.app_context():
        # Wiki
        search.delete_index('wiki')
        wiki = Wiki(app.config['WIKI_PATH'])
        for idx_entry in wiki.get_index():
            page = wiki.get_page(idx_entry['name'])
            # Some non-markdown files may have issues
            if not page:
                continue
            cname = filename_to_cname(page['name'])
            info = page['info']
            # TODO add email?
            document = {
                'name': cname,
                'content': page['data'],
                'message': info['message'],
                'username': info['author'],
                'updated_on': idx_entry['mtime'],
                'created_on': idx_entry['ctime'],
            }
            search.index_wiki(cname, document)
def wiki(self, query):
    """Search wiki page names for the query terms.

    :param query: Whitespace-separated search terms.
    :return: list -- dicts with ``name`` and ``content`` keys.
    """
    res = []
    for entry in g.current_wiki.get_index():
        name = filename_to_cname(entry['name'])
        # '/' and '-' both delimit words in a canonical name.
        if set(query.split()).intersection(name.replace('/', '-').split('-')):
            page = g.current_wiki.get_page(name)
            # get_page can return None (e.g. unborn HEAD); skip such
            # entries instead of raising TypeError on page['data'].
            if page:
                res.append(dict(name=name, content=page['data']))
    return res
def wiki(self, query):
    """Return pages whose name segments match any query term."""
    terms = set(query.split())
    matches = []
    for entry in g.current_wiki.get_index():
        name = filename_to_cname(entry['name'])
        if terms & set(name.split('-')):
            page = g.current_wiki.get_page(name)
            matches.append({'name': name, 'content': page['data']})
    return matches
def wiki(self, query):
    """Find wiki pages whose names contain any of the query words."""
    results = []
    query_words = query.split()
    for entry in g.current_wiki.get_index():
        cname = filename_to_cname(entry["name"])
        name_words = cname.split("-")
        if any(word in name_words for word in query_words):
            page = g.current_wiki.get_page(cname)
            results.append({"name": cname, "content": page["data"]})
    return results
def wiki(self, query):
    """Match query terms against normalized page names."""
    wanted = set(query.split())
    found = []
    for entry in g.current_wiki.get_index():
        cname = filename_to_cname(entry['name'])
        # Collapse repeated slashes before splitting into words.
        cname = re.sub(r"//+", '/', cname)
        words = cname.replace('/', '-').split('-')
        if wanted.intersection(words):
            page = g.current_wiki.get_page(cname)
            # this can be None, not sure how
            if page:
                found.append(dict(name=cname, content=page.data))
    return found
def wiki(self, query):
    """Return pages whose canonical name shares a word with *query*."""
    res = []
    terms = query.split()
    for entry in g.current_wiki.get_index():
        name = re.sub(r"//+", '/', filename_to_cname(entry['name']))
        segments = name.replace('/', '-').split('-')
        if any(term in segments for term in terms):
            page = g.current_wiki.get_page(name)
            # this can be None, not sure how
            if page:
                res.append({'name': name, 'content': page.data})
    return res
def get_page(self, name, sha='HEAD'):
    """Get page data, partials, commit info.

    :param name: Name of page.
    :param sha: Commit sha.
    :return: dict, or None if the page does not exist.
    """
    cached = cache.get(name)
    if cached:
        return cached
    # commit = gittle.utils.git.commit_info(self.repo[sha])
    # dulwich/gittle work with bytes; latin-1 gives a 1:1 byte mapping.
    filename = cname_to_filename(name).encode('latin-1')
    sha = sha.encode('latin-1')
    namespace_path = os.path.join(self.path, os.path.splitext(filename)[0])
    namespace_cname = to_canonical(os.path.splitext(filename)[0])
    if not os.path.exists(os.path.join(
            self.path, filename)) and os.path.isdir(namespace_path):
        # No page file but a matching directory exists: synthesize an
        # index page listing every page in the namespace.
        files = [
            "[%s](%s_%s)" % (x, namespace_cname, filename_to_cname(x))
            for x in os.listdir(namespace_path)
        ]
        # (leftover debug print(files) removed)
        return {
            'data': "# Namespace %s \n\n This is an automatically generated list of pages in this namespace.\n\n %s" % (os.path.splitext(filename)[0], '\n'.join(files))
        }
    try:
        data = self.gittle.get_commit_files(sha, paths=[filename]).get(filename)
        if not data:
            return None
        partials = {}
        if data.get('data'):
            meta = self.get_meta(data['data'])
            if meta and 'import' in meta:
                # Recursively pull in pages named by the 'import' meta key.
                for partial_name in meta['import']:
                    partials[partial_name] = self.get_page(partial_name)
        data['partials'] = partials
        data['info'] = self.get_history(name, limit=1)[0]
        return data
    except KeyError:
        # HEAD doesn't exist yet
        return None
def get_index(self):
    """Get repo index of head.

    :return: list -- List of dicts
    """
    idx = self.repo.open_index()
    results = []
    for path in idx:
        # Look the entry up once instead of once per field.
        meta = idx[path]
        results.append({
            'name': filename_to_cname(path),
            'filename': path,
            'ctime': meta.ctime[0],
            'mtime': meta.mtime[0],
            'sha': meta.sha,
            'size': meta.size,
        })
    return results
def get_page(self, name, sha='HEAD'):
    """Get page data, partials, commit info.

    :param name: Name of page.
    :param sha: Commit sha.
    :return: dict, or None if the page does not exist.
    """
    cached = cache.get(name)
    if cached:
        return cached
    # commit = gittle.utils.git.commit_info(self.repo[sha])
    # dulwich/gittle work with bytes; latin-1 gives a 1:1 byte mapping.
    filename = cname_to_filename(name).encode('latin-1')
    sha = sha.encode('latin-1')
    namespace_path = os.path.join(self.path, os.path.splitext(filename)[0])
    namespace_cname = to_canonical(os.path.splitext(filename)[0])
    if not os.path.exists(os.path.join(self.path, filename)) and os.path.isdir(namespace_path):
        # No page file but a matching directory exists: synthesize an
        # index page listing every page in the namespace.
        files = ["[%s](%s_%s)" % (x, namespace_cname, filename_to_cname(x))
                 for x in os.listdir(namespace_path)]
        # (leftover debug print(files) removed)
        return {'data': "# Namespace %s \n\n This is an automatically generated list of pages in this namespace.\n\n %s" % (os.path.splitext(filename)[0], '\n'.join(files))}
    try:
        data = self.gittle.get_commit_files(sha, paths=[filename]).get(filename)
        if not data:
            return None
        partials = {}
        if data.get('data'):
            meta = self.get_meta(data['data'])
            if meta and 'import' in meta:
                # Recursively pull in pages named by the 'import' meta key.
                for partial_name in meta['import']:
                    partials[partial_name] = self.get_page(partial_name)
        data['partials'] = partials
        data['info'] = self.get_history(name, limit=1)[0]
        return data
    except KeyError:
        # HEAD doesn't exist yet
        return None
def test_filename_to_cname(self):
    # Markdown extension is stripped; hyphens are preserved.
    result = filename_to_cname('test-1-2-3.md')
    eq_(result, 'test-1-2-3')