def get_list(self):
    p = self.c.vnode2position(self.v)
    if not p:
        return
    # return [(i.h, i.b.split('\n', 1)[0])
    #     for i in p.subtree()]

    def strip(s):
        if s.startswith('@url'):
            s = s[4:]
        return s.strip()

    result, urls = [], []
    for p in p.subtree():
        if p.b and p.b[0] == '#':
            # Take the URL directly from the body: a node URL that ends with
            # the name of an existing file would otherwise be misinterpreted.
            url = p.b.split('\n', 1)[0]
        else:
            url = g.getUrlFromNode(p)
        h = strip(p.h)
        data = (h, url)
        if url and data not in result and url not in urls:
            result.append(data)
            urls.append(url)
    return result
def update_jupyter(self, s, keywords):
    '''Update @jupyter node in the vr pane.'''
    pc = self
    c = pc.c
    if pc.must_change_widget(QtWebKitWidgets.QWebView):
        # g.trace('===== instantiating QWebView')
        w = QtWebKitWidgets.QWebView()
        n = c.config.getInt('qweb_view_font_size')
        if n:
            settings = w.settings()
            settings.setFontSize(settings.DefaultFontSize, n)
        pc.embed_widget(w)
        assert w == pc.w
    else:
        w = pc.w
    url = g.getUrlFromNode(c.p)
    if url and nbformat:
        s = urlopen(url).read().decode()
        try:
            nb = nbformat.reads(s, as_version=4)
            e = HTMLExporter()
            (s, junk_resources) = e.from_notebook_node(nb)
        except nbformat.reader.NotJSONError:
            # Assume the result is html.
            pass
    elif url:
        s = 'can not import nbformat: %r' % url
    else:
        s = g.u('')
    if isQt5:
        w.hide()  # This forces a proper update.
    w.setHtml(s)
    w.show()
    c.bodyWantsFocusNow()
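# A minimal standalone sketch (an assumption, not part of the plugin) of the
# conversion update_jupyter() performs: fetch an .ipynb by URL, parse it with
# nbformat, and render it to HTML with nbconvert's HTMLExporter. The function
# name notebook_url_to_html is illustrative only.
from urllib.request import urlopen

import nbformat
from nbconvert import HTMLExporter

def notebook_url_to_html(url):
    '''Return the notebook at `url` rendered as an HTML string.'''
    raw = urlopen(url).read().decode()
    try:
        nb = nbformat.reads(raw, as_version=4)
    except nbformat.reader.NotJSONError:
        # Not JSON: assume the URL already points at HTML and pass it through.
        return raw
    exporter = HTMLExporter()
    html, _resources = exporter.from_notebook_node(nb)
    return html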
def recurse_bm(node, result, ancestors=None):
    if ancestors is None:
        ancestors = []
    for p in node.children():
        if p.b and p.b[0] == '#':
            # Take the URL directly from the body: a node URL that ends with
            # the name of an existing file would otherwise be misinterpreted.
            url = p.b.split('\n', 1)[0]
        else:
            url = g.getUrlFromNode(p)
        if url:
            url = url.replace(' ', '%20')
        h = self.fix_text(p.h)
        children = []
        bm = self.Bookmark(h, url, ancestors, result, children, p.v)
        result.append(bm)
        if levels == 0:  # non-hierarchical
            recurse_bm(p, result)
        else:
            recurse_bm(p, children, ancestors=ancestors + [bm])
def cmd_open_node(event):
    c = event.get('c')
    if not c:
        return
    p = c.p
    url = g.getUrlFromNode(p)
    if url:
        # No need to handle url hooks here.
        g.handleUrl(url, c=c, p=p)
def add_bookmark(self, te, pos):
    url = g.getUrlFromNode(self.c.p)
    if not url or '//' not in url:
        # a first line starting with '#' is misinterpreted as a url
        url = None
    if not url:
        url = '#' + self.c.p.get_UNL(with_file=False)
    # check it's not already present
    try:
        self.already = [i[1] for i in self.current_list].index(url)
    except ValueError:
        self.already = -1
    if self.already != -1:
        g.es("Bookmark for this node already present")
        return self.show_list(self.current_list)
    prev = str(te.anchorAt(QtCore.QPoint(pos.x() - 12, pos.y())))
    next = str(te.anchorAt(QtCore.QPoint(pos.x() + 12, pos.y())))
    new_list = []
    placed = False
    h = self.c.p.anyAtFileNodeName() or self.c.p.h
    while h and h[0] == '@':
        h = h[1:]
    new_anchor = h, url
    for anchor in self.current_list:
        if not placed and anchor[1] == next:
            placed = True
            new_list.append(new_anchor)
        new_list.append(anchor)
        if not placed and anchor[1] == prev:
            placed = True
            new_list.append(new_anchor)
    if not placed:
        new_list.append(new_anchor)
    idx = new_list.index(new_anchor)
    nd = self.c.vnode2position(self.v).insertAsNthChild(idx)
    nd.h = new_anchor[0]
    nd.b = new_anchor[1]
    self.c.redraw()
    self.current_list = new_list
    self.show_list(self.current_list)
    return None  # do not stop processing the select1 hook
def parse_feed(self, feed):
    c = self.c
    g.es("Parsing feed: %s" % feed.h, color="blue")
    feedurl = g.getUrlFromNode(feed)
    data = feedparser.parse(feedurl)
    # check for bad feed
    if data.bozo == 1:
        g.es("Error: bad feed data.", color="red")
        return
    # grab config settings
    sort_newest_first = c.config.getBool("rss-sort-newest-first", default=True)
    body_format = c.config.getData("rss-body-format") or [
        "@url <link>", "\\n", "<title>", "<date>", "\\n", "<summary>",
    ]
    body_format = "\n".join(body_format)
    body_format = body_format.replace("\\n", "")
    headline_format = c.config.getString("rss-headline-format") or "[<date>] <title>"
    date_format = c.config.getString("rss-date-format") or "%Y-%m-%d %I:%M %p"
    # process entries
    stories = sorted(data.entries, key=lambda entry: self.grab_date_parsed(entry))
    if sort_newest_first:
        stories.reverse()
    pos = feed
    for entry in stories:
        if not self.entry_in_history(feed, entry):
            date = time.strftime(date_format, self.grab_date_parsed(entry))
            name = entry.get("title", default=self._NO_NAME)
            link = entry.get("link", default=self._NO_LINK)
            desc = entry.get("summary", default=self._NO_SUMMARY)
            headline = (
                headline_format.replace("<date>", date)
                .replace("<title>", name)
                .replace("<summary>", desc)
                .replace("<link>", link)
            )
            body = (
                body_format.replace("<date>", date)
                .replace("<title>", name)
                .replace("<summary>", desc)
                .replace("<link>", link)
            )
            newp = pos.insertAsLastChild()
            newp.h = headline
            newp.b = body
            self.add_entry_to_history(feed, entry)
    self.c.redraw_now()
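# A minimal sketch (independent of Leo, names illustrative) of the feedparser
# calls parse_feed() relies on: parse a feed URL, check the bozo flag for a
# malformed feed, and read title/link from each entry.
import feedparser

def list_feed_entries(feedurl):
    '''Return (title, link) pairs for each entry in the feed at `feedurl`.'''
    data = feedparser.parse(feedurl)
    if data.bozo == 1:
        # feedparser sets bozo when the feed is not well-formed.
        return []
    return [(entry.get('title', 'NO TITLE'), entry.get('link', 'NO LINK'))
            for entry in data.entries]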
def openurl_rclick(c, p, menu):
    """ open an url """
    url = g.getUrlFromNode(p)
    if url:

        def openurl_rclick_cb():
            if not g.doHook("@url1", c=c, p=p, url=url):
                g.handleUrl(url, c=c, p=p)
            g.doHook("@url2", c=c, p=p)

        action = menu.addAction("Open URL")
        action.triggered.connect(openurl_rclick_cb)
def openurl_rclick(c, p, menu):
    """ open an url """
    url = g.getUrlFromNode(p)
    if not url:
        return

    def openurl_rclick_cb():
        if not g.doHook("@url1", c=c, p=p, v=p, url=url):
            g.handleUrl(url, c=c, p=p)
        g.doHook("@url2", c=c, p=p, v=p)

    a = menu.addAction("Open URL")
    a.connect(a, QtCore.SIGNAL("triggered()"), openurl_rclick_cb)
def parse_feed(self, feed):
    c = self.c
    g.es("Parsing feed: %s" % feed.h, color='blue')
    feedurl = g.getUrlFromNode(feed)
    # pylint: disable=no-member
    # feedparser.parse *does* exist.
    data = feedparser.parse(feedurl)
    # check for bad feed
    if data.bozo == 1:
        g.es("Error: bad feed data.", color='red')
        return
    # grab config settings
    sort_newest_first = c.config.getBool('rss-sort-newest-first', default=True)
    body_format = (
        c.config.getData('rss-body-format') or
        ['@url <link>', '\\n', '<title>', '<date>', '\\n', '<summary>']
    )
    body_format = "\n".join(body_format)
    body_format = body_format.replace('\\n', '')
    headline_format = c.config.getString('rss-headline-format') or '[<date>] <title>'
    date_format = c.config.getString('rss-date-format') or '%Y-%m-%d %I:%M %p'
    # process entries
    # pylint: disable=unnecessary-lambda
    stories = sorted(data.entries, key=lambda entry: self.grab_date_parsed(entry))
    if sort_newest_first:
        stories.reverse()
    pos = feed
    for entry in stories:
        if not self.entry_in_history(feed, entry):
            date = time.strftime(date_format, self.grab_date_parsed(entry))
            name = entry.get('title', default=self._NO_NAME)
            link = entry.get('link', default=self._NO_LINK)
            desc = entry.get('summary', default=self._NO_SUMMARY)
            headline = (
                headline_format.replace('<date>', date).
                replace('<title>', name).
                replace('<summary>', desc).
                replace('<link>', link)
            )
            body = (
                body_format.replace('<date>', date).
                replace('<title>', name).
                replace('<summary>', desc).
                replace('<link>', link)
            )
            newp = pos.insertAsLastChild()
            newp.h = headline
            newp.b = body
            self.add_entry_to_history(feed, entry)
    self.c.redraw()
def get_list(self):
    p = self.c.vnode2position(self.v)
    if not p:
        return
    # return [(i.h, i.b.split('\n', 1)[0])
    #     for i in p.subtree()]

    def strip(s):
        if s.startswith('@url'):
            s = s[4:]
        return s.strip()

    result, urls = [], []
    for p in p.subtree():
        url = g.getUrlFromNode(p)
        h = strip(p.h)
        data = (h, url)
        if url and data not in result and url not in urls:
            result.append(data)
            urls.append(url)
    return result
def is_feed(self, pos):
    '''A feed definition is a node whose headline starts with '@feed'
    and for which g.getUrlFromNode(pos) is truthy.'''
    return pos.v.h.startswith('@feed') and g.getUrlFromNode(pos)
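# A hedged usage sketch of is_feed(): collect every @feed node in an outline.
# `c` is a Leo commander and `controller` an instance of the class defining
# is_feed(); both names (and find_feeds itself) are illustrative assumptions.
def find_feeds(c, controller):
    '''Return a copy of every position that is_feed() accepts.'''
    return [p.copy() for p in c.all_positions() if controller.is_feed(p)]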