def get_summary_html( token, fullURL = 'http://localhost:32400',
                      preambleText = '', postambleText = '', name = None ):
    """
    Creates a Plex_ newsletter summary HTML email of the media on the Plex_ server. Used by the email GUI to send out summary emails.

    :param str token: the Plex_ access token.
    :param str fullURL: the Plex_ server URL.
    :param str preambleText: optional argument. The reStructuredText_ formatted preamble text (text section *before* the summary), if non-empty. Default is ``""``.
    :param str postambleText: optional argument. The reStructuredText_ formatted text, in a section *after* the summary, if non-empty. Default is ``""``.
    :param str name: optional argument. If given, the recipient's name.
    :returns: a two-element :py:class:`tuple`. The first element is an HTML :py:class:`string <str>` document of the Plex_ newsletter email. The second element is the full reStructuredText_ :py:class:`string <str>`.
    :rtype: tuple

    .. seealso:: :py:meth:`get_summary_body <howdy.email.email.get_summary_body>`.

    .. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
    """
    nameSection = False
    if len( preambleText.strip( ) ) != 0: nameSection = True
    if name is None: name = 'Friend'
    tup_formatting = (
        name,
        preambleText,
        get_summary_body( token, nameSection = nameSection, fullURL = fullURL ),
        postambleText,
    )
    wholestr = open( os.path.join( resourceDir, 'howdy_template.rst' ), 'r' ).read( )
    wholestr = wholestr % tup_formatting
    htmlString = html_parts( wholestr )[ 'whole' ]
    html = BeautifulSoup( htmlString, 'lxml' )
    return html.prettify( ), wholestr
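# A minimal usage sketch for get_summary_html, assuming a Plex server reachable at
# the default URL and a purely hypothetical access token; the call returns a
# ( prettified HTML, raw reStructuredText ) pair, of which only the HTML is saved here.
plex_token = 'XXXXXXXXXXXXXXXXXXXX'  # hypothetical token, not a real value
htmlString, rstString = get_summary_html(
    plex_token, fullURL = 'http://localhost:32400',
    preambleText = 'Here is what was added to the server this month.',
    name = 'Alice' )
with open( 'newsletter.html', 'w' ) as outfile:
    outfile.write( htmlString )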
def rstToHtml(directory):
    from docutils.examples import html_parts
    for filePath, htmlPath in iterFiles(directory, '.html'):
        print filePath
        print 'to html: ', htmlPath
        rst = open(filePath).read().decode('UTF-8')
        html = html_parts(rst, filePath, htmlPath)
        open(htmlPath, 'w').write(html['whole'].encode('UTF-8'))
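# rstToHtml above depends on an iterFiles helper that is not shown in this listing.
# A plausible sketch, offered only as an assumption about its contract: it pairs each
# .rst file in the directory with a destination path carrying the requested extension.
import os

def iterFiles(directory, newExtension):
    # yield (sourcePath, destinationPath) for every .rst file in the directory
    for fileName in os.listdir(directory):
        base, ext = os.path.splitext(fileName)
        if ext == '.rst':
            yield (os.path.join(directory, fileName),
                   os.path.join(directory, base + newExtension))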
def read_blogpost(filename, rawhtml, rawhtmltitle):
    if not rawhtml:
        parts = html_parts(open(filename, 'rb').read().decode('utf8'))
        title = parts['title']
        content = parts['body']
    else:
        title = opts.title
        content = open(filename, 'rb').read().decode('utf8')
    return title, content
def make_report(isodate='today'):
    """
    Build a HTML report with the computations performed at the given isodate.
    Return the name of the report, which is saved in the current directory.
    """
    if isodate == 'today':
        isodate = date.today()
    else:
        isodate = date(*time.strptime(isodate, '%Y-%m-%d')[:3])
    isodate1 = isodate + timedelta(1)  # +1 day
    tag_ids = []
    tag_status = []
    tag_contents = []
    # the fetcher returns a header which is stripped with [1:]
    jobs = dbcmd(
        'fetch', ALL_JOBS, isodate.isoformat(), isodate1.isoformat())[1:]
    page = '<h2>%d job(s) finished before midnight of %s</h2>' % (
        len(jobs), isodate)
    for job_id, user, status, ds_calc in jobs:
        tag_ids.append(job_id)
        tag_status.append(status)
        stats = dbcmd('fetch', JOB_STATS, job_id)
        if not stats[1:]:
            continue
        (job_id, user, start_time, stop_time, status) = stats[1]
        try:
            ds = read(job_id, datadir=os.path.dirname(ds_calc))
            txt = view_fullreport('fullreport', ds).decode('utf-8')
            report = html_parts(txt)
        except Exception as exc:
            report = dict(
                html_title='Could not generate report: %s' % cgi.escape(
                    unicode(exc), quote=True),
                fragment='')
        page = report['html_title']
        add_duration(stats)
        page += html(stats)
        page += report['fragment']
        tag_contents.append(page)
    page = make_tabs(tag_ids, tag_status, tag_contents) + (
        'Report last updated: %s' % datetime.now())
    fname = 'jobs-%s.html' % isodate
    with open(fname, 'w') as f:
        f.write(PAGE_TEMPLATE % page.encode('utf-8'))
    return fname
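# On success, html_parts(txt) returns the full docutils parts mapping; the except
# branch above only has to mimic the two keys that the page assembly reads,
# 'html_title' and 'fragment'. For reference, a standalone call looks like this:
from docutils.examples import html_parts

parts = html_parts(u'Full report\n===========\n\nDetails go here.')
snippet = parts['html_title'] + parts['fragment']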
def make_report(conn, isodate='today'):
    """
    Build a HTML report with the computations performed at the given isodate.
    Return the name of the report, which is saved in the current directory.
    """
    if isodate == 'today':
        isodate = datetime.date.today().isoformat()
    curs = conn.cursor()
    fetcher = Fetcher(curs)
    tag_ids = []
    tag_status = []
    tag_contents = []
    jobs = fetcher.query(ALL_JOBS, isodate, isodate)[1:]
    page = '<h2>%d job(s) finished before midnight of %s</h2>' % (
        len(jobs), isodate)
    for job_id, user, status, ds_calc in jobs:
        tag_ids.append(job_id)
        tag_status.append(status)
        stats = fetcher.query(JOB_STATS, job_id)[1:]
        if not stats:
            continue
        (job_id, user, start_time, stop_time, status, duration) = stats[0]
        try:
            ds = read(job_id, datadir=os.path.dirname(ds_calc))
            txt = view_fullreport('fullreport', ds).decode('utf-8')
            report = html_parts(txt)
        except Exception as exc:
            report = dict(
                html_title='Could not generate report: %s' % cgi.escape(
                    unicode(exc), quote=True),
                fragment='')
        page = report['html_title']
        job_stats = html(fetcher.query(JOB_STATS, job_id))
        page += job_stats
        page += report['fragment']
        tag_contents.append(page)
    page = make_tabs(tag_ids, tag_status, tag_contents) + (
        'Report last updated: %s' % datetime.datetime.now())
    fname = 'jobs-%s.html' % isodate
    with open(fname, 'w') as f:
        f.write(PAGE_TEMPLATE % page.encode('utf-8'))
    return fname
def read_blogpost(file_or_dir, rawhtml, rawhtmltitle):
    if os.path.isdir(file_or_dir):
        filename = "{0}/{1}".format(
            file_or_dir, random.choice(os.listdir(file_or_dir)))
    else:
        filename = file_or_dir
    if not rawhtml:
        parts = html_parts(open(filename, 'rb').read().decode('utf8'))
        title = parts['title']
        content = parts['body']
    else:
        title = opts.title
        content = open(filename, 'rb').read().decode('utf8')
    return title, content
def convert_string_RST( myString ):
    """
    Converts a valid reStructuredText_ input string into rich HTML.

    :param str myString: the candidate reStructuredText_ input.
    :returns: If the input string is valid reStructuredText_, returns the rich HTML as a :py:class:`string <str>`. Otherwise emits a :py:meth:`logging error message <logging.error>` and returns ``None``.
    :rtype: str

    .. seealso:: :py:meth:`check_valid_RST <howdy.core.check_valid_RST>`.
    """
    if not check_valid_RST( myString ):
        logging.error( "Error, could not convert %s into RST." % myString )
        return None
    html_body = html_parts( myString )[ 'whole' ]
    html = BeautifulSoup( html_body, 'lxml' )
    return html.prettify( )
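# A short usage sketch for convert_string_RST: valid reStructuredText comes back as
# prettified HTML, while malformed input (for example an unterminated inline phrase
# reference) should log an error and return None instead.
good_html = convert_string_RST( 'Hello, *world*. This is **valid** reStructuredText.' )
bad_html = convert_string_RST( 'An `unterminated phrase reference is not valid.' )  # expected: None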
def check_valid_RST( myString ):
    """
    Checks to see whether the input string is valid reStructuredText_.

    :param str myString: the candidate reStructuredText_ input.
    :returns: ``True`` if valid, otherwise ``False``.
    :rtype: bool

    .. seealso:: :py:meth:`convert_string_RST <howdy.core.convert_string_RST>`.

    .. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
    """
    body = html_parts( myString )[ 'body' ]
    html = BeautifulSoup( body, 'lxml' )
    error_messages = html.find_all( 'p', { 'class' : 'system-message-title' } )
    return len( error_messages ) == 0
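# A quick sanity check of check_valid_RST, assuming the default docutils report level
# so that problems such as an unknown directive are embedded into the rendered body
# as system-message paragraphs, which the function then detects.
print( check_valid_RST( 'A plain paragraph of text.' ) )   # expected: True
print( check_valid_RST( '.. unknown-directive:: oops' ) )  # expected: False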
def html(self, resource_url):
    def cook_link(match):
        path = match.groups()[0]
        try:
            if not path.startswith('/'):
                path = '/' + path
            page = find_resource(self, path)
            if isinstance(page, Page):
                return resource_url(page)
        except KeyError:
            pass
        return resource_url(
            find_root(self), 'add_page', query={'path': path})
    body = page_links.sub(cook_link, self.body)
    return html_parts(body, doctitle=False, initial_header_level=2)['html_body']
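# The return statement above leans on two html_parts keyword arguments: doctitle=False
# keeps a lone top-level section from being promoted to the document title, and
# initial_header_level=2 makes rendered section headings start at <h2>. A standalone sketch:
from docutils.examples import html_parts

parts = html_parts(u'Section\n=======\n\nBody text.',
                   doctitle=False, initial_header_level=2)
fragment = parts['html_body']  # rendered body with section headings starting at <h2>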
def dump_blogpost(filename):
    parts = html_parts(open(filename, 'rb').read().decode('utf8'))
    print parts['whole']
def _text_restruct(self, text):
    output = html_parts(unicode(text), doctitle=False)
    return output['body']
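# _text_restruct passes html_parts a unicode string (matching the default
# input_encoding of 'unicode') and keeps only the 'body' part, i.e. the rendered
# fragment without the surrounding document markup. An equivalent standalone call:
from docutils.examples import html_parts

body = html_parts(u'Some *emphasised* text.', doctitle=False)['body']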