Example no. 1
def activate(profile,mixed=False):
    dname = os.path.dirname(__file__)
    pdir = locate_profile(profile)

    jc = JSONFileConfigLoader('ipython_notebook_config.json',pdir)


    try:
        config = jc.load_config()
    except (ConfigFileNotFound,ValueError) as e:
        config = Config()
    if 'NotebookApp' in config:
        if ('tornado_settings' in config['NotebookApp']) or ('contents_manager_class' in config['NotebookApp']):
            # TODO: manually merge tornado settings if they exist,
            # but we cannot do anything automatically if contents_manager_class is set
            raise ValueError('You already have some configuration that will conflict with Google Drive. Bailing out.')
    if mixed :
        drive_config  = JSONFileConfigLoader('mixed_contents.json', dname).load_config()
    else :
        drive_config  = JSONFileConfigLoader('ipython_notebook_config.json', dname).load_config()
    config.merge(drive_config)
    print('Activating Google Drive integration for profile "%s"' % profile)
    config['nbformat'] = 1

    with io.open(os.path.join(pdir,'ipython_notebook_config.json'),'w', encoding='utf-8') as f:
        f.write(cast_unicode_py2(json.dumps(config, indent=2)))
Example no. 2
 def default_config(self):
     c = Config({
         'CSSHTMLHeaderPreprocessor':{
             'enabled':True
             }          
         })
     c.merge(super(HTMLExporter,self).default_config)
     return c
Example no. 3
 def default_config(self):
     c = Config({
         'RevealHelpPreprocessor': {
             'enabled': True,
             },
         })
     c.merge(super(SlidesExporter,self).default_config)
     return c
Example no. 4
 def default_config(self):
     c = Config(
         {
             "GlobalConfigurable": {"display_data_priority": ["latex", "svg", "png", "jpg", "jpeg", "text"]},
             "ExtractFigureTransformer": {"enabled": True, "extra_ext_map": {"svg": "pdf"}},
         }
     )
     c.merge(super(LatexExporter, self).default_config)
     return c
Example no. 5
 def default_config(self):
     c = Config({
         'CSSHTMLHeaderTransformer':{
             'enabled':True
             },
         'RevealHelpTransformer':{
             'enabled':True,
             },                
         })
     c.merge(super(RevealExporter,self).default_config)
     return c
Example no. 6
 def default_config(self):
     c = Config({
         'GlobalConfigurable': {
             'display_data_priority' : ['latex', 'svg', 'png', 'jpg', 'jpeg' , 'text']
             },
          'ExtractFigureTransformer': {
                 'enabled':True,
                 'extra_ext_map':{'svg':'pdf'},
              }
      })
     c.merge(super(LatexExporter,self).default_config)
     return c
Example no. 7
 def default_config(self):
     c = Config({
         'NbConvertBase': {
             'display_data_priority' : ['text/javascript', 'text/html', 'application/pdf', 'image/svg+xml', 'text/latex', 'image/png', 'image/jpeg', 'text/plain']
             },
         'CSSHTMLHeaderPreprocessor':{
             'enabled':True
             },
         'HighlightMagicsPreprocessor': {
             'enabled':True
             }
         })
     c.merge(super(HTMLExporter,self).default_config)
     return c
Example no. 8
 def default_config(self):
     c = Config({
         'CSSHTMLHeaderPreprocessor':{
             'enabled':True
             },
         'RevealHelpPreprocessor':{
             'enabled':True,
             },                
         'HighlightMagicsPreprocessor': {
             'enabled':True
             }
         })
     c.merge(super(SlidesExporter,self).default_config)
     return c
Example no. 9
 def default_config(self):
     c = Config({
         'NbConvertBase': {
             'display_data_priority' : ['latex', 'png', 'jpg', 'svg', 'jpeg', 'text']
             },
          'ExtractFigureTransformer': {
                 'enabled':True
              },
          'SVG2PDFTransformer': {
                 'enabled':True
              },
          'LatexTransformer': {
                 'enabled':True
              }
      })
     c.merge(super(LatexExporter,self).default_config)
     return c
Example no. 10
 def default_config(self):
     c = Config({
         'NbConvertBase': {
             'display_data_priority' : ['latex', 'pdf', 'png', 'jpg', 'svg', 'jpeg', 'text']
             },
          'ExtractOutputPreprocessor': {
                 'enabled':True
              },
          'SVG2PDFPreprocessor': {
                 'enabled':True
              },
          'LatexPreprocessor': {
                 'enabled':True
              },
          'SphinxPreprocessor': {
                 'enabled':True
              }
      })
     c.merge(super(LatexExporter,self).default_config)
     return c
Example no. 11
 def default_config(self):
     c = Config({
         'NbConvertBase': {
             'display_data_priority' : ['text/latex', 'application/pdf', 'image/png', 'image/jpeg', 'image/svg+xml', 'text/plain']
             },
          'ExtractOutputPreprocessor': {
                 'enabled':True
              },
          'SVG2PDFPreprocessor': {
                 'enabled':True
              },
          'LatexPreprocessor': {
                 'enabled':True
              },
          'SphinxPreprocessor': {
                 'enabled':True
              },
          'HighlightMagicsPreprocessor': {
                 'enabled':True
              }
      })
     c.merge(super(LatexExporter,self).default_config)
     return c
Example no. 12
def test_find_connection_file():
    cfg = Config()
    with TemporaryDirectory() as d:
        cfg.ProfileDir.location = d
        cf = 'kernel.json'
        app = DummyConsoleApp(config=cfg, connection_file=cf)
        app.initialize(argv=[])
        BaseIPythonApplication._instance = app

        profile_cf = os.path.join(app.profile_dir.location, 'security', cf)
        with open(profile_cf, 'w') as f:
            f.write("{}")

        for query in (
                'kernel.json',
                'kern*',
                '*ernel*',
                'k*',
        ):
            nt.assert_equal(connect.find_connection_file(query), profile_cf)

        BaseIPythonApplication._instance = None
Example no. 13
 def run(self):
     import glob
     import os
     import os.path
     from nbconvert.exporters import python
     from IPython.config import Config
     examples_dir = os.path.join(os.path.dirname(__file__), 'examples')
     script_dir = os.path.join(examples_dir, 'scripts')
     if not os.path.exists(script_dir):
         os.makedirs(script_dir)
     c = Config(
         {'Exporter': {
             'template_file': 'examples/python-scripts.tpl'
         }})
     exporter = python.PythonExporter(config=c)
     for fname in glob.glob(
             os.path.join(examples_dir, 'notebooks', '*.ipynb')):
         output, _ = exporter.from_filename(fname)
         out_fname = os.path.splitext(os.path.basename(fname))[0]
         out_name = os.path.join(script_dir, out_fname + '.py')
         print(fname, '->', out_name)
         with open(out_name, 'w') as outf:
             outf.write(output)
Example no. 14
    def _parse_ipynb(self):
        notebook = nbformat.reads_json(self.source)

        config = Config({'HTMLExporter': {'default_template': 'basic'}})
        exporter = HTMLExporter(config=config)
        body, resources = exporter.from_notebook_node(notebook)

        body = body[body.index('\n<div class="cell'):-36]  # excise template
        body = self._decapitate(body)
        body = body.replace('\n</pre>', '</pre>')

        self.add_mathjax = r'\(' in body

        fields = notebook['metadata']

        if 'date' in fields:
            self.date = datetime.strptime(fields['date'], '%d %B %Y').date()
        else:
            self.date = datetime.now().date()
        self.tags = set()
        if 'tags' in fields:
            self.tags.update('-'.join(tag.strip().lower().split())
                             for tag in fields['tags'].split(','))

        if self.date and self.tags:
            heading = ':Date: {}\n:Tags: {}\n'.format(
                self.date.strftime('%d %B %Y').lstrip('0'),
                ', '.join(sorted(self.tags)),
            )
            parts = parse_rst(heading)
            body = parts['docinfo'] + body

        self.add_disqus = fields.get('add_disqus', False)
        self.body_html = body
        self.next_link = None
        self.previous_link = None
        self.add_title = True
Example no. 15
File: main.py Project: zonca/runipy
def main():
    log_format = '%(asctime)s %(message)s'
    log_datefmt = '%m/%d/%Y %I:%M:%S %p'

    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', help='.ipynb file to run')
    parser.add_argument('output_file',
                        nargs='?',
                        help='.ipynb file to save cell output to')
    parser.add_argument('--quiet',
                        '-q',
                        action='store_true',
                        help='don\'t print anything unless things go wrong')
    parser.add_argument('--overwrite',
                        '-o',
                        action='store_true',
                        help='write notebook output back to original notebook')
    parser.add_argument('--html',
                        nargs='?',
                        default=False,
                        help='output an HTML snapshot of the notebook')
    parser.add_argument('--template',
                        nargs='?',
                        default=False,
                        help='template to use for HTML output')
    parser.add_argument('--pylab',
                        action='store_true',
                        help='start notebook with pylab enabled')
    parser.add_argument('--matplotlib',
                        action='store_true',
                        help='start notebook with matplotlib inlined')
    parser.add_argument(
        '--skip-exceptions',
        '-s',
        action='store_true',
        help=
        'if an exception occurs in a cell, continue running the subsequent cells'
    )
    args = parser.parse_args()

    if args.overwrite:
        if args.output_file is not None:
            print(
                'Error: output_filename must not be provided if '
                '--overwrite (-o) given',
                file=stderr)
            exit(1)
        else:
            args.output_file = args.input_file

    if not args.quiet:
        logging.basicConfig(level=logging.DEBUG,
                            format=log_format,
                            datefmt=log_datefmt)

    nb_runner = NotebookRunner(args.input_file, args.pylab, args.matplotlib)

    exit_status = 0
    try:
        nb_runner.run_notebook(skip_exceptions=args.skip_exceptions)
    except NotebookError:
        exit_status = 1

    if args.output_file:
        nb_runner.save_notebook(args.output_file)

    if args.html is not False:
        if args.html is None:
            # if --html is given but no filename is provided,
            # come up with a sane output name based on the
            # input filename
            if args.input_file.endswith('.ipynb'):
                args.html = args.input_file[:-6] + '.html'
            else:
                args.html = args.input_file + '.html'

        if args.template is False:
            exporter = HTMLExporter()
        else:
            exporter = HTMLExporter(config=Config(
                {'HTMLExporter': {
                    'default_template': args.template
                }}))

        logging.info('Saving HTML snapshot to %s' % args.html)
        output, resources = exporter.from_notebook_node(nb_runner.nb)
        codecs.open(args.html, 'w', encoding='utf-8').write(output)

    if exit_status != 0:
        logging.warning('Exiting with nonzero exit status')
    exit(exit_status)
Example no. 16
    template_extension = Unicode(".tplx", config=True)

    def _init_filters(self):
        """
        Register all of the filters required for the exporter.
        """
        super(LatexExporter, self)._init_filters()

        self.register_filter('escape_tex', filters.escape_latex)
        self.register_filter('highlight', filters.highlight2latex)

    @property
    def default_config(self):
        c = Config({
            'NbConvertBase': {
                'display_data_priority': ['latex', 'pdf', 'png', 'jpg', 'svg', 'jpeg', 'text']
            },
            'ExtractFigureTransformer': {
                'enabled': True
            },
            'SVG2PDFTransformer': {
                'enabled': True
            },
            'LatexTransformer': {
                'enabled': True
            }
        })
        c.merge(super(LatexExporter, self).default_config)
        return c

    """
    Exports to a LaTeX template.  Inherit from this class if your template is
    LaTeX based and you need custom transformers/filters.  Inherit from it if
    you are writing your own HTML template and need custom transformers/filters.
    If you don't need custom transformers/filters, just change the
    'template_file' config option.  Place your template in the special "/latex"
    subfolder of the "../templates" folder.
    """

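The default_config pattern in the snippets above always builds a Config for the subclass and then merges the parent exporter's defaults into it. Below is a minimal, standalone sketch of that merge behaviour, assuming only the IPython.config package used elsewhere in these examples; the section names are illustrative and not tied to a real exporter.

from IPython.config import Config

# Defaults a hypothetical parent exporter might declare.
parent_defaults = Config({'CSSHTMLHeaderTransformer': {'enabled': True}})

# Settings a hypothetical subclass adds on top.
c = Config({'ExtractFigureTransformer': {'enabled': True}})

# merge() folds the other Config's sections into this one in place,
# recursing into nested sections, so both sets of settings end up in c.
c.merge(parent_defaults)

print(c.CSSHTMLHeaderTransformer.enabled)   # True, inherited from the parent
print(c.ExtractFigureTransformer.enabled)   # True, set by the subclass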
Example no. 17
 def config(self):
     return Config()
Example no. 18
def main():
    # command-line options
    define("debug", default=False, help="run in debug mode", type=bool)
    define("no_cache", default=False, help="Do not cache results", type=bool)
    define("localfile", default=False, help="Allow to serve localfile under /localfile/* this can be a security risk", type=bool)
    define("localfile_path", default="", help="Allow to serve local files under relative or absolute path; this can be a security risk", type=str)
    define("localfile_uri", default="/localfile/", help="Allow to serve local files under url; this can be a security risk", type=str)
    define("port", default=5000, help="run on the given port", type=int)
    define("cache_expiry_min", default=10*60, help="minimum cache expiry (seconds)", type=int)
    define("cache_expiry_max", default=2*60*60, help="maximum cache expiry (seconds)", type=int)
    define("mc_threads", default=1, help="number of threads to use for Async Memcache", type=int)
    define("threads", default=1, help="number of threads to use for background IO", type=int)
    tornado.options.parse_command_line()
    
    # NBConvert config
    config = Config()
    config.HTMLExporter.template_file = 'basic'
    config.NbconvertApp.fileext = 'html'
    config.CSSHTMLHeaderTransformer.enabled = False
    # don't strip the files prefix - we use it for redirects
    # config.Exporter.filters = {'strip_files_prefix': lambda s: s}
    
    exporter = HTMLExporter(config=config, log=log.app_log)
    
    # DEBUG env implies both autoreload and log-level
    if os.environ.get("DEBUG"):
        options.debug = True
        logging.getLogger().setLevel(logging.DEBUG)
    
    # setup memcache
    mc_pool = ThreadPoolExecutor(options.mc_threads)
    pool = ThreadPoolExecutor(options.threads)
    memcache_urls = os.environ.get('MEMCACHIER_SERVERS',
        os.environ.get('MEMCACHE_SERVERS')
    )
    if options.no_cache :
        log.app_log.info("Not using cache")
        cache = MockCache()
    elif pylibmc and memcache_urls:
        kwargs = dict(pool=mc_pool)
        username = os.environ.get('MEMCACHIER_USERNAME', '')
        password = os.environ.get('MEMCACHIER_PASSWORD', '')
        if username and password:
            kwargs['binary'] = True
            kwargs['username'] = username
            kwargs['password'] = password
            log.app_log.info("Using SASL memcache")
        else:
            log.app_log.info("Using plain memecache")
        
        cache = AsyncMultipartMemcache(memcache_urls.split(','), **kwargs)
    else:
        log.app_log.info("Using in-memory cache")
        cache = DummyAsyncCache()
    
    # setup tornado handlers and settings
    
    template_path = pjoin(here, 'templates')
    static_path = pjoin(here, 'static')
    env = Environment(loader=FileSystemLoader(template_path))
    env.filters['markdown'] = markdown.markdown
    try:
        git_data = git_info(here)
    except Exception as e:
        app_log.error("Failed to get git info: %s", e)
        git_data = {}
    else:
        git_data['msg'] = escape(git_data['msg'])

    if options.no_cache :
        # force jinja to recompile template every time
        env.globals.update(cache_size=0)
    env.globals.update(nrhead=nrhead, nrfoot=nrfoot, git_data=git_data)
    AsyncHTTPClient.configure(HTTPClientClass)
    client = AsyncHTTPClient()
    github_client = AsyncGitHubClient(client)
    github_client.authenticate()
    
    settings = dict(
        log_function=log_request,
        jinja2_env=env,
        static_path=static_path,
        client=client,
        github_client=github_client,
        exporter=exporter,
        cache=cache,
        cache_expiry_min=options.cache_expiry_min,
        cache_expiry_max=options.cache_expiry_max,
        pool=pool,
        gzip=True,
        render_timeout=20,
        localfile_path=options.localfile_path,
    )
    
    # create and start the app
    if options.localfile:
        log.app_log.warning("Serving local files, this can be a security risk")
        # use absolute or relative paths:
        handlers.insert(0, (r'%s(.*)' % options.localfile_uri, LocalFileHandler))

    app = web.Application(handlers, debug=options.debug, **settings)
    http_server = httpserver.HTTPServer(app, xheaders=True)
    log.app_log.info("Listening on port %i", options.port)
    http_server.listen(options.port)
    ioloop.IOLoop.instance().start()
Example no. 19
File: rst.py Project: 2t7/ipython
 def default_config(self):
     c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
     c.merge(super(RSTExporter,self).default_config)
     return c
Example no. 20
    def read(self, filepath):
        metadata = {}

        # Files
        filedir = os.path.dirname(filepath)
        filename = os.path.basename(filepath)
        metadata_filename = filename.split('.')[0] + '.ipynb-meta'
        metadata_filepath = os.path.join(filedir, metadata_filename)

        # Load metadata
        if os.path.exists(metadata_filepath):
            # Metadata is in an external file, process using Pelican MD Reader
            md_reader = MarkdownReader(self.settings)
            _content, metadata = md_reader.read(metadata_filepath)
        else:
            # Load metadata from ipython notebook file
            ipynb_file = open(filepath)
            metadata = json.load(ipynb_file)['metadata']

            # Fix metadata to pelican standards
            for key, value in metadata.items():
                del metadata[key]
                key = key.lower()
                metadata[key] = self.process_metadata(key, value)
            metadata['ipython'] = True

        # Convert ipython notebook to html
        config = Config({
            'CSSHTMLHeaderTransformer': {
                'enabled': True,
                'highlight_class': '.highlight-ipynb'
            }
        })
        exporter = HTMLExporter(config=config,
                                template_file='basic',
                                filters={'highlight2html': custom_highlighter})

        content, info = exporter.from_filename(filepath)

        if BeautifulSoup:
            soup = BeautifulSoup(content)
            for i in soup.findAll("div", {"class": "input"}):
                if i.findChildren()[1].find(text='#ignore') is not None:
                    i.extract()
        else:
            soup = content

        # Process using Pelican HTMLReader
        content = '<body>{0}</body>'.format(
            soup)  # So Pelican HTMLReader works
        parser = MyHTMLParser(self.settings, filename)
        parser.feed(content)
        parser.close()
        body = parser.body
        summary = parser.summary

        metadata['summary'] = summary

        # Remove some CSS styles, so it doesn't break the themes.
        def filter_tags(style_text):
            style_list = style_text.split('\n')
            exclude = [
                'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'a', 'ul', 'ol', 'li',
                '.rendered_html', '@media', '.navbar', 'nav.navbar',
                '.navbar-text', 'code', 'pre', 'div.text_cell_render'
            ]
            style_list = [
                i for i in style_list
                if len(list(filter(i.startswith, exclude))) == 0
            ]
            ans = '\n'.join(style_list)
            return '<style type=\"text/css\">{0}</style>'.format(ans)

        css = '\n'.join(filter_tags(css) for css in info['inlining']['css'])
        css = css + CUSTOM_CSS
        body = css + body

        return body, metadata
Example no. 21
 def default_config(self):
     c = Config({"CSSHTMLHeaderTransformer": {"enabled": True}})
     c.merge(super(RevealExporter, self).default_config)
     return c
Example no. 22
 def test_preprocessor(self):
     """Do preprocessors work?"""
     config = Config({'Exporter': {'preprocessors': [PizzaPreprocessor()]}})
     exporter = Exporter(config=config)
     (notebook, resources) = exporter.from_filename(self._get_notebook())
     self.assertEqual(notebook['pizza'], 'cheese')
Example no. 23
def notebook(preprocessor, tag, markup):
    match = FORMAT.search(markup)
    if match:
        argdict = match.groupdict()
        src = argdict['src']
        start = argdict['start']
        end = argdict['end']
    else:
        raise ValueError("Error processing input, "
                         "expected syntax: {0}".format(SYNTAX))

    if start:
        start = int(start)
    else:
        start = 0

    if end:
        end = int(end)
    else:
        end = None

    settings = preprocessor.configs.config['settings']
    nb_dir = settings.get('NOTEBOOK_DIR', 'notebooks')
    nb_path = os.path.join('content', nb_dir, src)

    if not os.path.exists(nb_path):
        raise ValueError("File {0} could not be found".format(nb_path))

    # Create the custom notebook converter
    c = Config({
        'CSSHTMLHeaderTransformer': {
            'enabled': True,
            'highlight_class': '.highlight-ipynb'
        },
        'SubCell': {
            'enabled': True,
            'start': start,
            'end': end
        }
    })

    exporter = HTMLExporter(config=c,
                            template_file='basic',
                            filters={'highlight2html': custom_highlighter},
                            transformers=[SubCell],
                            extra_loaders=[pelican_loader])

    # read and parse the notebook
    with open(nb_path) as f:
        nb_text = f.read()
    nb_json = nbformat.reads_json(nb_text)
    (body, resources) = exporter.from_notebook_node(nb_json)

    # if we haven't already saved the header, save it here.
    if not notebook.header_saved:
        print("\n ** Writing styles to _nb_header.html: "
              "this should be included in the theme. **\n")

        header = '\n'.join(
            CSS_WRAPPER.format(css_line)
            for css_line in resources['inlining']['css'])
        header += JS_INCLUDE

        with open('_nb_header.html', 'w') as f:
            f.write(header)
        notebook.header_saved = True

    # this will stash special characters so that they won't be transformed
    # by subsequent processes.
    body = preprocessor.configs.htmlStash.store(body, safe=True)
    return body
Example no. 24
class Exporter(Configurable):
    """
    Exports notebooks into other file formats.  Uses Jinja 2 templating engine
    to output new formats.  Inherit from this class if you are creating a new
    template type along with new filters/transformers.  If the filters/
    transformers provided by default suffice, there is no need to inherit from
    this class.  Instead, override the template_file and file_extension
    traits via a config file.
    
    {filters}
    """

    # finish the docstring
    __doc__ = __doc__.format(filters='- ' +
                             '\n    - '.join(default_filters.keys()))

    template_file = Unicode('',
                            config=True,
                            help="Name of the template file to use")

    file_extension = Unicode(
        'txt',
        config=True,
        help="Extension of the file that should be written to disk")

    template_path = Unicode("/../templates/",
                            config=True,
                            help="Path where the template files are located.")

    template_skeleton_path = Unicode(
        "/../templates/skeleton/",
        config=True,
        help="Path where the template skeleton files are located.")

    #Jinja block definitions
    jinja_comment_block_start = Unicode("", config=True)
    jinja_comment_block_end = Unicode("", config=True)
    jinja_variable_block_start = Unicode("", config=True)
    jinja_variable_block_end = Unicode("", config=True)
    jinja_logic_block_start = Unicode("", config=True)
    jinja_logic_block_end = Unicode("", config=True)

    #Extension that the template files use.
    template_extension = Unicode(".tpl", config=True)

    #Processors that process the input data prior to the export, set in the
    #constructor for this class.
    transformers = None

    _default_config = Config()

    def __init__(self, transformers=None, filters=None, config=None, **kw):
        """
        Public constructor
    
        Parameters
        ----------
        transformers : list[of transformer]
            Custom transformers to apply to the notebook prior to engaging
            the Jinja template engine.  Any transformers specified here 
            will override existing transformers if a naming conflict
            occurs.
        filters : dict[of filter]
            filters specified here will override existing filters if a naming
            conflict occurs. Filters are available in jinja templates through
            the name of the corresponding key. Cf. class docstring for
            available default filters.
        config : config
            User configuration instance.
        """

        #Call the base class constructor
        super(Exporter, self).__init__(config=config, **kw)

        #Standard environment
        self._init_environment()

        #Add transformers
        self._register_transformers()

        #Add filters to the Jinja2 environment
        self._register_filters()

        #Load user transformers.  Overwrite existing transformers if need be.
        if transformers:
            for transformer in transformers:
                self.register_transformer(transformer)

        #Load user filters.  Overwrite existing filters if need be.
        if not filters is None:
            for key, user_filter in filters.iteritems():
                if issubclass(user_filter, MetaHasTraits):
                    self.environment.filters[key] = user_filter(config=config)
                else:
                    self.environment.filters[key] = user_filter

    @property
    def default_config(self):
        if self._default_config:
            return Config(deepcopy(self._default_config))
        else:
            return Config()

    def from_notebook_node(self, nb, resources=None):
        """
        Convert a notebook from a notebook node instance.
    
        Parameters
        ----------
        nb : Notebook node
        resources : a dict of additional resources that
                can be accessed read/write by transformers
                and filters.
        """
        if resources is None:
            resources = {}
        nb, resources = self._preprocess(nb, resources)

        #Load the template file.
        self.template = self.environment.get_template(self.template_file +
                                                      self.template_extension)

        return self.template.render(nb=nb, resources=resources), resources

    def from_filename(self, filename):
        """
        Convert a notebook from a notebook file.
    
        Parameters
        ----------
        filename : str
            Full filename of the notebook file to open and convert.
        """

        with io.open(filename) as f:
            return self.from_notebook_node(nbformat.read(f, 'json'))

    def from_file(self, file_stream):
        """
        Convert a notebook from a notebook file.
    
        Parameters
        ----------
        file_stream : file-like object
            Notebook file-like object to convert.
        """
        return self.from_notebook_node(nbformat.read(file_stream, 'json'))

    def register_transformer(self, transformer):
        """
        Register a transformer.
        Transformers are classes that act upon the notebook before it is
        passed into the Jinja templating engine.  Transformers are also
        capable of passing additional information to the Jinja
        templating engine.
    
        Parameters
        ----------
        transformer : transformer
        """
        if self.transformers is None:
            self.transformers = []

        if inspect.isfunction(transformer):
            self.transformers.append(transformer)
            return transformer
        elif isinstance(transformer, MetaHasTraits):
            transformer_instance = transformer(config=self.config)
            self.transformers.append(transformer_instance)
            return transformer_instance
        else:
            transformer_instance = transformer()
            self.transformers.append(transformer_instance)
            return transformer_instance

    def register_filter(self, name, filter):
        """
        Register a filter.
        A filter is a function that accepts and acts on one string.  
        The filters are accessible within the Jinja templating engine.
    
        Parameters
        ----------
        name : str
            name to give the filter in the Jinja engine
        filter : filter
        """
        if inspect.isfunction(filter):
            self.environment.filters[name] = filter
        elif isinstance(filter, MetaHasTraits):
            self.environment.filters[name] = filter(config=self.config)
        else:
            self.environment.filters[name] = filter()
        return self.environment.filters[name]

    def _register_transformers(self):
        """
        Register all of the transformers needed for this exporter.
        """

        self.register_transformer(music21.ext.nbconvert.transformers.
                                  coalescestreams.coalesce_streams)

        #Remember the figure extraction transformer so it can be enabled and
        #disabled easily later.
        self.extract_figure_transformer = self.register_transformer(
            music21.ext.nbconvert.transformers.extractfigure.
            ExtractFigureTransformer)

    def _register_filters(self):
        """
        Register all of the filters required for the exporter.
        """
        for k, v in default_filters.iteritems():
            self.register_filter(k, v)

    def _init_environment(self):
        """
        Create the Jinja templating environment.
        """

        self.environment = Environment(loader=FileSystemLoader([
            os.path.dirname(os.path.realpath(__file__)) + self.template_path,
            os.path.dirname(os.path.realpath(__file__)) +
            self.template_skeleton_path,
        ]),
                                       extensions=JINJA_EXTENSIONS)

        #Set special Jinja2 syntax that will not conflict with latex.
        if self.jinja_logic_block_start:
            self.environment.block_start_string = self.jinja_logic_block_start
        if self.jinja_logic_block_end:
            self.environment.block_end_string = self.jinja_logic_block_end
        if self.jinja_variable_block_start:
            self.environment.variable_start_string = self.jinja_variable_block_start
        if self.jinja_variable_block_end:
            self.environment.variable_end_string = self.jinja_variable_block_end
        if self.jinja_comment_block_start:
            self.environment.comment_start_string = self.jinja_comment_block_start
        if self.jinja_comment_block_end:
            self.environment.comment_end_string = self.jinja_comment_block_end

    def _preprocess(self, nb, resources):
        """
        Preprocess the notebook before passing it into the Jinja engine.
        To preprocess the notebook is to run each of the registered transformers on it, in order.
    
        Parameters
        ----------
        nb : notebook node
            notebook that is being exported.
        resources : a dict of additional resources that
            can be accessed read/write by transformers
            and filters.
        """

        # Do a deepcopy first,
        # we are never safe enough with what the transformers could do.
        nbc = deepcopy(nb)
        resc = deepcopy(resources)
        #Run each transformer on the notebook.  Carry the output along
        #to each transformer
        for transformer in self.transformers:
            nbc, resc = transformer(nbc, resc)
        return nbc, resc
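As the Exporter class docstring above notes, when the stock filters and transformers are enough there is no need to subclass: the template_file and file_extension traits can simply be overridden through a Config. A minimal sketch of that follows, reusing the IPython.nbconvert HTMLExporter import seen in the other examples; 'basic' is the stock template name used throughout these snippets, and 'notebook.ipynb' is a hypothetical input path.

from IPython.config import Config
from IPython.nbconvert import HTMLExporter

c = Config({
    'HTMLExporter': {
        'template_file': 'basic',   # swap in your own template name here
        'file_extension': 'html',
    },
})
exporter = HTMLExporter(config=c)
body, resources = exporter.from_filename('notebook.ipynb')  # hypothetical notebook path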
Example no. 25
    def read(self, filepath):
        metadata = {}

        # Files
        filedir = os.path.dirname(filepath)
        filename = os.path.basename(filepath)
        metadata_filename = filename.split('.')[0] + '.ipynb-meta'
        metadata_filepath = os.path.join(filedir, metadata_filename)

        # If filename starts with draft, set the status accordingly
        if filename.lower().startswith('draft'):
            metadata['status'] = 'draft'

        with open(filepath) as f:
            nb = nbformat.read(f, 'ipynb')  # read in ipynb content

        first_cell = nb.worksheets[0].cells[0]

        # Read in metadata
        metadata = join_metadata(metadata, nb.metadata)

        if 'pelican' in first_cell.metadata:
            m = first_cell.metadata['pelican']
            metadata = join_metadata(metadata, m)

        if os.path.exists(metadata_filepath):
            # Metadata is in an external file, process using Pelican MD Reader
            md_reader = MarkdownReader(self.settings)
            _content, m = md_reader.read(metadata_filepath)
            metadata = join_metadata(metadata, m)

        # Reformat metadata into pelican acceptable format
        for k, v in metadata.items():
            del metadata[k]
            k = k.lower()
            metadata[k] = self.process_metadata(k, v)

        metadata['ipython'] = True

        # use first cell as the title if flag is set
        field = 'IPYNB_FIRST_CELL_HEADING_AS_TITLE'
        if self.settings.get(field, False) and first_cell.cell_type == 'heading':
            metadata['title'] = first_cell.source
            # Truncate the first cell from notebook
            nb.worksheets[0].cells = nb.worksheets[0].cells[1:]

        # Convert ipython notebook to html
        config = Config({'CSSHTMLHeaderPreprocessor': {'enabled': True,
                         'highlight_class': '.highlight-ipynb'}})
        exporter = HTMLExporter(config=config, template_file='basic',
                                filters={'highlight2html': custom_highlighter})

        content, info = exporter.from_notebook_node(nb)

        if BeautifulSoup:
            soup = BeautifulSoup(content)
            for i in soup.findAll("div", {"class" : "input"}):
                if i.findChildren()[1].find(text='#ignore') is not None:
                    i.extract()
        else:
            soup = content

        content = '<body>{0}</body>'.format(soup)  # So Pelican HTMLReader works
        parser = MyHTMLParser(self.settings, filename)
        parser.feed(content)
        parser.close()
        body = parser.body
        summary = parser.summary

        field = 'IPYNB_FIRST_CONTENT_AS_SUMMARY'
        first_cell = nb.worksheets[0].cells[0]
        if self.settings.get(field, False) and first_cell.cell_type == 'markdown':
            raw = nb.worksheets[0].cells[0].source
            md = markdown.Markdown()
            metadata['summary'] = md.convert(raw)
        else:
            metadata['summary'] = summary

        # Remove some CSS styles, so it doesn't break the theme.
        def filter_tags(style_text):
            style_list = style_text.split('\n')
            exclude = ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'a', 'ul', 'ol', 'li',
                       '.rendered_html', '@media', '.navbar', 'nav.navbar', '.navbar-text',
                       'code', 'pre', 'div.text_cell_render']
            style_list = [i for i in style_list if len(list(filter(i.startswith, exclude))) == 0]
            ans = '\n'.join(style_list)
            return '<style type=\"text/css\">{0}</style>'.format(ans)

        css = '\n'.join(filter_tags(css) for css in info['inlining']['css'])
        css = css + CUSTOM_CSS
        body = css + body

        return body, metadata
Example no. 26
 def _config_changed(self, name, old, new):
     # warn on change of renamed config section
     if new.InlineBackendConfig != getattr(old, 'InlineBackendConfig',
                                           Config()):
         warn("InlineBackendConfig has been renamed to InlineBackend")
     super(InlineBackend, self)._config_changed(name, old, new)
Example no. 27
def main():
    # command-line options
    define("debug", default=False, help="run in debug mode", type=bool)
    define("port", default=5000, help="run on the given port", type=int)
    define("cache_expiry_min", default=10*60, help="minimum cache expiry (seconds)", type=int)
    define("cache_expiry_max", default=2*60*60, help="maximum cache expiry (seconds)", type=int)
    define("mc_threads", default=1, help="number of threads to use for Async Memcache", type=int)
    define("threads", default=1, help="number of threads to use for background IO", type=int)
    tornado.options.parse_command_line()
    
    # NBConvert config
    config = Config()
    config.HTMLExporter.template_file = 'basic'
    config.NbconvertApp.fileext = 'html'
    config.CSSHTMLHeaderTransformer.enabled = False
    # don't strip the files prefix - we use it for redirects
    # config.Exporter.filters = {'strip_files_prefix': lambda s: s}
    
    exporter = HTMLExporter(config=config, log=log.app_log)
    
    # DEBUG env implies both autoreload and log-level
    if os.environ.get("DEBUG"):
        options.debug = True
        logging.getLogger().setLevel(logging.DEBUG)
    
    # setup memcache
    mc_pool = ThreadPoolExecutor(options.mc_threads)
    pool = ThreadPoolExecutor(options.threads)
    memcache_urls = os.environ.get('MEMCACHIER_SERVERS',
        os.environ.get('MEMCACHE_SERVERS')
    )
    if pylibmc and memcache_urls:
        kwargs = dict(pool=mc_pool)
        username = os.environ.get('MEMCACHIER_USERNAME', '')
        password = os.environ.get('MEMCACHIER_PASSWORD', '')
        if username and password:
            kwargs['binary'] = True
            kwargs['username'] = username
            kwargs['password'] = password
            log.app_log.info("Using SASL memcache")
        else:
            log.app_log.info("Using plain memecache")
        
        cache = AsyncMultipartMemcache(memcache_urls.split(','), **kwargs)
    else:
        log.app_log.info("Using in-memory cache")
        cache = DummyAsyncCache()
    
    # setup tornado handlers and settings
    
    template_path = pjoin(here, 'templates')
    static_path = pjoin(here, 'static')
    env = Environment(loader=FileSystemLoader(template_path))
    env.filters['markdown'] = markdown2html
    env.globals.update(nrhead=nrhead, nrfoot=nrfoot)
    AsyncHTTPClient.configure(HTTPClientClass)
    client = AsyncHTTPClient()
    github_client = AsyncGitHubClient(client)
    github_client.authenticate()
    
    settings = dict(
        jinja2_env=env,
        static_path=static_path,
        client=client,
        github_client=github_client,
        exporter=exporter,
        cache=cache,
        cache_expiry_min=options.cache_expiry_min,
        cache_expiry_max=options.cache_expiry_max,
        pool=pool,
        render_timeout=20,
    )
    
    # create and start the app
    app = web.Application(handlers, debug=options.debug, **settings)
    http_server = httpserver.HTTPServer(app)
    log.app_log.info("Listening on port %i", options.port)
    http_server.listen(options.port)
    ioloop.IOLoop.instance().start()
Example no. 28
    def read(self, filepath):
        metadata = {}

        # Files
        filedir = os.path.dirname(filepath)
        filename = os.path.basename(filepath)
        metadata_filename = filename.split('.')[0] + '.ipynb-meta'
        metadata_filepath = os.path.join(filedir, metadata_filename)

        # Load metadata
        if os.path.exists(metadata_filepath):
            # Metadata is in an external file, process using Pelican MD Reader
            md_reader = MarkdownReader(self.settings)
            _content, metadata = md_reader.read(metadata_filepath)
        else:
            # Load metadata from ipython notebook file
            ipynb_file = open(filepath)
            metadata = json.load(ipynb_file)['metadata']

            # Fix metadata to pelican standards
            for key, value in metadata.items():
                del metadata[key]
                key = key.lower()
                metadata[key] = self.process_metadata(key, value)
        metadata['ipython'] = True

        # Convert ipython notebook to html
        config = Config({
            'CSSHTMLHeaderTransformer': {
                'enabled': True,
                'highlight_class': '.highlight-ipynb'
            }
        })
        exporter = HTMLExporter(config=config,
                                template_file='basic',
                                filters={'highlight2html': custom_highlighter})

        content, info = exporter.from_filename(filepath)

        if BeautifulSoup:
            soup = BeautifulSoup(content)
            for i in soup.findAll("div", {"class": "input"}):
                if i.findChildren()[1].find(text='#ignore') is not None:
                    i.extract()
        else:
            soup = content

        # Process using Pelican HTMLReader
        content = '<body>{0}</body>'.format(
            soup)  # So Pelican HTMLReader works
        parser = MyHTMLParser(self.settings, filename)
        parser.feed(content)
        parser.close()
        body = parser.body
        if ('IPYNB_USE_META_SUMMARY' in self.settings.keys() and \
          self.settings['IPYNB_USE_META_SUMMARY'] == False) or \
          'IPYNB_USE_META_SUMMARY' not in self.settings.keys():
            metadata['summary'] = parser.summary

        def filter_css(style_text):
            '''
            HACK: IPython returns a lot of CSS including its own bootstrap.
            Get only the IPython Notebook CSS styles.
            '''
            index = style_text.find('/*!\n*\n* IPython notebook\n*\n*/')
            if index > 0:
                style_text = style_text[index:]
            index = style_text.find('/*!\n*\n* IPython notebook webapp\n*\n*/')
            if index > 0:
                style_text = style_text[:index]

            style_text = re.sub(r'color\:\#0+(;)?', '', style_text)
            style_text = re.sub(
                r'\.rendered_html[a-z0-9,._ ]*\{[a-z0-9:;%.#\-\s\n]+\}', '',
                style_text)

            return '<style type=\"text/css\">{0}</style>'.format(style_text)

        ipython_css = '\n'.join(
            filter_css(css_style) for css_style in info['inlining']['css'])
        body = ipython_css + body + LATEX_CUSTOM_SCRIPT

        return body, metadata
Example no. 29
 def default_config(self):
     c = Config({"RevealHelpPreprocessor": {"enabled": True}})
     c.merge(super(SlidesExporter, self).default_config)
     return c
Example no. 30
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import glob

from IPython.config import Config
from IPython.nbconvert import HTMLExporter

c = Config({"ExecutePreprocessor": {"enabled": True}})

exporter = HTMLExporter(config=c)

for filename in glob.glob("example-notebooks/*.ipynb"):
    print(filename)
    exporter.from_filename(filename)
Example no. 31
 def default_config(self):
     return Config()
Example no. 32
def main():
    # command-line options
    define("debug", default=False, help="run in debug mode", type=bool)
    define("no_cache", default=False, help="Do not cache results", type=bool)
    define(
        "localfiles",
        default="",
        help=
        "Allow to serve local files under /localfile/* this can be a security risk",
        type=str)
    define("port", default=5000, help="run on the given port", type=int)
    define("cache_expiry_min",
           default=10 * 60,
           help="minimum cache expiry (seconds)",
           type=int)
    define("cache_expiry_max",
           default=2 * 60 * 60,
           help="maximum cache expiry (seconds)",
           type=int)
    define("mc_threads",
           default=1,
           help="number of threads to use for Async Memcache",
           type=int)
    define("threads",
           default=1,
           help="number of threads to use for rendering",
           type=int)
    define("processes",
           default=0,
           help="use processes instead of threads for rendering",
           type=int)
    define("frontpage",
           default=FRONTPAGE_JSON,
           help="path to json file containing frontpage content",
           type=str)
    define("sslcert", help="path to ssl .crt file", type=str)
    define("sslkey", help="path to ssl .key file", type=str)
    define("default_format",
           default="html",
           help="format to use for legacy / URLs",
           type=str)
    define("proxy_host", default="", help="The proxy URL.", type=str)
    define("proxy_port", default="", help="The proxy port.", type=int)
    tornado.options.parse_command_line()

    # NBConvert config
    config = Config()
    config.NbconvertApp.fileext = 'html'
    config.CSSHTMLHeaderTransformer.enabled = False
    # don't strip the files prefix - we use it for redirects
    # config.Exporter.filters = {'strip_files_prefix': lambda s: s}

    # DEBUG env implies both autoreload and log-level
    if os.environ.get("DEBUG"):
        options.debug = True
        logging.getLogger().setLevel(logging.DEBUG)

    # setup memcache
    mc_pool = ThreadPoolExecutor(options.mc_threads)

    # setup formats
    formats = configure_formats(options, config, log.app_log)

    if options.processes:
        pool = ProcessPoolExecutor(options.processes)
    else:
        pool = ThreadPoolExecutor(options.threads)

    memcache_urls = os.environ.get('MEMCACHIER_SERVERS',
                                   os.environ.get('MEMCACHE_SERVERS'))

    # Handle linked Docker containers
    if (os.environ.get('NBCACHE_PORT')):
        tcp_memcache = os.environ.get('NBCACHE_PORT')
        memcache_urls = tcp_memcache.split('tcp://')[1]

    if (os.environ.get('NBINDEX_PORT')):
        log.app_log.info("Indexing notebooks")
        tcp_index = os.environ.get('NBINDEX_PORT')
        index_url = tcp_index.split('tcp://')[1]
        index_host, index_port = index_url.split(":")
        indexer = ElasticSearch(index_host, index_port)
    else:
        log.app_log.info("Not indexing notebooks")
        indexer = NoSearch()

    if options.no_cache:
        log.app_log.info("Not using cache")
        cache = MockCache()
    elif pylibmc and memcache_urls:
        kwargs = dict(pool=mc_pool)
        username = os.environ.get('MEMCACHIER_USERNAME', '')
        password = os.environ.get('MEMCACHIER_PASSWORD', '')
        if username and password:
            kwargs['binary'] = True
            kwargs['username'] = username
            kwargs['password'] = password
            log.app_log.info("Using SASL memcache")
        else:
            log.app_log.info("Using plain memecache")

        cache = AsyncMultipartMemcache(memcache_urls.split(','), **kwargs)
    else:
        log.app_log.info("Using in-memory cache")
        cache = DummyAsyncCache()

    # setup tornado handlers and settings

    template_path = pjoin(here, 'templates')
    static_path = pjoin(here, 'static')
    env = Environment(loader=FileSystemLoader(template_path))
    env.filters['markdown'] = markdown.markdown
    try:
        git_data = git_info(here)
    except Exception as e:
        app_log.error("Failed to get git info: %s", e)
        git_data = {}
    else:
        git_data['msg'] = escape(git_data['msg'])

    if options.no_cache:
        # force jinja to recompile template every time
        env.globals.update(cache_size=0)
    env.globals.update(
        nrhead=nrhead,
        nrfoot=nrfoot,
        git_data=git_data,
        ipython_info=ipython_info(),
        len=len,
    )
    AsyncHTTPClient.configure(HTTPClientClass)
    client = AsyncHTTPClient()
    github_client = AsyncGitHubClient(client)

    # load frontpage sections
    with io.open(options.frontpage, 'r') as f:
        frontpage_sections = json.load(f)

    # cache frontpage links for the maximum allowed time
    max_cache_uris = {''}
    for section in frontpage_sections:
        for link in section['links']:
            max_cache_uris.add('/' + link['target'])

    fetch_kwargs = dict(connect_timeout=10, )
    if options.proxy_host:
        fetch_kwargs.update(
            dict(proxy_host=options.proxy_host, proxy_port=options.proxy_port))

        log.app_log.info("Using web proxy {proxy_host}:{proxy_port}."
                         "".format(**fetch_kwargs))

    settings = dict(
        log_function=log_request,
        jinja2_env=env,
        static_path=static_path,
        client=client,
        github_client=github_client,
        formats=formats,
        default_format=options.default_format,
        config=config,
        index=indexer,
        cache=cache,
        cache_expiry_min=options.cache_expiry_min,
        cache_expiry_max=options.cache_expiry_max,
        max_cache_uris=max_cache_uris,
        frontpage_sections=frontpage_sections,
        pool=pool,
        gzip=True,
        render_timeout=20,
        localfile_path=os.path.abspath(options.localfiles),
        fetch_kwargs=fetch_kwargs,
    )

    # handle handlers
    handlers = init_handlers(formats)
    if options.localfiles:
        log.app_log.warning(
            "Serving local notebooks in %s, this can be a security risk",
            options.localfiles)
        # use absolute or relative paths:
        local_handlers = [(r'/localfile/(.*)', LocalFileHandler)]
        handlers = (local_handlers +
                    format_providers(formats, local_handlers) + handlers)

    # load ssl options
    ssl_options = None
    if options.sslcert:
        ssl_options = {
            'certfile': options.sslcert,
            'keyfile': options.sslkey,
        }

    # create and start the app
    app = web.Application(handlers, debug=options.debug, **settings)
    http_server = httpserver.HTTPServer(app,
                                        xheaders=True,
                                        ssl_options=ssl_options)
    log.app_log.info("Listening on port %i", options.port)
    http_server.listen(options.port)
    ioloop.IOLoop.instance().start()
Example no. 33
 def default_config(self):
     c = Config({'ExtractFigureTransformer':{'enabled':True}})
     c.merge(super(RstExporter,self).default_config)
     return c
Example no. 34
def main():
    log_format = '%(asctime)s %(levelname)s: %(message)s'
    log_datefmt = '%m/%d/%Y %I:%M:%S %p'

    parser = argparse.ArgumentParser()
    parser.add_argument('--version',
                        '-v',
                        action='version',
                        version=runipy.__version__,
                        help='print version information')
    parser.add_argument('input_file',
                        nargs='?',
                        help='.ipynb file to run (or stdin)')
    parser.add_argument('output_file',
                        nargs='?',
                        help='.ipynb file to save cell output to')
    parser.add_argument('--quiet',
                        '-q',
                        action='store_true',
                        help='don\'t print anything unless things go wrong')
    parser.add_argument('--overwrite',
                        '-o',
                        action='store_true',
                        help='write notebook output back to original notebook')
    parser.add_argument('--html',
                        nargs='?',
                        default=False,
                        help='output an HTML snapshot of the notebook')
    parser.add_argument('--template',
                        nargs='?',
                        default=False,
                        help='template to use for HTML output')
    parser.add_argument('--pylab',
                        action='store_true',
                        help='start notebook with pylab enabled')
    parser.add_argument('--matplotlib',
                        action='store_true',
                        help='start notebook with matplotlib inlined')
    parser.add_argument('--skip-exceptions',
                        '-s',
                        action='store_true',
                        help='if an exception occurs in a cell,' +
                        ' continue running the subsequent cells')
    parser.add_argument(
        '--stdout',
        action='store_true',
        help='print notebook to stdout (or use - as output_file)')
    parser.add_argument(
        '--stdin',
        action='store_true',
        help='read notebook from stdin (or use - as input_file)')
    parser.add_argument(
        '--no-chdir',
        action='store_true',
        help="do not change directory to notebook's at kernel startup")
    parser.add_argument('--profile-dir',
                        help="set the profile location directly")
    args = parser.parse_args()

    if args.overwrite:
        if args.output_file is not None:
            print(
                'Error: output_filename must not be provided if '
                '--overwrite (-o) given',
                file=stderr)
            exit(1)
        else:
            args.output_file = args.input_file

    if not args.quiet:
        logging.basicConfig(level=logging.INFO,
                            format=log_format,
                            datefmt=log_datefmt)

    working_dir = None

    payload_source = ""
    payload = ""
    if args.input_file == '-' or args.stdin:  # force stdin
        payload_source = stdin.name
        payload = stdin.read()
    elif not args.input_file and stdin.isatty():  # no force, empty stdin
        parser.print_help()
        exit()
    elif not args.input_file:  # no file -> default stdin
        payload_source = stdin.name
        payload = stdin.read()
    else:  # must have specified normal input_file
        with open(args.input_file) as input_file:
            payload_source = input_file.name
            payload = input_file.read()
        working_dir = os.path.dirname(args.input_file)

    if args.no_chdir:
        working_dir = None

    if args.profile_dir:
        profile_dir = os.path.expanduser(args.profile_dir)
    else:
        profile_dir = None

    logging.info('Reading notebook %s', payload_source)
    try:
        # Ipython 3
        nb = reads(payload, 3)
    except (TypeError, NBFormatError):
        # Ipython 2
        nb = reads(payload, 'json')
    nb_runner = NotebookRunner(nb, args.pylab, args.matplotlib, profile_dir,
                               working_dir)

    exit_status = 0
    try:
        nb_runner.run_notebook(skip_exceptions=args.skip_exceptions)
    except NotebookError:
        exit_status = 1

    if args.output_file and args.output_file != '-':
        logging.info('Saving to %s', args.output_file)
        with open(args.output_file, 'w') as output_filehandle:
            try:
                # IPython 3
                write(nb_runner.nb, output_filehandle, 3)
            except (TypeError, NBFormatError):
                # IPython 2
                write(nb_runner.nb, output_filehandle, 'json')

    if args.stdout or args.output_file == '-':
        try:
            # IPython 3
            write(nb_runner.nb, stdout, 3)
        except (TypeError, NBFormatError):
            # IPython 2
            write(nb_runner.nb, stdout, 'json')
        print()

    if args.html is not False:
        if args.html is None:
            # if --html is given but no filename is provided,
            # come up with a sane output name based on the
            # input filename
            if args.input_file.endswith('.ipynb'):
                args.html = args.input_file[:-6] + '.html'
            else:
                args.html = args.input_file + '.html'

        if args.template is False:
            exporter = HTMLExporter()
        else:
            exporter = HTMLExporter(config=Config({
                'HTMLExporter': {
                    'template_file': args.template,
                    'template_path': ['.', '/']
                }
            }))

        logging.info('Saving HTML snapshot to %s', args.html)
        output, resources = exporter.from_notebook_node(
            convert(nb_runner.nb, current_nbformat))
        with codecs.open(args.html, 'w', encoding='utf-8') as html_file:
            html_file.write(output)

    nb_runner.shutdown_kernel()

    if exit_status != 0:
        logging.warning('Exiting with nonzero exit status')
    exit(exit_status)
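
A hedged usage sketch for the runner above: the script name run_notebook.py and the main() entry point are assumptions (the enclosing function's name is not shown here), and the snippet only combines flags that the parser defines, driving it by substituting sys.argv instead of shelling out.

import sys

# Hypothetical invocation: execute analysis.ipynb in place (--overwrite writes the
# result back to the input file), keep running past failing cells, and save an
# HTML snapshot next to it.
sys.argv = ['run_notebook.py', 'analysis.ipynb',
            '--overwrite', '--skip-exceptions', '--html', 'analysis.html']
main()  # assumed name for the function containing the parser set up above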
Esempio n. 35
0
class LatexExporter(exporter.Exporter):
    """
    Exports to a Latex template.  Inherit from this class if your template is
    LaTeX based and you need custom tranformers/filters.  Inherit from it if 
    you are writing your own HTML template and need custom tranformers/filters.  
    If you don't need custom tranformers/filters, just change the 
    'template_file' config option.  Place your template in the special "/latex" 
    subfolder of the "../templates" folder.
    """

    file_extension = Unicode(
        'tex',
        config=True,
        help="Extension of the file that should be written to disk")

    template_file = Unicode('base',
                            config=True,
                            help="Name of the template file to use")

    #LaTeX constants
    template_path = Unicode("/../templates/latex/",
                            config=True,
                            help="Path where the template files are located.")

    template_skeleton_path = Unicode(
        "/../templates/latex/skeleton/",
        config=True,
        help="Path where the template skeleton files are located.")

    #Special Jinja2 syntax that will not conflict when exporting LaTeX.
    jinja_comment_block_start = Unicode("((=", config=True)
    jinja_comment_block_end = Unicode("=))", config=True)
    jinja_variable_block_start = Unicode("(((", config=True)
    jinja_variable_block_end = Unicode(")))", config=True)
    jinja_logic_block_start = Unicode("((*", config=True)
    jinja_logic_block_end = Unicode("*))", config=True)

    #Extension that the template files use.
    template_extension = Unicode(".tplx", config=True)

    _default_config = Config({
        'display_data_priority':
        ['latex', 'svg', 'png', 'jpg', 'jpeg', 'text'],
        'extra_ext_map': {
            'svg': 'pdf'
        },
        'ExtractFigureTransformer':
        Config({'enabled': True})
    })

    def __init__(self, transformers=None, filters=None, config=None, **kw):
        """
        Public constructor
    
        Parameters
        ----------
        transformers : list[of transformer]
            Custom transformers to apply to the notebook prior to engaging
            the Jinja template engine.  Any transformers specified here
            will override existing transformers if a naming conflict
            occurs.
        filters : list[of filter]
            Custom filters to make accessible to the Jinja templates.  Any
            filters specified here will override existing filters if a
            naming conflict occurs.
        config : config
            User configuration instance.
        """

        c = self.default_config
        if config:
            c.merge(Config(config))

        #Call base class constructor with the merged configuration.
        super(LatexExporter, self).__init__(transformers,
                                            filters,
                                            config=c,
                                            **kw)

        #self.extract_figure_transformer.extra_ext_map={'svg':'pdf'}

    def _register_filters(self):
        """
        Register all of the filters required for the exporter.
        """

        #Register the filters of the base class.
        super(LatexExporter, self)._register_filters()

        #Add latex filters to the Jinja2 environment
        self.register_filter('escape_tex',
                             nbconvert.filters.latex.escape_latex)
        self.register_filter('highlight',
                             nbconvert.filters.highlight.highlight2latex)

    def _register_transformers(self):
        """
        Register all of the transformers needed for this exporter.
        """

        #Register the transformers of the base class.
        super(LatexExporter, self)._register_transformers()

        #Register latex transformer
        self.register_transformer(LatexTransformer)
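
A minimal usage sketch for the exporter defined above, assuming this LatexExporter class is importable and a placeholder notebook.ipynb exists on disk; it uses the IPython.nbformat.current reader and the from_notebook_node API already used elsewhere in these examples. Figures converted by ExtractFigureTransformer come back in the resources dict.

from IPython.nbformat import current as nbformat

exporter = LatexExporter()
with open('notebook.ipynb') as f:          # placeholder notebook path
    nb = nbformat.read(f, 'json')

# body is the rendered LaTeX source; resources holds extracted figures.
body, resources = exporter.from_notebook_node(nb)

with open('notebook.tex', 'w') as f:       # placeholder output path
    f.write(body)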
Esempio n. 36
0
 def default_config(self):
     if self._default_config:
         return Config(deepcopy(self._default_config))
     else:
         return Config()
Esempio n. 37
0
        'CACHE_TYPE': 'lib.MemcachedMultipart.multipartmemecached',
        'CACHE_MEMCACHED_SERVERS': servers
    }
else:
    print 'using sasl memcached'
    config = {
        'CACHE_TYPE': 'lib.MemcachedMultipart.multipartmemecached',
        'CACHE_MEMCACHED_SERVERS': servers,
        'CACHE_MEMCACHED_PASSWORD': password[0],
        'CACHE_MEMCACHED_USERNAME': username[0]
    }

cache = Cache(app, config=config)

from IPython.config import Config
config = Config()
config.HTMLExporter.template_file = 'basic'
config.NbconvertApp.fileext = 'html'
config.CSSHtmlHeaderTransformer.enabled = False

C = HTMLExporter(config=config)

minutes = 60
hours = 60 * minutes

import newrelic.agent


def nrhead():
    return newrelic.agent.get_browser_timing_header()
Esempio n. 38
0
def notebook(preprocessor, tag, markup):
    match = FORMAT.search(markup)
    if match:
        argdict = match.groupdict()
        src = argdict['src']
        start = argdict['start']
        end = argdict['end']
        language = argdict['language']
    else:
        raise ValueError("Error processing input, "
                         "expected syntax: {0}".format(SYNTAX))

    if start:
        start = int(start)
    else:
        start = 0

    if end:
        end = int(end)
    else:
        end = None

    language_applied_highlighter = partial(custom_highlighter,
                                           language=language)

    nb_dir = preprocessor.configs.getConfig('NOTEBOOK_DIR')
    nb_path = os.path.join('content', nb_dir, src)

    if not os.path.exists(nb_path):
        raise ValueError("File {0} could not be found".format(nb_path))

    # Create the custom notebook converter
    c = Config({
        'CSSHTMLHeaderTransformer': {
            'enabled': True,
            'highlight_class': '.highlight-ipynb'
        },
        'SubCell': {
            'enabled': True,
            'start': start,
            'end': end
        }
    })

    template_file = 'basic'
    if LooseVersion(IPython.__version__) >= '2.0':
        if os.path.exists('pelicanhtml_2.tpl'):
            template_file = 'pelicanhtml_2'
    else:
        if os.path.exists('pelicanhtml_1.tpl'):
            template_file = 'pelicanhtml_1'

    if LooseVersion(IPython.__version__) >= '2.0':
        subcell_kwarg = dict(preprocessors=[SubCell])
    else:
        subcell_kwarg = dict(transformers=[SubCell])

    exporter = HTMLExporter(
        config=c,
        template_file=template_file,
        filters={'highlight2html': language_applied_highlighter},
        **subcell_kwarg)

    # read and parse the notebook
    with open(nb_path) as f:
        nb_text = f.read()
    nb_json = nbformat.reads_json(nb_text)
    (body, resources) = exporter.from_notebook_node(nb_json)
    # To make the highlighting work with IPython 2.x
    resources['inlining']['css'][1] = resources['inlining']['css'][1].replace(
        'highlight', 'highlight-ipynb')
    # if we haven't already saved the header, save it here.
    if not notebook.header_saved:
        print("\n ** Writing styles to _nb_header.html: "
              "this should be included in the theme. **\n")

        header = '\n'.join(
            CSS_WRAPPER.format(css_line)
            for css_line in resources['inlining']['css'])
        header += JS_INCLUDE

        with open('_nb_header.html', 'w') as f:
            f.write(header)
        notebook.header_saved = True

    # this will stash special characters so that they won't be transformed
    # by subsequent processes.
    body = preprocessor.configs.htmlStash.store(body, safe=True)
    return body
Esempio n. 39
0
 def test_remote_profile_dir(self):
     cfg = Config()
     launcher_cfg = getattr(cfg, self.launcher_class.__name__)
     launcher_cfg.remote_profile_dir = "foo"
     launcher = self.build_launcher(config=cfg)
     self.assertEqual(launcher.remote_profile_dir, "foo")
Esempio n. 40
0
class PGContentsAPITest(APITest):

    config = Config()
    config.NotebookApp.contents_manager_class = PostgresContentsManager
    config.PostgresContentsManager.user_id = 'test'

    # Don't support hidden directories.
    hidden_dirs = []

    @property
    def contents_manager(self):
        return self.notebook.contents_manager

    @property
    def user_id(self):
        return self.contents_manager.user_id

    @property
    def engine(self):
        return self.contents_manager.engine

    # Superclass method overrides.
    def make_dir(self, api_path):
        with self.engine.begin() as db:
            create_directory(db, self.user_id, api_path)

    def make_txt(self, api_path, txt):
        with self.engine.begin() as db:
            save_file(
                db,
                self.user_id,
                api_path,
                b64encode(txt.encode('utf-8')),
                UNLIMITED,
            )

    def make_blob(self, api_path, blob):
        with self.engine.begin() as db:
            save_file(db, self.user_id, api_path, b64encode(blob), UNLIMITED)

    def make_nb(self, api_path, nb):
        with self.engine.begin() as db:
            save_file(db, self.user_id, api_path, writes_base64(nb), UNLIMITED)

    # TODO: Use these rather than relying on `purge`.
    def delete_dir(self, api_path):
        raise NotImplementedError()

    def delete_file(self, api_path):
        raise NotImplementedError()

    def isfile(self, api_path):
        with self.engine.begin() as db:
            return file_exists(db, self.user_id, api_path)

    def isdir(self, api_path):
        with self.engine.begin() as db:
            return dir_exists(db, self.user_id, api_path)

    def setUp(self):
        self.contents_manager.purge()
        self.contents_manager.ensure_user()
        super(PGContentsAPITest, self).setUp()

    def tearDown(self):
        self.contents_manager.purge()

    # End superclass method overrides.

    # Test overrides.
    def test_mkdir_hidden_400(self):
        """
        We don't support hidden directories.
        """
        pass
Esempio n. 41
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import glob

from IPython.config import Config
from IPython.nbconvert import HTMLExporter

c = Config({'ExecutePreprocessor': {'enabled': True}})

exporter = HTMLExporter(config=c)

for filename in glob.glob("example-notebooks/*.ipynb"):
    print(filename)
    exporter.from_filename(filename)
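
As a follow-up sketch: the loop above discards the exporter's return value, so nothing is written to disk. The variant below (with an assumed foo.ipynb -> foo.html naming convention) saves the rendered HTML next to each notebook.

import codecs

for filename in glob.glob("example-notebooks/*.ipynb"):
    print(filename)
    body, resources = exporter.from_filename(filename)
    # Assumed convention: write example-notebooks/foo.html next to foo.ipynb.
    out_name = filename[:-len(".ipynb")] + ".html"
    with codecs.open(out_name, "w", encoding="utf-8") as f:
        f.write(body)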
Esempio n. 42
0
 def default_config(self):
     c = Config({'CSSHtmlHeaderTransformer':{'enabled':True}})
     c.merge(super(FullHtmlExporter,self).default_config)
     return c
Esempio n. 43
0
 def default_config(self):
     c = Config({'ExtractOutputTransformer':{'enabled':True}})
     c.merge(super(MarkdownExporter,self).default_config)
     return c
Esempio n. 44
0
 def default_config(self):
     c = Config({'ExtractOutputTransformer':{'enabled':True}})
     c.merge(super(RSTExporter,self).default_config)
     return c
Esempio n. 45
0
 def default_config(self):
     c = Config({'ExtractOutputPreprocessor': {'enabled': True}})
     c.merge(super(MarkdownExporter, self).default_config)
     return c
Esempio n. 46
0
 def default_config(self):
     c = Config({"ExtractFigureTransformer": {"enabled": True}})
     c.merge(super(RSTExporter, self).default_config)
     return c
Esempio n. 47
0
from .. import FileRenderer
import re
import os.path
from IPython.config import Config
# from IPython.nbconvert import export_python
from IPython.nbconvert.exporters import HTMLExporter
from IPython.nbformat import current as nbformat

c = Config()
c.HTMLExporter.template_file = 'basic'
c.NbconvertApp.fileext = 'html'
c.CSSHTMLHeaderTransformer.enabled = False
c.Exporter.filters = {
    'strip_files_prefix': lambda s: s
}  #don't strip the files prefix
exporter = HTMLExporter(config=c)


class NbFormatError(Exception):
    pass


class IPynbRenderer(FileRenderer):
    def _detect(self, file_pointer):
        _, ext = os.path.splitext(file_pointer.name)
        return ext.lower() == '.ipynb'

    def _render(self, file_pointer, **kwargs):
        content = file_pointer.read()
        nb = self._parse_json(content)
        name, theme = self._get_metadata(nb)
Esempio n. 48
0
 def default_config(self):
     c = Config({'CSSHtmlHeaderTransformer': {'enabled': True}})
     c.merge(super(FullHtmlExporter, self).default_config)
     return c
Esempio n. 49
0
 def default_config(self):
     c = Config({'SphinxTransformer': {'enabled':True}})
     c.merge(super(SphinxHowtoExporter,self).default_config)
     return c
Esempio n. 50
0
 def default_config(self):
     c = Config({"CSSHTMLHeaderPreprocessor": {"enabled": True}, "HighlightMagicsPreprocessor": {"enabled": True}})
     c.merge(super(HTMLExporter, self).default_config)
     return c
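
A short sketch of how these default_config overrides typically take effect, assuming the construction pattern shown in Esempio n. 35: the user-supplied Config is merged on top of the exporter's defaults, so user settings win while untouched defaults survive. The preprocessor names below are just the ones used in the snippets above.

from IPython.config import Config

# Defaults in the style of the default_config snippets above...
defaults = Config({'CSSHTMLHeaderPreprocessor': {'enabled': True},
                   'HighlightMagicsPreprocessor': {'enabled': True}})

# ...and a user override merged on top, as the exporter __init__ does.
user = Config({'CSSHTMLHeaderPreprocessor': {'enabled': False}})
defaults.merge(user)

print(defaults.CSSHTMLHeaderPreprocessor.enabled)    # False: the user setting wins
print(defaults.HighlightMagicsPreprocessor.enabled)  # True: the default is kept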