def default_config(self):
        # import jupyter_core.paths
        # import os
        c = Config({
            'NbConvertBase': {
                'display_data_priority': ['application/javascript',
                                          'text/html',
                                          'text/markdown',
                                          'image/svg+xml',
                                          'text/latex',
                                          'image/png',
                                          'image/jpeg',
                                          'text/plain'
                                          ]
            },
            'CSSHTMLHeaderPreprocessor': {
                'enabled': True},
            'HighlightMagicsPreprocessor': {
                'enabled': True},
            'ExtractOutputPreprocessor': {
                'enabled': True},
            'latex_envs.LenvsLatexPreprocessor': {'enabled': True}
        }
        )
        from jupyter_contrib_nbextensions.nbconvert_support import (
            templates_directory)
        c.merge(super(LenvsLatexExporter, self).default_config)

        # user_templates = os.path.join(jupyter_core.paths.jupyter_data_dir(),
        # 'templates')
        c.TemplateExporter.template_path = ['.', templates_directory()]

        # c.Exporter.preprocessors = ['tmp.LenvsLatexPreprocessor' ]
        # c.NbConvertApp.postprocessor_class = 'tmp.TocPostProcessor'
        return c
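A minimal usage sketch for an exporter carrying this default_config (the import path and notebook name are assumptions, not taken from the source above):

import nbformat
from latex_envs.latex_envs import LenvsLatexExporter  # assumed import path

nb = nbformat.read('example.ipynb', as_version=4)  # hypothetical notebook
exporter = LenvsLatexExporter()  # default_config is merged in by the traitlets machinery
body, resources = exporter.from_notebook_node(nb)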
Example No. 2
 def _compile_string(self, nb_json):
     """Export notebooks as HTML strings."""
     self._req_missing_ipynb()
     c = Config(self.site.config['IPYNB_CONFIG'])
     c.update(get_default_jupyter_config())
     exportHtml = HTMLExporter(config=c)
     body, _ = exportHtml.from_notebook_node(nb_json)
     return body
Example No. 3
 def default_config(self):
     c = Config({
         'RevealHelpPreprocessor': {
             'enabled': True,
             },
         })
     c.merge(super(SlidesExporter,self).default_config)
     return c
Example No. 4
    def default_config(self):
        c = Config({"ExtractOutputPreprocessor": {"enabled": True}})
        #  import here to avoid circular import
        from jupyter_contrib_nbextensions.nbconvert_support import templates_directory

        c.merge(super(TocExporter, self).default_config)

        c.TemplateExporter.template_path = [".", templates_directory()]

        return c
Example No. 5
 def _compile_string(self, nb_json):
     """Export notebooks as HTML strings."""
     self._req_missing_ipynb()
     c = Config(self.site.config['IPYNB_CONFIG'])
     c.update(get_default_jupyter_config())
     if 'template_file' not in self.site.config['IPYNB_CONFIG'].get('Exporter', {}):
         c['Exporter']['template_file'] = 'basic.tpl'  # not a typo
     exportHtml = HTMLExporter(config=c)
     body, _ = exportHtml.from_notebook_node(nb_json)
     return body
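This snippet relies on the difference between Config.update and Config.merge: update is a plain dict-style overwrite that replaces whole sections, while merge combines nested sections recursively. A small illustration (a sketch; the section and names are illustrative):

from traitlets.config import Config

a = Config({'Exporter': {'template_file': 'basic.tpl'}})
b = Config({'Exporter': {'preprocessors': ['SomePreprocessor']}})
a.merge(b)
# a.Exporter now holds both template_file and preprocessors;
# a.update(b) would have replaced the whole Exporter section instead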
Example No. 6
 def default_config(self):
     import jupyter_core.paths
     import os
     c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
     c.merge(super(TocExporter,self).default_config)
     
     user_templates = os.path.join(jupyter_core.paths.jupyter_data_dir(), 'templates')
     c.TemplateExporter.template_path = [
                             '.', user_templates ]
     return c
Example No. 7
 def default_config(self):
     c = Config({
         'RegexRemovePreprocessor': {
             'enabled': True
             },
         'TagRemovePreprocessor': {
             'enabled': True
             }
         })
     c.merge(super(TemplateExporter, self).default_config)
     return c
Example No. 8
 def default_config(self):
     c = Config({
         'ExtractOutputPreprocessor':{
             'enabled':True
             },
         'HighlightMagicsPreprocessor': {
             'enabled':True
             },
         })
     c.merge(super(RSTExporter,self).default_config)
     return c
Example No. 9
    def check_install(self, argv=None, dirs=None):
        """Check files were installed in the correct place."""
        if argv is None:
            argv = []
        if dirs is None:
            dirs = {
                'conf': jupyter_core.paths.jupyter_config_dir(),
                'data': jupyter_core.paths.jupyter_data_dir(),
            }
        conf_dir = dirs['conf']

        # do install
        main_app(argv=['enable'] + argv)

        # list everything that got installed
        installed_files = []
        for root, subdirs, files in os.walk(dirs['conf']):
            installed_files.extend([os.path.join(root, f) for f in files])
        nt.assert_true(
            installed_files,
            'Install should create files in {}'.format(dirs['conf']))

        # a bit of a hack to allow initializing a new app instance
        for klass in app_classes:
            reset_app_class(klass)

        # do uninstall
        main_app(argv=['disable'] + argv)
        # check the config directory
        conf_installed = [
            path for path in installed_files
            if path.startswith(conf_dir) and os.path.exists(path)]
        for path in conf_installed:
            with open(path, 'r') as f:
                conf = Config(json.load(f))
            nbapp = conf.get('NotebookApp', {})
            nt.assert_not_in(
                'jupyter_nbextensions_configurator',
                nbapp.get('server_extensions', []),
                'Uninstall should empty '
                'server_extensions list in file {}'.format(path))
            nbservext = nbapp.get('nbserver_extensions', {})
            nt.assert_false(
                {k: v for k, v in nbservext.items() if v},
                'Uninstall should disable all '
                'nbserver_extensions in file {}'.format(path))
            confstrip = {}
            confstrip.update(conf)
            confstrip.pop('NotebookApp', None)
            confstrip.pop('version', None)
            nt.assert_false(confstrip, 'Uninstall should leave config empty.')

        reset_app_class(DisableJupyterNbextensionsConfiguratorApp)
Example No. 10
 def default_config(self):
     c = Config({
         'ExtractOutputPreprocessor': {
             'enabled': True,
             'output_filename_template': '{unique_key}_{cell_index}_{index}{extension}'
         },
         'HighlightMagicsPreprocessor': {
             'enabled': True
         },
     })
     c.merge(super(UpgradedRSTExporter, self).default_config)
     return c
Example No. 11
def export_through_preprocessor(
        notebook_node, preproc_cls, exporter_class, export_format, customconfig=None):
    """Export a notebook through a given preprocessor."""
    config = Config(NbConvertApp={'export_format': export_format})
    if customconfig is not None:
        config.merge(customconfig)
    exporter = exporter_class(
        preprocessors=[preproc_cls.__module__ + '.' + preproc_cls.__name__],
        config=config)
    try:
        return exporter.from_notebook_node(notebook_node)
    except PandocMissing:
        raise SkipTest("Pandoc wasn't found")
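A hypothetical call to this helper, assuming a NotebookNode nb and nbconvert's stock classes:

from nbconvert import HTMLExporter
from nbconvert.preprocessors import ClearOutputPreprocessor

body, resources = export_through_preprocessor(
    nb, ClearOutputPreprocessor, HTMLExporter, 'html')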
Example No. 12
    def default_config(self):
        c = Config({'ExtractOutputPreprocessor': {'enabled': False}})
        #  import here to avoid circular import
        from jupyter_contrib_nbextensions.nbconvert_support import (
            templates_directory)
        c.merge(super(TocExporter, self).default_config)

        c.TemplateExporter.template_path = [
            '.',
            templates_directory(),
        ]

        return c
Example No. 13
    def load_config(self):
        paths = jupyter_config_path()
        paths.insert(0, os.getcwd())

        config_found = False
        full_config = Config()
        for config in NbGrader._load_config_files("nbgrader_config", path=paths, log=self.log):
            full_config.merge(config)
            config_found = True

        if not config_found:
            self.log.warning("No nbgrader_config.py file found. Rerun with DEBUG log level to see where nbgrader is looking.")

        return full_config
Example No. 14
 def default_config(self):
     c = Config({
         'NbConvertBase': {
             'display_data_priority' : ['application/javascript', 'text/html', 'text/markdown', 'application/pdf', 'image/svg+xml', 'text/latex', 'image/png', 'image/jpeg', 'text/plain']
             },
         'CSSHTMLHeaderPreprocessor':{
             'enabled':True
             },
         'HighlightMagicsPreprocessor': {
             'enabled':True
             }
         })
     c.merge(super(HTMLExporter,self).default_config)
     return c
Example No. 15
def test_url_config(hub_config, expected):
    # construct the config object
    cfg = Config()
    for key, value in hub_config.items():
        cfg.JupyterHub[key] = value

    # instantiate the Hub and load config
    app = JupyterHub(config=cfg)
    # validate config
    for key, value in hub_config.items():
        if key not in expected:
            assert getattr(app, key) == value

    # validate additional properties
    for key, value in expected.items():
        assert getattr(app, key) == value
Example No. 16
def get_html_from_filepath(filepath):
    """Convert ipython notebook to html
    Return: html content of the converted notebook
    """
    config = Config({'CSSHTMLHeaderTransformer': {'enabled': True,
                                                  'highlight_class': '.highlight-ipynb'}})

    config.HTMLExporter.preprocessors = [HtmlLinksPreprocessor]
    config.HtmlLinksPreprocessor['enabled'] = True

    path = os.path.dirname(os.path.realpath(__file__))
    exporter = HTMLExporter(config=config, template_file='no_code',
                            template_path=['.', path + '/../../../scripts/'],
                            filters={'highlight2html': custom_highlighter})
    content, info = exporter.from_filename(filepath)

    return content, info
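A hedged usage sketch (the notebook path is hypothetical):

content, info = get_html_from_filepath('content/analysis.ipynb')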
Example No. 17
    def default_config(self):
        c = Config({
            'ExtractOutputPreprocessor': {'enabled': True},
            'NbConvertBase': {
                'display_data_priority': ['text/html',
                                          'text/markdown',
                                          'image/svg+xml',
                                          'text/latex',
                                          'image/png',
                                          'image/jpeg',
                                          'text/plain'
                                          ]
            },

        })
        c.merge(super(MarkdownExporter, self).default_config)
        return c
Example No. 18
 def default_config(self):
     c = Config({
         'NbConvertBase': {
             'display_data_priority' : ['text/latex', 'application/pdf', 'image/png', 'image/jpeg', 'image/svg+xml', 'text/markdown', 'text/plain']
             },
          'ExtractOutputPreprocessor': {
                 'enabled':True
              },
          'SVG2PDFPreprocessor': {
                 'enabled':True
              },
          'LatexPreprocessor': {
                 'enabled':True
              },
          'SphinxPreprocessor': {
                 'enabled':True
              },
          'HighlightMagicsPreprocessor': {
                 'enabled':True
              }
      })
     c.merge(super(LatexExporter,self).default_config)
     return c
Example No. 19
 def default_config(self):
     c = Config(
         {
             "NbConvertBase": {
                 "display_data_priority": [
                     "text/latex",
                     "application/pdf",
                     "image/png",
                     "image/jpeg",
                     "image/svg+xml",
                     "text/markdown",
                     "text/plain",
                 ]
             },
             "ExtractOutputPreprocessor": {"enabled": True},
             "SVG2PDFPreprocessor": {"enabled": True},
             "LatexPreprocessor": {"enabled": True},
             "SphinxPreprocessor": {"enabled": True},
             "HighlightMagicsPreprocessor": {"enabled": True},
         }
     )
     c.merge(super(LatexExporter, self).default_config)
     return c
Example No. 20
    def _render_notebook_from_source(self,
                                     ipynb_source,
                                     indices=None,
                                     clear_output=False,
                                     clear_markdown=False,
                                     **kwargs):
        # type: (Text, Optional[Any], Optional[bool], Optional[bool], **Any) -> Text
        """
        Get HTML format of ipython notebook so as to be rendered in RELATE flow
        pages.
        :param ipynb_source: the :class:`text` read from an IPython notebook.
        :param indices: a :class:`list` instance, 0-based indices of notebook cells
        which are expected to be rendered.
        :param clear_output: a :class:`bool` instance, indicating whether existing
        execution output of code cells should be removed.
        :param clear_markdown: a :class:`bool` instance, indicating whether markdown
        cells will be ignored.
        :return:
        """
        import nbformat
        from nbformat.reader import parse_json
        nb_source_dict = parse_json(ipynb_source)

        if indices:
            nb_source_dict.update(
                {"cells": [nb_source_dict["cells"][idx] for idx in indices]})

        if clear_markdown:
            nb_source_dict.update({
                "cells": [
                    cell for cell in nb_source_dict["cells"]
                    if cell['cell_type'] != "markdown"
                ]
            })

        nb_source_dict.update({"cells": nb_source_dict["cells"]})

        import json
        ipynb_source = json.dumps(nb_source_dict)
        notebook = nbformat.reads(ipynb_source, as_version=4)

        from traitlets.config import Config
        c = Config()

        # This is to prevent execution of arbitrary code from the notebook
        c.ExecutePreprocessor.enabled = False
        if clear_output:
            c.ClearOutputPreprocessor.enabled = True

        c.CSSHTMLHeaderPreprocessor.enabled = False
        c.HighlightMagicsPreprocessor.enabled = False

        import os

        # Place the template in course template dir
        import course
        template_path = os.path.join(os.path.dirname(course.__file__),
                                     "templates", "course", "jinja2")
        c.TemplateExporter.template_path.append(template_path)

        from nbconvert import HTMLExporter
        html_exporter = HTMLExporter(config=c,
                                     template_file="nbconvert_template.tpl")

        (body, resources) = html_exporter.from_notebook_node(notebook)

        return "<div class='relate-notebook-container'>%s</div>" % body
Example No. 21
def notebook(preprocessor, tag, markup):
    match = FORMAT.search(markup)
    if match:
        argdict = match.groupdict()
        src = argdict['src']
        start = argdict['start']
        end = argdict['end']
        language = argdict['language']
    else:
        raise ValueError("Error processing input, "
                         "expected syntax: {0}".format(SYNTAX))

    if start:
        start = int(start)
    else:
        start = 0

    if end:
        end = int(end)
    else:
        end = None

    language_applied_highlighter = partial(custom_highlighter,
                                           language=language)

    nb_dir = preprocessor.configs.getConfig('NOTEBOOK_DIR')
    nb_path = os.path.join('content', nb_dir, src)

    if not os.path.exists(nb_path):
        raise ValueError("File {0} could not be found".format(nb_path))

    # Create the custom notebook converter
    c = Config({
        'CSSHTMLHeaderTransformer': {
            'enabled': True,
            'highlight_class': '.highlight-ipynb'
        },
        'SubCell': {
            'enabled': True,
            'start': start,
            'end': end
        }
    })

    template_file = 'basic'
    if IPYTHON_VERSION >= 3:
        if os.path.exists('pelicanhtml_3.tpl'):
            template_file = 'pelicanhtml_3'
    elif IPYTHON_VERSION == 2:
        if os.path.exists('pelicanhtml_2.tpl'):
            template_file = 'pelicanhtml_2'
    else:
        if os.path.exists('pelicanhtml_1.tpl'):
            template_file = 'pelicanhtml_1'

    if IPYTHON_VERSION >= 2:
        subcell_kwarg = dict(preprocessors=[SubCell])
    else:
        subcell_kwarg = dict(transformers=[SubCell])

    exporter = HTMLExporter(
        config=c,
        template_file=template_file,
        filters={'highlight2html': language_applied_highlighter},
        **subcell_kwarg)

    # read and parse the notebook
    with open(nb_path, encoding='utf-8') as f:
        nb_text = f.read()
        if IPYTHON_VERSION < 3:
            nb_json = IPython.nbformat.current.reads_json(nb_text)
        else:
            try:
                nb_json = nbformat.reads(nb_text, as_version=4)
            except Exception:
                nb_json = IPython.nbformat.reads(nb_text, as_version=4)

    (body, resources) = exporter.from_notebook_node(nb_json)

    # if we haven't already saved the header, save it here.
    if not notebook.header_saved:
        print("\n ** Writing styles to _nb_header.html: "
              "this should be included in the theme. **\n")

        header = '\n'.join(
            CSS_WRAPPER.format(css_line)
            for css_line in resources['inlining']['css'])
        header += JS_INCLUDE

        with open('_nb_header.html', 'w') as f:
            f.write(header)
        notebook.header_saved = True

    # this will stash special characters so that they won't be transformed
    # by subsequent processes.
    body = preprocessor.configs.htmlStash.store(body, safe=True)
    return body
Example No. 22
def convert_notebook_to_assets(notebook_file_name, base_name, output_prefix):
    # define and create output folder
    output_folder = output_prefix + '/' + base_name
    os.makedirs(output_folder, exist_ok=True)

    # open the notebook file (use a context manager so the handle is closed)
    print('Converting Notebook: ' + notebook_file_name + ' ...')
    with open(notebook_file_name, 'r') as nb_file:
        nb = nbformat.reads(nb_file.read(), as_version=4)

    # 1. clear output
    print(" - clearing output")
    ep = ClearOutputPreprocessor()
    ep.preprocess(nb, {})

    # 2. generate fresh charts by running the notebook
    print(" - executing")
    ep = ExecutePreprocessor(timeout=600,
                             kernel_name='python3',
                             allow_errors=False)
    try:
        ep.preprocess(nb, {'metadata': {'path': output_folder}})
    except Exception as e:
        print('ERROR: Execution of the notebook ' + notebook_file_name +
              ' stopped, likely for missing some py libs.')
        print(
            '       Please check the output/exception and add those to the requirements.'
        )
        print(e)
        exit(1)

    # 3. export HTML
    print(" - generating html")
    cleaner_config = Config({
        "HTMLExporter": {
            "exclude_input": True,
            "exclude_input_prompt": True,
            "exclude_output_prompt": True,
            "preprocessors":
            ['nbconvert.preprocessors.ExtractOutputPreprocessor']
        },
    })
    local_templates = DictLoader({
        'our-html.tpl': stand_alone_tpl,
        'react-glue.tpl': react_glue_tpl,
    })
    exporter = HTMLExporter(config=cleaner_config,
                            extra_loaders=[local_templates])
    exporter.template_file = 'our-html.tpl'
    (html_body, html_resources) = exporter.from_notebook_node(nb)

    # save html output file, with local reference to the pictures
    local_html = []
    output_html_file_name = output_folder + '/' + "index.html"
    print("   - saving html: " + output_html_file_name)
    with open(output_html_file_name, 'wt') as the_file:
        the_file.write(html_body)
    local_html.append({
        'notebook': base_name,
        'html_notebook': output_html_file_name,
    })

    # save js file for react inclusion (local ref to the pictures)
    # exporter.template_file = 'react-glue.tpl'
    # (react_body, react_resources) = exporter.from_notebook_node(nb)
    # output_react_file_name = output_folder + '/' + "index.js"
    # print("   - saving react js: " + output_react_file_name)
    # with open(output_react_file_name, 'wt') as the_file:
    #     the_file.write(react_body)

    # save all the figures
    local_figures = []
    figures = html_resources['outputs']
    figures_count = len(figures)
    figure_index = 1
    for figure_file in figures:
        output_figure_file_name = output_folder + '/' + figure_file
        print("   - saving png " + str(figure_index) + " of " +
              str(figures_count) + ": " + output_figure_file_name)
        if not figure_file.endswith('.png'):
            print("WARNING: figure is not a PNG file")
            continue
        with open(output_figure_file_name, 'wb') as the_file:
            the_file.write(figures[figure_file])
        local_figures.append({
            'figure': figure_file,
            'file': output_figure_file_name,
            'notebook': base_name,
            'html_notebook': output_html_file_name,
        })
        figure_index += 1

    # create an empty 'custom.css'
    custom_css_file_name = output_folder + '/' + 'custom.css'
    with open(custom_css_file_name, 'wt') as the_file:
        the_file.write("")

    # return a recap of all assets
    return local_html, local_figures
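A hypothetical invocation (file and folder names are illustrative):

local_html, local_figures = convert_notebook_to_assets(
    'notebooks/demo.ipynb', 'demo', 'build')
# writes build/demo/index.html plus one PNG per extracted figure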
Example No. 23
class SchemaHandlerTest(MetadataTestBase):
    """Test Schema REST API"""
    config = Config({'NotebookApp': {"nbserver_extensions": {"elyra": True}}})

    def test_bogus_namespace(self):
        # Validate missing is not found

        # Remove self.request (and other 'self.' prefixes) once transition to jupyter_server occurs
        r = fetch(self.request,
                  'elyra',
                  'schema',
                  'bogus',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404
        assert "Namespace 'bogus' is not in the list of valid namespaces:" in r.text

    def test_missing_runtimes_schema(self):
        # Validate missing is not found
        r = fetch(self.request,
                  'elyra',
                  'schema',
                  'runtimes',
                  'missing',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404
        assert "No such schema named 'missing' was found in the runtimes namespace." in r.text

    def test_get_runtimes_schemas(self):
        # Ensure all schema for runtimes can be found
        self._get_namespace_schemas('runtimes', ['kfp'])

    def test_get_code_snippets_schemas(self):
        # Ensure all schema for code-snippets can be found
        self._get_namespace_schemas('code-snippets', ['code-snippet'])

    def test_get_test_schemas(self):
        # Ensure all schema for metadata_tests can be found
        self._get_namespace_schemas(METADATA_TEST_NAMESPACE,
                                    ['metadata-test', 'metadata-test2'])

    def test_get_runtimes_schema(self):
        # Ensure all schema for runtimes can be found
        self._get_namespace_schema('runtimes', 'kfp')

    def test_get_code_snippets_schema(self):
        # Ensure all schema for code-snippets can be found
        self._get_namespace_schema('code-snippets', 'code-snippet')

    def test_get_test_schema(self):
        # Ensure all schema for code-snippets can be found
        self._get_namespace_schema(METADATA_TEST_NAMESPACE, 'metadata-test')

    def _get_namespace_schemas(self, namespace, expected):
        r = fetch(self.request,
                  'elyra',
                  'schema',
                  namespace,
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        namespace_schemas = r.json()
        assert isinstance(namespace_schemas, dict)
        assert len(namespace_schemas) == 1
        schemas = namespace_schemas[namespace]
        assert len(schemas) == len(expected)
        for expected_schema in expected:
            assert get_instance(schemas, 'name', expected_schema)

    def _get_namespace_schema(self, namespace, expected):
        r = fetch(self.request,
                  'elyra',
                  'schema',
                  namespace,
                  expected,
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        namespace_schema = r.json()
        assert isinstance(namespace_schema, dict)
        assert expected == namespace_schema['name']
        assert namespace == namespace_schema['namespace']
Example No. 24
def _uninstall_pre_config(logger=None):
    """Undo config settings inserted by an old (pre-themysto) installation."""
    # for application json config files
    cm = BaseJSONConfigManager(config_dir=jupyter_config_dir())

    # -------------------------------------------------------------------------
    # notebook json config
    config_basename = 'jupyter_notebook_config'
    config = Config(cm.get(config_basename))
    config_path = cm.file_name(config_basename)
    if config and logger:
        logger.info('- Removing old config values from {}'.format(config_path))
    to_remove = ['nbextensions']
    # remove from notebook >= 4.2 key nbserver_extensions
    section = config.get('NotebookApp', Config())
    server_extensions = section.get('nbserver_extensions', {})
    for se in to_remove:
        server_extensions.pop(se, None)
    if len(server_extensions) == 0:
        section.pop('nbserver_extensions', None)
    # and notebook < 4.2 key server_extensions
    _update_config_list(
        config, 'NotebookApp.server_extensions', to_remove, False)
    _update_config_list(config, 'NotebookApp.extra_template_paths', [
        os.path.join(jupyter_data_dir(), 'templates'),
    ], False)
    _set_managed_config(cm, config_basename, config, logger)

    # -------------------------------------------------------------------------
    # nbconvert json config
    config_basename = 'jupyter_nbconvert_config'
    config = Config(cm.get(config_basename))
    config_path = cm.file_name(config_basename)
    if config and logger:
        logger.info('- Removing old config values from {}'.format(config_path))
    _update_config_list(config, 'Exporter.template_path', [
        '.', os.path.join(jupyter_data_dir(), 'templates'),
    ], False)
    _update_config_list(config, 'Exporter.preprocessors', [
        'pre_codefolding.CodeFoldingPreprocessor',
        'pre_pymarkdown.PyMarkdownPreprocessor',
    ], False)
    section = config.get('NbConvertApp', {})
    if (section.get('postprocessor_class') ==
            'post_embedhtml.EmbedPostProcessor'):
        section.pop('postprocessor_class', None)
    if len(section) == 0:
        config.pop('NbConvertApp', None)
    _set_managed_config(cm, config_basename, config, logger)

    # -------------------------------------------------------------------------
    # Remove old config lines from .py configuration files
    for config_basename in ('jupyter_notebook_config.py',
                            'jupyter_nbconvert_config.py'):
        py_config_path = os.path.join(jupyter_config_dir(), config_basename)
        if not os.path.isfile(py_config_path):
            continue
        if logger:
            logger.info(
                '--  Removing old config lines from {}'.format(py_config_path))
        with io.open(py_config_path, 'r') as f:
            lines = f.readlines()
        marker = '#--- nbextensions configuration ---'
        marker_inds = [ii for ii, l in enumerate(lines) if l.find(marker) >= 0]
        if len(marker_inds) >= 2:
            lines = lines[0:marker_inds[0]] + lines[marker_inds[1] + 1:]
            if [l for l in lines if l.strip()]:
                with io.open(py_config_path, 'w') as f:
                    f.writelines(lines)
            else:
                if logger:
                    logger.info(
                        'Removing now-empty config file {}'.format(
                            py_config_path))
                try:
                    os.remove(py_config_path)
                except OSError as ex:
                    if ex.errno != errno.ENOENT:
                        raise
Example No. 25
 def config(self):
     return Config()
Example No. 26
    def setup_class(cls):
        cls.tmp_dir = TemporaryDirectory()
        def tmp(*parts):
            path = os.path.join(cls.tmp_dir.name, *parts)
            try:
                os.makedirs(path)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
            return path
        
        cls.home_dir = tmp('home')
        data_dir = cls.data_dir = tmp('data')
        config_dir = cls.config_dir = tmp('config')
        runtime_dir = cls.runtime_dir = tmp('runtime')
        cls.notebook_dir = tmp('notebooks')
        cls.env_patch = patch.dict('os.environ', cls.get_patch_env())
        cls.env_patch.start()
        cls.path_patch = patch.multiple(
            jupyter_core.paths,
            SYSTEM_JUPYTER_PATH=[tmp('share', 'jupyter')],
            ENV_JUPYTER_PATH=[tmp('env', 'share', 'jupyter')],
            SYSTEM_CONFIG_PATH=[tmp('etc', 'jupyter')],
            ENV_CONFIG_PATH=[tmp('env', 'etc', 'jupyter')],
        )
        cls.path_patch.start()

        config = cls.config or Config()
        config.NotebookNotary.db_file = ':memory:'

        cls.token = hexlify(os.urandom(4)).decode('ascii')

        started = Event()
        def start_thread():
            if 'asyncio' in sys.modules:
                import asyncio
                asyncio.set_event_loop(asyncio.new_event_loop())
            app = cls.notebook = NotebookApp(
                port=cls.port,
                port_retries=0,
                open_browser=False,
                config_dir=cls.config_dir,
                data_dir=cls.data_dir,
                runtime_dir=cls.runtime_dir,
                notebook_dir=cls.notebook_dir,
                base_url=cls.url_prefix,
                config=config,
                allow_root=True,
                token=cls.token,
            )
            # don't register signal handler during tests
            app.init_signal = lambda : None
            # clear log handlers and propagate to root for nose to capture it
            # needs to be redone after initialize, which reconfigures logging
            app.log.propagate = True
            app.log.handlers = []
            app.initialize(argv=cls.get_argv())
            app.log.propagate = True
            app.log.handlers = []
            loop = IOLoop.current()
            loop.add_callback(started.set)
            try:
                app.start()
            finally:
                # set the event, so failure to start doesn't cause a hang
                started.set()
                app.session_manager.close()
        cls.notebook_thread = Thread(target=start_thread)
        cls.notebook_thread.daemon = True
        cls.notebook_thread.start()
        started.wait()
        cls.wait_until_alive()
Example No. 27
 def default_config(self):
     return Config()
Example No. 28
class MetadataHandlerTest(NotebookTestBase):
    """Test Metadata REST API"""
    config = Config({'NotebookApp': {"nbserver_extensions": {"elyra": True}}})

    def setUp(self):
        # The _dir names here are fixtures that should be referenced by the appropriate
        # test methods once transition to jupyter_server occurs.
        self.metadata_tests_dir = os.path.join(self.data_dir, 'metadata',
                                               'elyra-metadata-tests')
        self.metadata_bogus_dir = os.path.join(self.data_dir, 'metadata',
                                               'bogus')

        create_json_file(self.metadata_tests_dir, 'valid.json',
                         valid_metadata_json)
        create_json_file(self.metadata_tests_dir, 'another.json',
                         another_metadata_json)
        create_json_file(self.metadata_tests_dir, 'invalid.json',
                         invalid_metadata_json)

    def test_bogus_namespace(self):
        # Validate missing is not found

        # Remove self.request (and other 'self.' prefixes) once transition to jupyter_server occurs
        r = fetch(self.request,
                  'api',
                  'metadata',
                  'bogus',
                  'missing',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404
        assert "Metadata namespace 'bogus' was not found!" in r.text
        assert not os.path.exists(self.metadata_bogus_dir)

    def test_missing_runtime(self):
        # Validate missing is not found
        r = fetch(self.request,
                  'api',
                  'metadata',
                  'elyra-metadata-tests',
                  'missing',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404
        assert "Metadata 'missing' in namespace 'elyra-metadata-tests' was not found!" in r.text

    def test_invalid_runtime(self):
        # Validate invalid throws 404 with validation message
        r = fetch(self.request,
                  'api',
                  'metadata',
                  'elyra-metadata-tests',
                  'invalid',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404
        assert "Schema validation failed for metadata 'invalid'" in r.text

    def test_valid_runtime(self):
        # Ensure valid metadata can be found
        r = fetch(self.request,
                  'api',
                  'metadata',
                  'elyra-metadata-tests',
                  'valid',
                  base_url=self.base_url(),
                  headers=self.auth_headers())

        assert r.status_code == 200
        metadata = r.json()
        assert 'schema_name' in metadata
        assert metadata['display_name'] == 'valid metadata instance'

    def test_get_instances(self):
        # Ensure all valid metadata can be found
        r = fetch(self.request,
                  'api',
                  'metadata',
                  'elyra-metadata-tests',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        metadata = r.json()
        assert isinstance(metadata, dict)
        assert len(metadata) == 1
        instances = metadata['elyra-metadata-tests']
        assert len(instances) == 2
        assert 'another' in instances.keys()
        assert 'valid' in instances.keys()

    def test_get_instances_empty(self):
        # Delete the metadata dir contents and attempt listing metadata
        shutil.rmtree(self.metadata_tests_dir)
        r = fetch(self.request,
                  'api',
                  'metadata',
                  'elyra-metadata-tests',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404
        assert "Metadata namespace 'elyra-metadata-tests' was not found!" in r.text

        # Now create empty namespace
        os.makedirs(self.metadata_tests_dir)
        r = fetch(self.request,
                  'api',
                  'metadata',
                  'elyra-metadata-tests',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        metadata = r.json()
        assert isinstance(metadata, dict)
        assert len(metadata) == 1
        instances = metadata['elyra-metadata-tests']
        assert len(instances) == 0
Example No. 29
from nbconvert import HTMLExporter
from traitlets.config import Config

preamble = """
<script type="text/x-mathjax-config">
  MathJax.Hub.Config({
    tex2jax: {
      inlineMath: [['$','$']],
      processEscapes: true
    }
  });
</script>
"""

# Use ExtractOutputPreprocessor to extract the images to separate files
config = Config()
config.HTMLExporter.preprocessors = [
    'nbconvert.preprocessors.ExtractOutputPreprocessor',
]

# Output an HTML partial, not a complete page
html_exporter = HTMLExporter(config=config)
html_exporter.template_file = 'basic'
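A hedged sketch of running this exporter on an already-loaded notebook (assuming a NotebookNode nb):

(body, resources) = html_exporter.from_notebook_node(nb)
# body is the HTML partial; resources['outputs'] maps extracted image
# filenames to their bytes, ready to be written into NOTEBOOK_IMAGE_DIR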

# Output notebook HTML partials into this directory
NOTEBOOK_HTML_DIR = 'notebooks-html'

# Output notebook HTML images into this directory
NOTEBOOK_IMAGE_DIR = 'notebooks-images'

# The prefix for the interact button links. The path format string gets filled
Example No. 30
    def __init__(self, exec_lines=None):

        self.cout = StringIO()

        if exec_lines is None:
            exec_lines = []

        # Create config object for IPython
        config = Config()
        config.HistoryManager.hist_file = ':memory:'
        config.InteractiveShell.autocall = False
        config.InteractiveShell.autoindent = False
        config.InteractiveShell.colors = 'NoColor'

        # create a profile so instance history isn't saved
        tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
        profname = 'auto_profile_sphinx_build'
        pdir = os.path.join(tmp_profile_dir, profname)
        profile = ProfileDir.create_profile_dir(pdir)

        # Create and initialize global ipython, but don't start its mainloop.
        # This will persist across different EmbeddedSphinxShell instances.
        IP = InteractiveShell.instance(config=config, profile_dir=profile)
        atexit.register(self.cleanup)

        # io.stdout redirect must be done after instantiating InteractiveShell
        io.stdout = self.cout
        io.stderr = self.cout

        # For debugging, so we can see normal output, use this:
        #from IPython.utils.io import Tee
        #io.stdout = Tee(self.cout, channel='stdout') # dbg
        #io.stderr = Tee(self.cout, channel='stderr') # dbg

        # Store a few parts of IPython we'll need.
        self.IP = IP
        self.user_ns = self.IP.user_ns
        self.user_global_ns = self.IP.user_global_ns

        self.input = ''
        self.output = ''
        self.tmp_profile_dir = tmp_profile_dir

        self.is_verbatim = False
        self.is_doctest = False
        self.is_suppress = False

        # Optionally, provide more detailed information to shell.
        # this is assigned by the SetUp method of IPythonDirective
        # to point at itself.
        #
        # So, you can access handy things at self.directive.state
        self.directive = None

        # on the first call to the savefig decorator, we'll import
        # pyplot as plt so we can make a call to the plt.gcf().savefig
        self._pyplot_imported = False

        # Prepopulate the namespace.
        for line in exec_lines:
            self.process_input_line(line, store_history=False)
Example No. 31
def page_html(ntbk, path_media_output=None, name=None, preprocessors=None,
              execute_dir=False, kernel_name=None, clear_output=False,
              title=None, author=None):
    """Build the HTML for a single notebook page.

    Inputs
    ======

    ntbk : Instance of NotebookNode
        The notebook that we'll convert to a page's HTML.
    path_media_output : string | None
        If a string, the path to where images should be extracted, relative
        to wherever you will write the final HTML file. If None,
        images will be embedded in the HTML. Note that this will not actually
        write the images. To do so, use the `write_page` function.
    name : string | None
        The name of the notebook being converted. This will be used if
        `path_media_output` is not None in order to create unique media
        file names.
    preprocessors : list of NBConvert Preprocessors | None
        Any extra preprocessors to add to the end of the preprocessor chain
        in the HTMLConverter.
    execute_dir : string | None
        Execute the notebook with a kernel started in the directory specified
        with this argument. If None, the notebook will not be executed.
    kernel_name : string
        The name of the kernel to use if we execute notebooks.
    clear_output : bool
        Whether to remove existing output from the notebook.
    title : string | "infer_title" | None
        A title to include with the page. If given, then the title will
        be printed at the top of the output page. If "infer_title", look for the title
        in `ntbk.metadata['title']` and if not found, infer it if the first
        line of the notebook is an H1 header (beginning with `# `). If None,
        or if no metadata is found, do not display a title.
    author : string | "infer_author" | None
        An author to include with the page. If given, then the author will
        be printed at the top of the output page. If "infer_author", look for the author
        in `ntbk.metadata['author']`. If `None`, or if no metadata is found,
        then do not display an author.

    Returns
    =======
    page : HTML document
        The input content file converted to HTML format.
    """
    _check_cell_tags(ntbk)

    if preprocessors is None:
        preprocessors = []
    elif not isinstance(preprocessors, (list, tuple)):
        preprocessors = [preprocessors]

    if name is None:
        name = "notebook"

    # If title/author are the "infer" sentinels, try to infer them
    meta_html_info = ''
    if title == 'infer_title':
        title = _infer_title(ntbk)

    if isinstance(title, str):
        meta_html_info += f'<div id="page-title">{title}</div>\n'

    if author == "infer_author":
        author = ntbk.metadata.get('author')
    if author:
        meta_html_info += f'<div id="page-author">{author}</div>\n'
    meta_html = f'<div id="page-info">{meta_html_info}</div>'

    ########################################
    # Notebook cleaning
    _clean_markdown_cells(ntbk)

    #############################################
    # Preprocessor configuration
    c = Config()

    # Remove cell elements using tags
    c.TagRemovePreprocessor.remove_cell_tags = ("remove_cell", "removecell")
    c.TagRemovePreprocessor.remove_all_outputs_tags = ("remove_output",)
    c.TagRemovePreprocessor.remove_input_tags = ("remove_input",)

    # Remove any cells that are *only* whitespace
    c.RegexRemovePreprocessor.patterns = ["\\s*\\Z"]

    c.HTMLExporter.preprocessors = [
        "nbconvert.preprocessors.TagRemovePreprocessor",
        "nbconvert.preprocessors.RegexRemovePreprocessor",
    ]

    if clear_output:
        c.HTMLExporter.preprocessors.append('nbconvert.preprocessors.ClearOutputPreprocessor')

    if path_media_output is not None:
        # So the images are written to disk
        c.HTMLExporter.preprocessors.append(
            "nbconvert.preprocessors.ExtractOutputPreprocessor"
        )

    # Add extra preprocessors given by the user
    for preprocessor in preprocessors:
        c.HTMLExporter.preprocessors.append(preprocessor)

    # The text used for anchor links.
    # TEMPORARILY set to empty since we'll use anchor.js for the links.
    # Once https://github.com/jupyter/nbconvert/pull/1101 is fixed,
    # set to '<i class="fas fa-link"> </i>'
    c.HTMLExporter.anchor_link_text = " "

    # Excluding input/output prompts
    c.HTMLExporter.exclude_input_prompt = True
    c.HTMLExporter.exclude_output_prompt = True

    #############################################
    # Run and convert to HTML

    # Execution of the notebook if we wish
    if execute_dir is not None:
        ntbk = run_ntbk(ntbk, execute_dir, allow_errors=True)

    # Generate HTML from our notebook using the template
    output_resources = {"output_files_dir": path_media_output, "unique_key": name}
    exp = HTMLExporter(template_file=PATH_TEMPLATE, config=c)
    html, resources = exp.from_notebook_node(ntbk, resources=output_resources)

    html = f"""
    <main class="jupyter-page">
    {meta_html}
    {html}
    </main>
    """
    return html, resources
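A hedged usage sketch for page_html (the file and directory names are hypothetical):

import nbformat

ntbk = nbformat.read('page.ipynb', as_version=4)
html, resources = page_html(ntbk, path_media_output='images', name='page')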
Example No. 32
 def default_config(self):
     c = Config({})
     c.merge(super(MarkdownExporter, self).default_config)
     return c
Example No. 33
def build_page(path_ntbk,
               path_html_output,
               path_media_output=None,
               execute=False,
               path_template=None,
               verbose=False,
               kernel_name=None):
    """Build the HTML for a single notebook page.

    Inputs
    ======

    path_ntbk : string
        The path to a notebook or text file we want to convert. If a text
        file, then Jupytext will be used to convert into a notebook. This
        will also cause the notebook to be *run* (e.g. execute=True).
    path_html_output : string
        The path to the folder where the HTML will be output.
    path_media_output : string | None
        If a string, the path to where images should be extracted. If None,
        images will be embedded in the HTML.
    execute : bool
        Whether to execute the notebook before converting
    path_template : string
        A path to the template used in conversion.
    kernel_name : string
        The name of the kernel to use if we execute notebooks.
    """

    ########################################
    # Load in the notebook
    notebook_name, suff = op.splitext(op.basename(path_ntbk))

    is_raw_markdown_file = False
    if suff in ['.md', '.markdown']:
        # If it's a markdown file, we need to check whether it's a jupytext format
        with open(path_ntbk, 'r') as ff:
            lines = ff.readlines()
            yaml_lines, content = _split_yaml(lines)
            yaml = YAML().load(''.join(yaml_lines))

        if (yaml is not None) and yaml.get('jupyter', {}).get('jupytext'):
            # If we have jupytext metadata, then use it to read the markdown file
            ntbk = jpt.reads(''.join(lines), 'md')
        else:
            # Otherwise, create an empty notebook and add all of the file contents as a markdown file
            is_raw_markdown_file = True
            ntbk = nbf.v4.new_notebook()
            ntbk['cells'].append(
                nbf.v4.new_markdown_cell(source=''.join(content)))
    else:
        # If it's not markdown, we assume it's either ipynb or a jupytext format
        ntbk = jpt.read(path_ntbk)

    if _is_jupytext_file(ntbk):
        execute = True

    ########################################
    # Notebook cleaning

    # Minor edits to cells
    _clean_markdown_cells(ntbk)

    #############################################
    # Conversion to HTML
    # create a configuration object that changes the preprocessors
    c = Config()

    c.FilesWriter.build_directory = path_html_output

    # Remove cell elements using tags
    c.TagRemovePreprocessor.remove_cell_tags = ("remove_cell", "removecell")
    c.TagRemovePreprocessor.remove_all_outputs_tags = ('remove_output', )
    c.TagRemovePreprocessor.remove_input_tags = ('remove_input', )

    # Remove any cells that are *only* whitespace
    c.RegexRemovePreprocessor.patterns = ["\\s*\\Z"]

    c.HTMLExporter.preprocessors = [
        'nbconvert.preprocessors.TagRemovePreprocessor',
        'nbconvert.preprocessors.RegexRemovePreprocessor',
        # So the images are written to disk
        'nbconvert.preprocessors.ExtractOutputPreprocessor',
        # Wrap cells in Jekyll raw tags
        _RawCellPreprocessor,
    ]

    # The text used as the text for anchor links. Set to empty since we'll use anchor.js for the links
    c.HTMLExporter.anchor_link_text = " "

    # Excluding input/output prompts
    c.HTMLExporter.exclude_input_prompt = True
    c.HTMLExporter.exclude_output_prompt = True

    # Execution of the notebook if we wish
    if execute is True:
        ntbk = run_ntbk(ntbk, op.dirname(path_ntbk))

    # Define the relative path from the HTML output to where the images will be placed
    path_media_output_rel = None
    if isinstance(path_media_output, str):
        path_media_output_rel = op.relpath(path_media_output, path_html_output)

    # Generate HTML from our notebook using the template
    output_resources = {
        'output_files_dir': path_media_output_rel,
        'unique_key': notebook_name
    }
    exp = HTMLExporter(template_file=path_template, config=c)
    html, resources = exp.from_notebook_node(ntbk, resources=output_resources)

    # Now write the markdown and resources
    writer = FilesWriter(config=c)
    writer.write(html, resources, notebook_name=notebook_name)

    # Add the frontmatter to the yaml file in case it's wanted
    if is_raw_markdown_file and len(yaml_lines) > 0:
        with open(op.join(path_html_output, notebook_name + '.html'),
                  'r') as ff:
            md_lines = ff.readlines()
        md_lines.insert(0, '---\n')
        for iline in yaml_lines[::-1]:
            md_lines.insert(0, iline + '\n')
        md_lines.insert(0, '---\n')
        with open(op.join(path_html_output, notebook_name + '.html'),
                  'w') as ff:
            ff.writelines(md_lines)

    if verbose:
        print("Finished writing notebook to {}".format(path_html_output))
Example No. 34
 def _config_changed(self, name, old, new):
     # warn on change of renamed config section
     if new.InlineBackendConfig != getattr(old, 'InlineBackendConfig',
                                           Config()):
         warn("InlineBackendConfig has been renamed to InlineBackend")
     super(InlineBackend, self)._config_changed(name, old, new)
Example No. 35
def type_of_run(date_path, run_number, counters, n_events=500):
    """
    Guessing empirically the type of run based on the percentage of
    pedestals/mono trigger types from the first n_events:
    DRS4 pedestal run (DRS4): 100% mono events (trigger_type == 1)
    cosmic data run (DATA): <10% pedestal events (trigger_type == 32)
    pedestal-calibration run (PEDCALIB): ~50% mono, ~50% pedestal events
    Otherwise (ERROR) the run is not expected to be processed.
    This method may not always give the correct type.
    At some point this should be taken directly from TCU.

    Parameters
    ----------
    date_path : pathlib.Path
        Path to the R0 files
    run_number : int
        Run id
    counters : dict
        Dict containing the reference counters and timestamps
    n_events : int
        Number of events used to infer the type of the run

    Returns
    -------
    run_type: str
        Type of run (DRS4, PEDCALIB, DATA, ERROR)
    """

    pattern = f"LST-1.1.Run{run_number:05d}.0000*.fits.fz"
    list_of_files = sorted(date_path.glob(pattern))
    if len(list_of_files) == 0:
        log.error(f"First subrun not found for {pattern}")
        return "ERROR"

    filename = list_of_files[0]

    config = Config()
    config.EventTimeCalculator.dragon_reference_time = int(counters["dragon_reference_time"])
    config.EventTimeCalculator.dragon_reference_counter = int(counters["dragon_reference_counter"])
    config.EventTimeCalculator.dragon_module_id = int(counters["dragon_reference_module_id"])

    try:
        with LSTEventSource(filename, config=config, max_events=n_events) as source:
            source.log.setLevel(logging.ERROR)

            event_type_counts = Counter(event.trigger.event_type for event in source)
            n_pedestals = event_type_counts[EventType.SKY_PEDESTAL]
            n_subarray = event_type_counts[EventType.SUBARRAY]

        if n_subarray / n_events > 0.999:
            run_type = "DRS4"
        elif n_pedestals / n_events > 0.1:
            run_type = "PEDCALIB"
        elif n_pedestals / n_events < 0.1:
            run_type = "DATA"
        else:
            run_type = "ERROR"

    except (AttributeError, ValueError, IOError, IndexError) as err:
        log.error(f"File {filename} has error: {err!r}")

        run_type = "ERROR"

    return run_type
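A hypothetical call (the path, run id and counters dict are illustrative):

from pathlib import Path

run_type = type_of_run(Path('R0/20200101'), 2005, counters)  # counters loaded elsewhere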
Example No. 36
def random_ports(n):
    """Return n random ports that are available."""
    sockets = []
    ports = []
    for i in range(n):
        sock = socket.socket()
        sock.bind(('', 0))
        sockets.append(sock)
    for sock in sockets:
        port = sock.getsockname()[1]
        sock.close()
        ports.append(port)
    return ports
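For instance, reserving two ports for a test proxy (a sketch; the OS picks the actual numbers):

api_port, proxy_port = random_ports(2)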


default_config = Config()
default_config.ConfigurableHTTPProxy.api_url = 'http://127.0.0.1:%i' % tuple(
    random_ports(1))

from unittest.mock import MagicMock

hub = MagicMock()
hub.url = 'http://127.0.0.1'
app = MagicMock()
app.subdomain_host = ''
app.statsd_host = None


async def start_proxy(ProxyClass, port):
    """Start a proxy
    
Example No. 37
    def __init__(
        self,
        config,
        subarray,
        cams_and_foclens,
        mode,
        event_cutflow=None,
        image_cutflow=None,
        debug=False,
    ):
        """Initiliaze an EventPreparer object."""
        # Cleaning for reconstruction
        self.cleaner_reco = ImageCleaner(  # for reconstruction
            config=config["ImageCleaning"]["biggest"],
            cameras=cams_and_foclens.keys(),
            mode=mode,
        )

        # Cleaning for energy/score estimation
        # Add possibility to force energy/score cleaning with tailcut analysis
        force_mode = mode
        try:
            if config["General"]["force_tailcut_for_extended_cleaning"] is True:
                force_mode = config["General"]["force_mode"]
                print("> Activate force-mode for cleaning!!!!")
        except KeyError:
            pass  # force_mode = mode

        self.cleaner_extended = ImageCleaner(  # for energy/score estimation
            config=config["ImageCleaning"]["extended"],
            cameras=cams_and_foclens.keys(),
            mode=force_mode,
        )

        # Image book keeping
        self.image_cutflow = image_cutflow or CutFlow("ImageCutFlow")

        # Add quality cuts on images
        charge_bounds = config["ImageSelection"]["charge"]
        npix_bounds = config["ImageSelection"]["pixel"]
        ellipticity_bounds = config["ImageSelection"]["ellipticity"]
        nominal_distance_bounds = config["ImageSelection"]["nominal_distance"]

        if debug:
            camera_radius(cams_and_foclens,
                          "all")  # Display all registered camera radii

        # Radii in meters from CTA-MARS
        # self.camera_radius = {
        #     cam_id: camera_radius(cams_and_foclens, cam_id)
        #     for cam_id in cams_and_foclens.keys()
        # }

        # Radii in degrees from CTA-MARS
        self.camera_radius = {
            cam_id: CTAMARS_radii(cam_id)
            for cam_id in cams_and_foclens.keys()
        }

        self.image_cutflow.set_cuts(
            OrderedDict([
                ("noCuts", None),
                ("min pixel", lambda s: np.count_nonzero(s) < npix_bounds[0]),
                ("min charge", lambda x: x < charge_bounds[0]),
                (
                    "poor moments",
                    lambda m: m.width <= 0 or m.length <= 0 or np.isnan(
                        m.width) or np.isnan(m.length),
                ),
                (
                    "bad ellipticity",
                    lambda m: (m.width / m.length) < ellipticity_bounds[0] or
                    (m.width / m.length) > ellipticity_bounds[-1],
                ),
                (
                    "close to the edge",
                    lambda m, cam_id: m.r.value >
                    (nominal_distance_bounds[-1] * self.camera_radius[cam_id]),
                ),  # in meter
            ]))

        # Configuration for the camera calibrator

        cfg = Config()

        extractor = TwoPassWindowSum(config=cfg, subarray=subarray)
        # Get the name of the image extractor in order to adapt some options
        # specific to TwoPassWindowSum later on
        self.extractorName = list(extractor.get_current_config().items())[0][0]

        self.calib = CameraCalibrator(
            config=cfg,
            image_extractor=extractor,
            subarray=subarray,
        )

        # Reconstruction
        self.shower_reco = MyHillasReconstructor()

        # Event book keeping
        self.event_cutflow = event_cutflow or CutFlow("EventCutFlow")

        # Add cuts on events
        self.min_ntel = config["Reconstruction"]["min_tel"]
        self.event_cutflow.set_cuts(
            OrderedDict([
                ("noCuts", None),
                ("min2Tels trig", lambda x: x < self.min_ntel),
                ("min2Tels reco", lambda x: x < self.min_ntel),
                ("direction nan", lambda x: x.is_valid is False),
            ]))
Example No. 38
def autoipython(funcs):
    # Use IPython in combination with AUTO
    # First import the shell class
    ipython011 = False
    ipython1x = False
    try:
        from IPython.terminal.prompts import Prompts, Token

        class MyPrompt(Prompts):
            def in_prompt_tokens(self, cli=None):
                return [(Token.Prompt, 'AUTO In ['),
                        (Token.PromptNum, str(self.shell.execution_count)),
                        (Token.Prompt, ']: ')]

            def continuation_prompt_tokens(self, cli=None, width=None):
                if width is None: width = self._width()
                return [(Token.Prompt, 'AUTO    ' + (width - 10) * '.' + ': ')]

            def out_prompt_tokens(self, cli=None):
                return [(Token.OutPrompt, 'Out['),
                        (Token.OutPromptNum, str(self.shell.execution_count)),
                        (Token.OutPrompt, ']: ')]

        ipython5x = True
    except ImportError:
        ipython5x = False

    try:  # Check for ipython >=1.0
        from IPython import start_ipython
        ipython1x = True
    except ImportError:
        try:
            import IPython.Shell
        except ImportError:
            try:  # Check for ipython >= 0.11
                from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
                ipython011 = True
            except ImportError:
                print("Sorry, ipython is not available on this system.")
                return

    if ipython011 or ipython1x:
        if ipython1x:
            from traitlets.config import Config
        else:
            from IPython.config.loader import Config
        cfg = Config()
        if ipython5x:
            cfg.TerminalInteractiveShell.prompts_class = MyPrompt
        else:
            cfg.PromptManager.in_template = "AUTO In [\\#]: "
            cfg.PromptManager.in2_template = "AUTO    .\\D.: "
            cfg.PromptManager.out_template = "Out[\\#]: "
        cfg.InteractiveShell.confirm_exit = False
        cfg.InteractiveShell.autocall = 2
        cfg.InteractiveShell.banner2 = """
Welcome to the AUTO IPython CLUI
man     -> List of AUTO CLUI commands"""

        if ipython1x:
            sys.exit(start_ipython(user_ns=funcs, config=cfg))
        ipshell = TerminalInteractiveShell(config=cfg, user_ns=funcs)
        ipshell.show_banner()
        ipshell.mainloop()
        return

    import IPython
    from IPython.iplib import InteractiveShell

    # Now create an instance of the embeddable shell. The first argument is a
    # string with options exactly as you would type them if you were starting
    # IPython at the system command line. Any parameters you want to define for
    # configuration can thus be specified here.

    args = [
        '-pi1', 'AUTO In [\\#]: ', '-pi2', 'AUTO    .\\D.: ', '-po',
        'Out[\\#]: ', '-noconfirm_exit', '-autocall', '2'
    ]

    banner = [
        'Python %s\n'
        'Type "copyright", "credits" or "license" '
        'for more information.\n' % (sys.version.split('\n')[0], ),
        "IPython %s -- An enhanced Interactive Python." %
        (IPython.Release.version, ),
        """?       -> Introduction to IPython's features.
%magic  -> Information about IPython's 'magic' % functions.
help    -> Python's own help system.
object? -> Details about 'object'. ?object also works, ?? prints more.

Welcome to the AUTO IPython CLUI
man     -> List of AUTO CLUI commands"""
    ]

    class AUTOInteractiveShell(AUTOInteractive, InteractiveShell):
        def __init__(self, name, **kw):
            self.parentclass = InteractiveShell
            AUTOInteractive.__init__(self, kw["user_ns"], None)
            InteractiveShell.__init__(self, name, **kw)

        def prefilter(self, line, continue_prompt):
            if not continue_prompt:
                line = self.processShorthand(line)
            line = InteractiveShell.prefilter(self, line, continue_prompt)
            return line

    ipshell = IPython.Shell.IPShell(args,
                                    user_ns=funcs,
                                    user_global_ns=funcs,
                                    shell_class=AUTOInteractiveShell)
    ipshell.IP.user_ns['help'] = ipshell.IP.help
    ipshell.mainloop(banner='\n'.join(banner))
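On current IPython (>= 5) all of the version fallbacks above collapse into the first branch. A minimal sketch of that path alone, keeping the same prompt idea; the prompt class and user namespace here are illustrative:

import sys
from traitlets.config import Config
from IPython import start_ipython
from IPython.terminal.prompts import Prompts, Token

class AutoPrompt(Prompts):
    def in_prompt_tokens(self, cli=None):  # cli kept for IPython 5.x compatibility
        return [(Token.Prompt, 'AUTO In ['),
                (Token.PromptNum, str(self.shell.execution_count)),
                (Token.Prompt, ']: ')]

cfg = Config()
cfg.TerminalInteractiveShell.prompts_class = AutoPrompt
cfg.TerminalInteractiveShell.confirm_exit = False
cfg.InteractiveShell.autocall = 2
sys.exit(start_ipython(argv=[], user_ns={}, config=cfg))
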
Example #39
class MetadataHandlerTest(MetadataTestBase):
    """Test Metadata REST API"""
    config = Config({'NotebookApp': {"nbserver_extensions": {"elyra": True}}})

    def setUp(self):
        # The _dir names here are fixtures that should be referenced by the appropriate
        # test methods once transition to jupyter_server occurs.
        self.namespace_location = os.path.join(self.data_dir, 'metadata',
                                               METADATA_TEST_NAMESPACE)
        self.bogus_location = os.path.join(self.data_dir, 'metadata', 'bogus')

        create_json_file(self.namespace_location, 'valid.json',
                         valid_metadata_json)
        create_json_file(self.namespace_location, 'another.json',
                         another_metadata_json)
        create_json_file(self.namespace_location, 'invalid.json',
                         invalid_metadata_json)

    def test_bogus_namespace(self):
        # Validate that an unknown namespace is rejected

        # Remove self.request (and other 'self.' prefixes) once transition to jupyter_server occurs
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  'bogus',
                  'missing',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 400
        assert "Namespace 'bogus' is not in the list of valid namespaces:" in r.text
        assert not os.path.exists(self.bogus_location)

    def test_missing_instance(self):
        # Validate missing is not found
        name = 'missing'
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  name,
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404
        assert "No such instance named '{}' was found in the {} namespace.".\
               format(name, METADATA_TEST_NAMESPACE) in r.text

    def test_invalid_instance(self):
        # Validate invalid instance throws 400 with validation message
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'invalid',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 400
        assert "Validation failed for instance 'invalid'" in r.text

    def test_valid_instance(self):
        # Ensure valid metadata can be found
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'valid',
                  base_url=self.base_url(),
                  headers=self.auth_headers())

        assert r.status_code == 200
        metadata = r.json()
        assert 'schema_name' in metadata
        assert metadata['display_name'] == 'valid metadata instance'

    def test_get_instances(self):
        # Ensure all valid metadata can be found
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        metadata = r.json()
        assert isinstance(metadata, dict)
        assert len(metadata) == 1
        instances = metadata[METADATA_TEST_NAMESPACE]
        assert len(instances) == 2
        assert isinstance(instances, list)
        assert get_instance(instances, 'name', 'another')
        assert get_instance(instances, 'name', 'valid')

    def test_get_empty_namespace_instances(self):
        # Delete the metadata dir contents and attempt listing metadata
        shutil.rmtree(self.namespace_location)
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        metadata = r.json()
        assert isinstance(metadata, dict)
        assert len(metadata) == 1
        instances = metadata[METADATA_TEST_NAMESPACE]
        assert len(instances) == 0

        # Now create empty namespace
        os.makedirs(self.namespace_location)
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        metadata = r.json()
        assert isinstance(metadata, dict)
        assert len(metadata) == 1
        instances = metadata[METADATA_TEST_NAMESPACE]
        assert len(instances) == 0
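
The class-level `config` attribute above can equivalently be built attribute-style; both spellings produce the same nested structure:

from traitlets.config import Config

config = Config()
config.NotebookApp.nbserver_extensions = {"elyra": True}
assert config == Config({'NotebookApp': {"nbserver_extensions": {"elyra": True}}})
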
Example #40
    def __init__(self, course_dir=None, auto=False) -> 'Course':
        """Initialize a course from a config file.

        :param course_dir: The directory of your course. If None, defaults to the current working directory.
        :type course_dir: str
        :param auto: Suppress all prompts, automatically answering yes.
        :type auto: bool

        :returns: A Course object for performing operations on an entire course at once.
        :rtype: Course
        """

        #=======================================#
        #     Working Directory & Git Sync      #
        #=======================================#

        # Set up the working directory. If no course_dir has been specified,
        # then the current working directory is assumed to be the course
        # directory.
        self.working_directory = course_dir if course_dir is not None else os.getcwd()

        repo = Repo(self.working_directory)

        # Before we do ANYTHING, make sure our working directory is clean with no
        # untracked files! Unless we're running an automated job, in which case we
        # don't want to fail for an unexpected reason.
        if (repo.is_dirty() or repo.untracked_files) and (not auto):
            continue_with_dirty = input("""
        Your repository is currently in a dirty state (modifications or
        untracked changes are present). We strongly suggest that you resolve
        these before proceeding. Continue? [y/n]:""")
            # if they didn't explicitly say yes, exit
            if continue_with_dirty.lower() != 'y':
                sys.exit("Exiting...")

        # PRINT BANNER
        print(
            AsciiTable([['Initializing Course and Pulling Instructors Repo']
                        ]).table)

        # pull the latest copy of the repo
        utils.pull_repo(repo_dir=self.working_directory)

        # Make sure we're running our nbgrader commands within our instructors repo.
        # this will contain our gradebook database, our source directory, and other
        # things.
        config = Config()
        config.CourseDirectory.root = self.working_directory

        #=======================================#
        #              Load Config              #
        #=======================================#

        # Check for an nbgrader config file...
        if not os.path.exists(
                os.path.join(self.working_directory, 'nbgrader_config.py')):
            # if there isn't one, make sure there's at least a rudaux config file
            if not os.path.exists(
                    os.path.join(self.working_directory, 'rudaux_config.py')):
                sys.exit("""
          You do not have nbgrader_config.py or rudaux_config.py in your current
          directory. We need at least one of these to set up your course
          parameters! You can specify a directory with the course_dir argument
          if you wish.
          """)

        # use the traitlets Application class directly to load nbgrader config file.
        # reference:
        # https://github.com/jupyter/nbgrader/blob/41f52873c690af716c796a6003d861e493d45fea/nbgrader/server_extensions/validate_assignment/handlers.py#L35-L37

        # ._load_config_files() returns a generator, so if the config is missing,
        # the generator will act similarly to an empty array

        # load rudaux_config if it exists, otherwise just bring in nbgrader_config.
        for rudaux_config in Application._load_config_files(
                'rudaux_config', path=self.working_directory):
            config.merge(rudaux_config)

        for nbgrader_config in Application._load_config_files(
                'nbgrader_config', path=self.working_directory):
            config.merge(nbgrader_config)

        #=======================================#
        #           Set Config Params           #
        #=======================================#

        ## NBGRADER PARAMS

        # If the user set the exchange, perform home user expansion if necessary
        if config.get('Exchange', {}).get('root') is not None:
            # perform home user expansion. Should not throw an error, but may
            try:
                # expand home user in-place
                config['Exchange']['root'] = os.path.expanduser(
                    config['Exchange']['root'])
            except Exception:
                pass

        ## CANVAS PARAMS

        # Before we continue, make sure we have all of the necessary parameters.
        self.course_id = config.get('Canvas', {}).get('course_id')
        self.canvas_url = config.get('Canvas', {}).get('canvas_url')
        self.external_tool_name = config.get('Canvas',
                                             {}).get('external_tool_name')
        self.external_tool_level = config.get('Canvas',
                                              {}).get('external_tool_level')
        # The canvas url should have no trailing slash
        if self.canvas_url is not None:
            self.canvas_url = re.sub(r"/$", "", self.canvas_url)

        ## GITHUB PARAMS
        self.stu_repo_url = config.get('GitHub', {}).get('stu_repo_url')
        self.assignment_release_path = config.get(
            'GitHub', {}).get('assignment_release_path')
        self.ins_repo_url = config.get('GitHub', {}).get('ins_repo_url')
        # subpath not currently supported
        # self.ins_dir_subpath = config.get('GitHub').get('ins_dir_subpath')

        ## JUPYTERHUB PARAMS

        self.hub_url = config.get('JupyterHub', {}).get('hub_url')
        # The hub url should have no trailing slash
        if self.hub_url is not None:
            self.hub_url = re.sub(r"/$", "", self.hub_url)
        # Get Storage directory & type
        self.storage_path = config.get('JupyterHub', {}).get('storage_path')
        self.zfs = config.get('JupyterHub',
                              {}).get('zfs')  # Optional, default is False
        self.zfs_regex = config.get('JupyterHub',
                                    {}).get('zfs_regex')  # optional, defaulted below
        self.zfs_datetime_pattern = config.get('JupyterHub', {}).get(
            'zfs_datetime_pattern')  # optional, defaulted below
        # Note hub_prefix, not base_url, to avoid any ambiguity
        self.hub_prefix = config.get('JupyterHub', {}).get('base_url')
        # If prefix was set, make sure it has no trailing slash, but a preceding
        # slash
        if self.hub_prefix is not None:
            self.hub_prefix = re.sub(r"/$", "", self.hub_prefix)
            if re.search(r"^/", self.hub_prefix) is None:
                self.hub_prefix = fr"/{self.hub_prefix}"

        ## COURSE PARAMS

        self.grading_image = config.get('Course', {}).get('grading_image')
        self.tmp_dir = config.get('Course', {}).get('tmp_dir')
        assignment_list = config.get('Course', {}).get('assignments')

        self.course_timezone = config.get('Course', {}).get('timezone')
        self.system_timezone = pendulum.now(tz='local').timezone.name

        ## Repurpose the rest of the params for later batches
        ## (Hang onto them in case we need something)

        self._full_config = config

        #=======================================#
        #        Validate URLs (Slightly)       #
        #=======================================#

        urls = {
            'JupyterHub.hub_url': self.hub_url,
            'Canvas.canvas_url': self.canvas_url
        }

        for key, value in urls.items():
            if value is None:
                # missing URLs are reported by the required-params check below
                continue
            if re.search(r"^https{0,1}", value) is None:
                sys.exit(f"""
          You must specify the scheme (e.g. https://) for all URLs.
          You are missing the scheme in "{key}":
          {value}
          """)
            if re.search(r".git$", value) is not None:
                sys.exit(f"""
          Please do not use .git-appended URLs. 
          You have used a .git url in "{key}":
          {value}
          """)

        #=======================================#
        #       Check For Required Params       #
        #=======================================#

        # Finally, before we continue, make sure all of our required parameters were
        # specified in the config file(s)
        required_params = {
            "Canvas.course_id": self.course_id,
            "Canvas.canvas_url": self.canvas_url,
            "GitHub.stu_repo_url": self.stu_repo_url,
            "GitHub.ins_repo_url": self.ins_repo_url,
            "JupyterHub.hub_url": self.hub_url,
            "Course.assignments": assignment_list
        }

        # If any are none...
        if None in required_params.values():
            # Figure out which ones are none and let the user know.
            for key, value in required_params.items():
                if value is None:
                    print(f"    \"{key}\" is missing.")
            sys.exit(
                'Please make sure you have specified all required parameters in your config file.'
            )

        #=======================================#
        #       Check For Optional Params       #
        #=======================================#

        # Now look for all of our optional parameters. If any are missing, let the
        # user know we'll be using the default.
        optional_params = {
            "assignment_release_path": {
                "value": self.assignment_release_path,
                "default": 'materials',
                "config_name": "GitHub.assignment_release_path"
            },
            # "assignment_source_path": {
            #   "value": self.assignment_source_path,
            #   "default": "source",
            #   "config_name": "c.GitHub.assignment_source_path"
            # },
            "hub_prefix": {
                "value": self.hub_prefix,
                "default": "",
                "config_name": "JupyterHub.base_url"
            },
            "zfs": {
                "value": self.zfs,
                "default": False,
                "config_name": "JupyterHub.zfs"
            },
            "zfs_regex": {
                "value": self.zfs_regex,
                "default": r'\d{4}-\d{2}-\d{2}-\d{4}',
                "config_name": "JupyterHub.zfs_regex"
            },
            "zfs_datetime_pattern": {
                "value": self.zfs_datetime_pattern,
                "default": 'YYYY-MM-DD-HHmm',
                "config_name": "JupyterHub.zfs_datetime_pattern"
            },
            "course_timezone": {
                "value": self.course_timezone,
                "default": 'US/Pacific',
                "config_name": "Course.timezone"
            },
            "grading_image": {
                "value": self.grading_image,
                "default": 'ubcdsci/r-dsci-grading',
                "config_name": "Course.grading_image"
            },
            "tmp_dir": {
                "value": self.tmp_dir,
                "default": os.path.join(Path.home(), 'tmp'),
                "config_name": "Course.tmp_dir"
            },
            "external_tool_name": {
                "value": self.external_tool_name,
                "default": 'Jupyter',
                "config_name": "Canvas.external_tool_name"
            },
            "external_tool_level": {
                "value": self.external_tool_level,
                "default": 'course',
                "config_name": "Canvas.external_tool_level"
            }
        }

        for key, param in optional_params.items():
            if param.get('value') is None:
                setattr(self, key, param.get('default'))
                print(
                    f"    \"{param.get('config_name')}\" is missing, using default parameter of \"{getattr(self, key)}\""
                )

        # Make sure no preceding or trailing slashes in assignment release path
        self.assignment_release_path = re.sub(r"/$", "",
                                              self.assignment_release_path)
        self.assignment_release_path = re.sub(r"^/", "",
                                              self.assignment_release_path)

        # Since we are using the student repo URL for the Launch URLs
        # (i.e. telling nbgitpuller where to find the notebook),
        # if the user provided an SSH url, we need the https version as well.
        self.stu_launch_url = utils.generate_git_urls(
            self.stu_repo_url).get('plain_https')

        #! this is cheating a bit, but we can get the repo name this way
        #! Fix me in the future
        self.ins_repo_name = os.path.split(
            utils.generate_git_urls(self.ins_repo_url).get('plain_https'))[1]
        self.stu_repo_name = os.path.split(self.stu_launch_url)[1]

        #=======================================#
        #           Set Canvas Token            #
        #=======================================#

        canvas_token_name = config.get('Canvas', {}).get('token_name')

        if canvas_token_name is None:
            print("Searching for default Canvas token, CANVAS_TOKEN...")
            canvas_token_name = 'CANVAS_TOKEN'

        self.canvas_token = self._get_token(canvas_token_name)

        #=======================================#
        #        Finalize Setting Params        #
        #=======================================#

        # set up the nbgrader api with our merged config files
        self.nb_api = NbGraderAPI(config=config)

        # assign init params to object
        # self.canvas_token = self._get_token(canvas_token_name)
        # self.course = self._get_course()

        # Set crontab. The user's crontab is used here; note that making ZFS
        # snapshots requires elevated permissions, so a system crontab may be
        # needed for that.
        self.cron = CronTab(user=True)

        #=======================================#
        #        Instantiate Assignments        #
        #=======================================#

        # Subclass assignment for this course:
        class CourseAssignment(rudaux.Assignment):
            course = self

        instantiated_assignments = []

        for _assignment in assignment_list:
            assignment = CourseAssignment(
                name=_assignment.get('name'),
                duedate=_assignment.get('duedate'),
                duetime=_assignment.get(
                    'duetime', '23:59:59'),  # default is 1 sec to midnight
                points=_assignment.get('points', 0),  # default is zero points
                manual=_assignment.get('manual',
                                       False),  # default is no manual grading
            )
            instantiated_assignments.append(assignment)

        self.assignments = instantiated_assignments
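
A minimal standalone sketch of the config-loading pattern used above (the file name and preset value are hypothetical): `Application._load_config_files` yields one Config per matching file, and `merge()` folds each into the accumulated config, with later merges taking precedence on conflicts.

from traitlets.config import Config
from traitlets.config.application import Application

config = Config()
config.CourseDirectory.root = "/srv/course"  # hypothetical preset

# Assumes a my_config.py next to the script containing lines like:
#   c.Canvas.course_id = 12345
for loaded in Application._load_config_files("my_config", path="."):
    config.merge(loaded)
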
Example #41
def create_shell(console, manage_dict=None, extra_vars=None, exit_hooks=None):
    """Creates the shell"""
    manage_dict = manage_dict or MANAGE_DICT
    _vars = globals()
    _vars.update(locals())
    auto_imported = import_objects(manage_dict)
    if extra_vars:
        auto_imported.update(extra_vars)
    _vars.update(auto_imported)
    msgs = []
    if manage_dict['shell']['banner']['enabled']:
        msgs.append(
            manage_dict['shell']['banner']['message'].format(**manage_dict)
        )
    if auto_imported and manage_dict['shell']['auto_import']['display']:
        auto_imported_names = [
            key for key in auto_imported.keys()
            if key not in ['__builtins__', 'builtins']
        ]
        msgs.append('\tAuto imported: {0}\n'.format(auto_imported_names))

    banner_msg = '\n'.join(msgs)

    exec_init(manage_dict, _vars)
    exec_init_script(manage_dict, _vars)

    atexit_functions = [
        import_string(func_name) for func_name in
        manage_dict['shell'].get('exit_hooks', [])
    ]
    atexit_functions += exit_hooks or []
    for atexit_function in atexit_functions:
        atexit.register(atexit_function)

    if console == 'ptpython':
        try:
            from ptpython.repl import embed
            embed({}, _vars)
        except ImportError:
            click.echo("ptpython is not installed!")
        return

    if console == 'bpython':
        try:
            from bpython import embed
            embed(locals_=_vars, banner=banner_msg)
        except ImportError:
            click.echo("bpython is not installed!")
        return

    try:
        if console == 'ipython':
            from IPython import start_ipython
            from traitlets.config import Config
            c = Config()
            c.TerminalInteractiveShell.banner2 = banner_msg
            c.InteractiveShellApp.extensions = list(
                manage_dict['shell'].get('ipython_extensions', []))
            c.InteractiveShellApp.exec_lines = list(
                manage_dict['shell'].get('ipython_exec_lines', []))
            if manage_dict['shell'].get('ipython_auto_reload', True) is True:
                c.InteractiveShellApp.extensions.append('autoreload')
                c.InteractiveShellApp.exec_lines.append('%autoreload 2')
            start_ipython(argv=[], user_ns=_vars, config=c)
        else:
            raise ImportError
    except ImportError:
        if manage_dict['shell']['readline_enabled']:
            import readline
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(_vars).complete)
            readline.parse_and_bind('tab: complete')
        shell = code.InteractiveConsole(_vars)
        shell.interact(banner=banner_msg)
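
A minimal sketch of just the ipython branch above, with a hypothetical banner, namespace, and startup lines:

from traitlets.config import Config
from IPython import start_ipython

c = Config()
c.TerminalInteractiveShell.banner2 = "my-manage shell\n"  # hypothetical banner
c.InteractiveShellApp.extensions = ["autoreload"]
c.InteractiveShellApp.exec_lines = ["%autoreload 2"]
start_ipython(argv=[], user_ns={"answer": 42}, config=c)
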
Example #42
def plot_pedestals(data_file,
                   pedestal_file,
                   run=0,
                   plot_file=None,
                   tel_id=1,
                   offset_value=400,
                   sample_size=1000):
    """
    plot pedestal quantities quantities

    Parameters
    ----------
    data_file:   pedestal run

    pedestal_file:   file with drs4 corrections

    run: run number of data to be corrected

    plot_file:  name of output pdf file

    tel_id: id of the telescope

    offset_value: baseline off_set
    """

    config = {
        "LSTEventSource": {
            "pointing_information": False,
            "allowed_tels": [1],
            "LSTR0Corrections": {
                "drs4_pedestal_path": pedestal_file,
            },
        }
    }
    # event_reader
    reader = EventSource(data_file, config=Config(config), max_events=None)
    t = np.linspace(2, 37, 36)

    # configuration for the charge integrator
    charge_config = Config({
        "FixedWindowSum": {
            "window_shift": 6,
            "window_width": 12,
            "peak_index": 18,
        }
    })
    # declare the pedestal component
    pedestal = PedestalIntegrator(
        tel_id=tel_id,
        time_sampling_correction_path=None,
        sample_size=sample_size,
        sample_duration=1000000,
        charge_median_cut_outliers=[-10, 10],
        charge_std_cut_outliers=[-10, 10],
        charge_product="FixedWindowSum",
        config=charge_config,
        subarray=reader.subarray,
    )

    for i, event in enumerate(reader):
        if tel_id != event.trigger.tels_with_trigger[0]:
            raise Exception(
                f"Given wrong telescope id {tel_id}, file has id {event.trigger.tels_with_trigger[0]}"
            )

        are_pedestals_calculated = pedestal.calculate_pedestals(event)
        if are_pedestals_calculated:
            ped_data = event.mon.tel[tel_id].pedestal
            break

    camera_geometry = reader.subarray.tels[tel_id].camera.geometry
    camera_geometry = camera_geometry.transform_to(EngineeringCameraFrame())

    if are_pedestals_calculated and plot_file is not None:
        with PdfPages(plot_file) as pdf:

            plt.rc("font", size=15)

            # first figure
            fig = plt.figure(1, figsize=(12, 24))
            plt.tight_layout()
            n_samples = charge_config["FixedWindowSum"]["window_width"]
            fig.suptitle(f"Run {run}, integration on {n_samples} samples",
                         fontsize=25)
            pad = 420

            image = ped_data.charge_median
            mask = ped_data.charge_median_outliers
            for chan in np.arange(2):
                pad += 1
                plt.subplot(pad)
                plt.tight_layout()
                disp = CameraDisplay(camera_geometry)
                mymin = np.median(image[chan]) - 2 * np.std(image[chan])
                mymax = np.median(image[chan]) + 2 * np.std(image[chan])
                disp.set_limits_minmax(mymin, mymax)
                disp.highlight_pixels(mask[chan], linewidth=2)
                disp.image = image[chan]
                disp.cmap = plt.cm.coolwarm
                # disp.axes.text(lposx, 0, f'{channel[chan]} pedestal [ADC]', rotation=90)
                plt.title(f"{channel[chan]} pedestal [ADC]")
                disp.add_colorbar()

            image = ped_data.charge_std
            mask = ped_data.charge_std_outliers
            for chan in np.arange(2):
                pad += 1
                plt.subplot(pad)
                plt.tight_layout()
                disp = CameraDisplay(camera_geometry)
                mymin = np.median(image[chan]) - 2 * np.std(image[chan])
                mymax = np.median(image[chan]) + 2 * np.std(image[chan])
                disp.set_limits_minmax(mymin, mymax)
                disp.highlight_pixels(mask[chan], linewidth=2)
                disp.image = image[chan]
                disp.cmap = plt.cm.coolwarm
                # disp.axes.text(lposx, 0, f'{channel[chan]} pedestal std [ADC]', rotation=90)
                plt.title(f"{channel[chan]} pedestal std [ADC]")
                disp.add_colorbar()

            #  histograms
            for chan in np.arange(2):
                mean_ped = ped_data.charge_mean[chan]
                ped_std = ped_data.charge_std[chan]

                # select good pixels
                select = np.logical_not(mask[chan])

                # fig.suptitle(f"Run {run} channel: {channel[chan]}", fontsize=25)
                pad += 1
                # pedestal charge
                plt.subplot(pad)
                plt.tight_layout()
                plt.ylabel("pixels")
                plt.xlabel(f"{channel[chan]} pedestal")
                median = np.median(mean_ped[select])
                rms = np.std(mean_ped[select])
                label = f"{channel[chan]} Median {median:3.2f}, std {rms:3.2f}"
                plt.hist(mean_ped[select], bins=50, label=label)
                plt.legend()
                pad += 1
                # pedestal std
                plt.subplot(pad)
                plt.ylabel("pixels")
                plt.xlabel(f"{channel[chan]} pedestal std")
                median = np.median(ped_std[select])
                rms = np.std(ped_std[select])
                label = f" Median {median:3.2f}, std {rms:3.2f}"
                plt.hist(ped_std[select], bins=50, label=label)
                plt.legend()

            plt.subplots_adjust(top=0.94, bottom=0.04, right=0.96)

            pdf.savefig()
            plt.close()

            # event_reader
            # reader = EventSource(data_file, config=Config(config), max_events=1000)

            pix = 0
            pad = 420
            # plot corrected waveforms of first 8 events
            for i, ev in enumerate(reader):
                for chan in np.arange(2):

                    if pad == 420:
                        # new figure

                        fig = plt.figure(ev.index.event_id * 1000,
                                         figsize=(12, 24))
                        fig.suptitle(f"Run {run}, pixel {pix}", fontsize=25)
                        plt.tight_layout()
                    pad += 1
                    plt.subplot(pad)

                    # remove samples at beginning / end of waveform
                    start = reader.r0_r1_calibrator.r1_sample_start.tel[tel_id]
                    end = reader.r0_r1_calibrator.r1_sample_end.tel[tel_id]

                    plt.subplots_adjust(top=0.92)
                    label = f"event {ev.index.event_id}, {channel[chan]}: R0"
                    plt.step(
                        t,
                        ev.r0.tel[tel_id].waveform[chan, pix, start:end],
                        color="blue",
                        label=label,
                    )

                    label = "baseline correction \n + dt corr + corrected spikes"

                    plt.step(
                        t,
                        ev.r1.tel[tel_id].waveform[chan, pix] + offset_value,
                        alpha=0.5,
                        color="green",
                        label=label,
                    )
                    plt.plot([0, 40], [offset_value, offset_value],
                             "k--",
                             label="offset")
                    plt.xlabel("time sample [ns]")
                    plt.ylabel("counts [ADC]")
                    plt.legend()
                    plt.ylim(200, 600)

                if pad == 428:
                    pad = 420
                    plt.subplots_adjust(top=0.92)
                    pdf.savefig()
                    plt.close()

                if i == 8:
                    break

    elif not are_pedestals_calculated:
        log.error("Not able to calculate pedestals.")

    elif plot_file is None:
        log.warning("No PDF output file specified.")
Example #43
import os
import sys
from shutil import get_terminal_size
from typing import List

from mistletoe import markdown
from mistletoe.base_renderer import BaseRenderer
from pygments import highlight
from pygments.formatters import TerminalTrueColorFormatter
from pygments.formatters.terminal256 import EscapeSequence
from pygments.lexers import Python3Lexer, get_lexer_by_name
from pygments.style import Style
from pygments.token import Token
from pygments.util import ClassNotFound
from traitlets.config import Config

FILE = sys.argv[0]
NAME = os.path.basename(FILE)
WIDTH, _ = get_terminal_size()

CONFIG = Config()
CONFIG.TerminalInteractiveShell.confirm_exit = False
CONFIG.TerminalIPythonApp.display_banner = False

BASHRC_TEMPLATE = """
    source ~/.bashrc
    history -r
    HISTFILE={histfile}
    {extrarc_lines}
"""

SYSTEMD_KEYWORDS = set("minutely hourly daily oneshot exec notify".split())


def bash(history: List[str] = None, init: List[str] = None) -> None:
    if history is None:
Example #44
    def test_remote_profile_dir(self):
        cfg = Config()
        launcher_cfg = getattr(cfg, self.launcher_class.__name__)
        launcher_cfg.remote_profile_dir = "foo"
        launcher = self.build_launcher(config=cfg)
        self.assertEqual(launcher.remote_profile_dir, "foo")
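
The getattr call above addresses a Config section by a runtime string. A minimal sketch of that behaviour, with a hypothetical class name:

from traitlets.config import Config

cfg = Config()
section = getattr(cfg, "MyLauncher")  # auto-creates the "MyLauncher" section
section.remote_profile_dir = "foo"
assert cfg.MyLauncher.remote_profile_dir == "foo"
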
Example #45
class TestPathFileTemplate(ServerTest):

    config = Config(
        {
            "NotebookApp": {"nbserver_extensions": {"jupyter_project": True}},
            "JupyterProject": {
                "file_templates": [
                    {
                        "name": "template1",
                        "location": str(Path(template_folder.name) / "file_templates"),
                        "files": [{"template": "file1.py"}, {"template": "file2.html"}],
                    },
                    {
                        "name": "template2",
                        "module": "my_package",
                        "location": "my_templates",
                        "files": [{"template": "file1.py"}],
                    },
                    {
                        "name": "template3",
                        "module": "my_package.sub",
                        "location": "templates",
                        "files": [{"template": "file1.py"}],
                    },
                ]
            },
        }
    )

    @classmethod
    def setup_class(cls):
        # Given
        folder = Path(template_folder.name) / "file_templates"
        folder.mkdir(exist_ok=True, parents=True)
        file1 = folder / "file1.py"
        file1.write_text("def add(a, b):\n    return a + b\n")
        file2 = folder / "file2.html"
        file2.write_text(
            """<!doctype html>
<html lang="en">
<head>
  <title>HTML</title>
</head>
<body>
</body>
</html>"""
        )

        folder = Path(template_folder.name) / "file_templates" / "my_package"
        folder.mkdir(exist_ok=True, parents=True)
        sys.path.insert(0, str(folder.parent))
        folder1 = folder / "my_templates"
        folder2 = folder / "sub" / "templates"

        for folder in (folder1, folder2):
            folder.mkdir(exist_ok=True, parents=True)
            init = folder.parent / "__init__.py"
            init.write_bytes(b"")
            file1 = folder / "file1.py"
            file1.write_text("def add(a, b):\n    return a + b\n")
        super().setup_class()

    @classmethod
    def teardown_class(cls):
        super().teardown_class()
        sys.path.remove(str(Path(template_folder.name) / "file_templates"))
        template_folder.cleanup()

    @mock.patch("jupyter_project.handlers.Template")
    @mock.patch("jinja2.Template.render")
    def test_template1_file1(self, renderer, default_name):
        instance = default_name.return_value
        name = str(uuid.uuid4())
        instance.render.return_value = name
        renderer.return_value = "dummy content"
        path = generate_path()
        body = dict(dummy="hello", smart="world")

        answer = self.api_tester.post(
            ["files", quote("template1/file1", safe=""), path], body=body
        )
        assert answer.status_code == 201

        instance.render.assert_called_with(**body)
        renderer.assert_called_with(**body)
        model = answer.json()
        assert model["content"] is None
        assert model["name"] == name + ".py"
        assert model["path"] == url_path_join(path, name + ".py")

    @mock.patch("jupyter_project.handlers.Template")
    @mock.patch("jinja2.Template.render")
    def test_template1_file2(self, renderer, default_name):
        instance = default_name.return_value
        name = str(uuid.uuid4())
        instance.render.return_value = name
        renderer.return_value = "dummy content"
        path = generate_path()
        body = dict(dummy="hello", smart="world")

        answer = self.api_tester.post(
            ["files", quote("template1/file2", safe=""), path], body=body
        )
        assert answer.status_code == 201

        instance.render.assert_called_with(**body)
        renderer.assert_called_with(**body)
        model = answer.json()
        assert model["content"] is None
        assert model["name"] == name + ".html"
        assert model["path"] == url_path_join(path, name + ".html")

    @mock.patch("jupyter_project.handlers.Template")
    @mock.patch("jinja2.Template.render")
    def test_template2_file1(self, renderer, default_name):
        instance = default_name.return_value
        name = str(uuid.uuid4())
        instance.render.return_value = name
        renderer.return_value = "dummy content"
        path = generate_path()
        body = dict(dummy="hello", smart="world")

        answer = self.api_tester.post(
            ["files", quote("template2/file1", safe=""), path], body=body
        )
        assert answer.status_code == 201

        instance.render.assert_called_with(**body)
        renderer.assert_called_with(**body)
        model = answer.json()
        assert model["content"] is None
        assert model["name"] == name + ".py"
        assert model["path"] == url_path_join(path, name + ".py")

    @mock.patch("jupyter_project.handlers.Template")
    @mock.patch("jinja2.Template.render")
    def test_template3_file1(self, renderer, default_name):
        instance = default_name.return_value
        name = str(uuid.uuid4())
        instance.render.return_value = name
        renderer.return_value = "dummy content"
        path = generate_path()
        body = dict(dummy="hello", smart="world")

        answer = self.api_tester.post(
            ["files", quote("template3/file1", safe=""), path], body=body
        )
        assert answer.status_code == 201

        instance.render.assert_called_with(**body)
        renderer.assert_called_with(**body)
        model = answer.json()
        assert model["content"] is None
        assert model["name"] == name + ".py"
        assert model["path"] == url_path_join(path, name + ".py")

    def test_missing_endpoint(self):
        with assert_http_error(404):
            self.api_tester.post(["files", quote("template4/file", safe="")], body={})

    def test_missing_body(self):
        with assert_http_error(500):
            self.api_tester.post(["files", quote("template3/file1", safe="")])

    @mock.patch("jupyter_project.handlers.Template")
    @mock.patch("jinja2.Template.render")
    def test_fail_name_rendering(self, renderer, default_name):
        instance = default_name.return_value
        instance.render.side_effect = jinja2.TemplateError
        renderer.return_value = "dummy content"
        path = generate_path()
        body = dict(dummy="hello", smart="world")

        answer = self.api_tester.post(
            ["files", quote("template1/file1", safe=""), path], body=body
        )
        assert answer.status_code == 201

        instance.render.assert_called_with(**body)
        renderer.assert_called_with(**body)
        model = answer.json()
        assert model["content"] is None
        print(model["name"])
        assert re.match(r"untitled\d*\.py", model["name"]) is not None
        assert re.match(path + r"/untitled\d*\.py", model["path"]) is not None

    @mock.patch("jupyter_project.handlers.Template")
    @mock.patch("jinja2.Template.render")
    def test_fail_template_rendering(self, renderer, default_name):
        instance = default_name.return_value
        name = str(uuid.uuid4())
        instance.render.return_value = name
        renderer.side_effect = jinja2.TemplateError
        path = generate_path()
        body = dict(dummy="hello", smart="world")

        with assert_http_error(500):
            self.api_tester.post(
                ["files", quote("template1/file1", safe=""), path], body=body
            )
Example #46
    def __init__(self, **kwargs):
        # Config(None) would raise a TypeError, so fall back to an empty mapping
        super(RawData, self).__init__(config=Config(kwargs.pop('config', None) or {}))
Example #47
    def setup_class(cls):
        cls.tmp_dir = TemporaryDirectory()

        def tmp(*parts):
            path = os.path.join(cls.tmp_dir.name, *parts)
            try:
                os.makedirs(path)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
            return path

        cls.home_dir = tmp('home')
        cls.data_dir = tmp('data')
        cls.config_dir = tmp('config')
        cls.runtime_dir = tmp('runtime')
        cls.lab_dir = tmp('lab')
        cls.app_settings_dir = tmp('appsettings')
        cls.lab_schemas = tmp('labschemas')
        cls.lab_settings = tmp('labsettings')
        cls.lab_workspaces = tmp('labworkspaces')
        cls.env_patch = patch.dict(
            'os.environ',
            {
                'HOME': cls.home_dir,
                'PYTHONPATH': os.pathsep.join(sys.path),
                'IPYTHONDIR': pjoin(cls.home_dir, '.ipython'),
                'JUPYTER_NO_CONFIG': '1',  # needed in the future
                'JUPYTER_CONFIG_DIR': cls.config_dir,
                'JUPYTER_DATA_DIR': cls.data_dir,
                'JUPYTER_RUNTIME_DIR': cls.runtime_dir,
                'JUPYTERLAB_DIR': cls.lab_dir,
                'JUPYTERLAB_SETTINGS_DIR': cls.lab_settings
            })
        cls.env_patch.start()
        cls.lab_config = LabConfig(app_settings_dir=cls.app_settings_dir,
                                   schemas_dir=cls.lab_schemas,
                                   user_settings_dir=cls.lab_settings,
                                   workspaces_dir=cls.lab_workspaces)
        cls.notebook_dir = tmp('notebooks')
        cls.path_patch = patch.multiple(
            jupyter_core.paths,
            SYSTEM_JUPYTER_PATH=[tmp('share', 'jupyter')],
            ENV_JUPYTER_PATH=[tmp('env', 'share', 'jupyter')],
            SYSTEM_CONFIG_PATH=[tmp('etc', 'jupyter')],
            ENV_CONFIG_PATH=[tmp('env', 'etc', 'jupyter')],
        )
        cls.path_patch.start()

        cls.config = cls.config or Config()
        cls.config.NotebookNotary.db_file = ':memory:'

        cls.token = hexlify(os.urandom(4)).decode('ascii')

        started = Event()

        def start_thread():
            if 'asyncio' in sys.modules:
                import asyncio
                asyncio.set_event_loop(asyncio.new_event_loop())
            app = cls.notebook = cls.Application(app_dir=cls.lab_dir,
                                                 port=cls.port,
                                                 port_retries=0,
                                                 open_browser=False,
                                                 config_dir=cls.config_dir,
                                                 data_dir=cls.data_dir,
                                                 runtime_dir=cls.runtime_dir,
                                                 notebook_dir=cls.notebook_dir,
                                                 base_url=cls.url_prefix,
                                                 config=cls.config,
                                                 allow_root=True,
                                                 token=cls.token,
                                                 lab_config=cls.lab_config)
            # don't register signal handler during tests
            app.init_signal = lambda: None
            # clear log handlers and propagate to root for nose to capture it
            # needs to be redone after initialize, which reconfigures logging
            app.log.propagate = True
            app.log.handlers = []
            app.initialize(argv=[])
            app.log.propagate = True
            app.log.handlers = []
            loop = IOLoop.current()
            loop.add_callback(started.set)
            try:
                app.start()
            finally:
                # set the event, so failure to start doesn't cause a hang
                started.set()
                app.session_manager.close()

        cls.notebook_thread = Thread(target=start_thread)
        cls.notebook_thread.daemon = True
        cls.notebook_thread.start()
        started.wait()
        cls.wait_until_alive()
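
Note the `cls.config or Config()` idiom above: Config subclasses dict, so an empty Config is falsy and gets replaced by a fresh instance. A minimal sketch:

from traitlets.config import Config

base = Config()
assert not base                           # empty Config is falsy
base.NotebookNotary.db_file = ':memory:'  # first access creates the section
assert base                               # now truthy
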
Example #48
def make_app():
    # NBConvert config
    config = Config()
    config.NbconvertApp.fileext = 'html'
    config.CSSHTMLHeaderTransformer.enabled = False
    # don't strip the files prefix - we use it for redirects
    # config.Exporter.filters = {'strip_files_prefix': lambda s: s}

    # DEBUG env implies both autoreload and log-level
    if os.environ.get("DEBUG"):
        options.debug = True
        logging.getLogger().setLevel(logging.DEBUG)

    # setup memcache
    mc_pool = ThreadPoolExecutor(options.mc_threads)

    # setup formats
    formats = configure_formats(options, config, log.app_log)

    if options.processes:
        pool = ProcessPoolExecutor(options.processes)
    else:
        pool = ThreadPoolExecutor(options.threads)

    memcache_urls = os.environ.get('MEMCACHIER_SERVERS',
                                   os.environ.get('MEMCACHE_SERVERS'))

    # Handle linked Docker containers
    if (os.environ.get('NBCACHE_PORT')):
        tcp_memcache = os.environ.get('NBCACHE_PORT')
        memcache_urls = tcp_memcache.split('tcp://')[1]

    if (os.environ.get('NBINDEX_PORT')):
        log.app_log.info("Indexing notebooks")
        tcp_index = os.environ.get('NBINDEX_PORT')
        index_url = tcp_index.split('tcp://')[1]
        index_host, index_port = index_url.split(":")
        indexer = ElasticSearch(index_host, index_port)
    else:
        log.app_log.info("Not indexing notebooks")
        indexer = NoSearch()

    if options.no_cache:
        log.app_log.info("Not using cache")
        cache = MockCache()
    elif pylibmc and memcache_urls:
        kwargs = dict(pool=mc_pool)
        username = os.environ.get('MEMCACHIER_USERNAME', '')
        password = os.environ.get('MEMCACHIER_PASSWORD', '')
        if username and password:
            kwargs['binary'] = True
            kwargs['username'] = username
            kwargs['password'] = password
            log.app_log.info("Using SASL memcache")
        else:
            log.app_log.info("Using plain memecache")

        cache = AsyncMultipartMemcache(memcache_urls.split(','), **kwargs)
    else:
        log.app_log.info("Using in-memory cache")
        cache = DummyAsyncCache()

    # setup tornado handlers and settings

    template_path = pjoin(here, 'templates')
    static_path = pjoin(here, 'static')
    env = Environment(loader=FileSystemLoader(template_path), autoescape=True)
    env.filters['markdown'] = markdown.markdown
    try:
        git_data = git_info(here)
    except Exception as e:
        app_log.error("Failed to get git info: %s", e)
        git_data = {}
    else:
        git_data['msg'] = escape(git_data['msg'])

    if options.no_cache:
        # force jinja to recompile template every time
        env.globals.update(cache_size=0)
    env.globals.update(
        nrhead=nrhead,
        nrfoot=nrfoot,
        git_data=git_data,
        jupyter_info=jupyter_info(),
        len=len,
    )
    AsyncHTTPClient.configure(HTTPClientClass)
    client = AsyncHTTPClient()

    # load frontpage sections
    with io.open(options.frontpage, 'r') as f:
        frontpage_sections = json.load(f)

    # cache frontpage links for the maximum allowed time
    max_cache_uris = {''}
    for section in frontpage_sections:
        for link in section['links']:
            max_cache_uris.add('/' + link['target'])

    fetch_kwargs = dict(connect_timeout=10)
    if options.proxy_host:
        fetch_kwargs.update(
            dict(proxy_host=options.proxy_host, proxy_port=options.proxy_port))

        log.app_log.info("Using web proxy {proxy_host}:{proxy_port}."
                         "".format(**fetch_kwargs))

    settings = dict(
        log_function=log_request,
        jinja2_env=env,
        static_path=static_path,
        client=client,
        formats=formats,
        default_format=options.default_format,
        providers=options.providers,
        provider_rewrites=options.provider_rewrites,
        config=config,
        index=indexer,
        cache=cache,
        cache_expiry_min=options.cache_expiry_min,
        cache_expiry_max=options.cache_expiry_max,
        max_cache_uris=max_cache_uris,
        frontpage_sections=frontpage_sections,
        pool=pool,
        gzip=True,
        render_timeout=20,
        localfile_path=os.path.abspath(options.localfiles),
        fetch_kwargs=fetch_kwargs,
        mathjax_url=options.mathjax_url,
    )

    # handle handlers
    handlers = init_handlers(formats, options.providers)

    if options.localfiles:
        log.app_log.warning(
            "Serving local notebooks in %s, this can be a security risk",
            options.localfiles)
        # use absolute or relative paths:
        local_handlers = [(r'/localfile/(.*)', LocalFileHandler)]
        handlers = (local_handlers + format_handlers(formats, local_handlers) +
                    handlers)

    # create the app
    return web.Application(handlers, debug=options.debug, **settings)
Example #49
class MetadataHandlerHierarchyTest(MetadataTestBase):
    """Test Metadata REST API"""
    config = Config({'NotebookApp': {"nbserver_extensions": {"elyra": True}}})

    def setUp(self):
        # The _dir names here are fixtures that should be referenced by the appropriate
        # test methods once transition to jupyter_server occurs.
        self.namespace_location = os.path.join(
            jupyter_core.paths.jupyter_data_dir(), 'metadata',
            METADATA_TEST_NAMESPACE)

        env_path = getattr(jupyter_core.paths, 'ENV_JUPYTER_PATH')
        self.factory_location = os.path.join(env_path[0], 'metadata',
                                             METADATA_TEST_NAMESPACE)

        system_path = getattr(jupyter_core.paths, 'SYSTEM_JUPYTER_PATH')
        self.shared_location = os.path.join(system_path[0], 'metadata',
                                            METADATA_TEST_NAMESPACE)

        byo_instance = copy.deepcopy(byo_metadata_json)
        byo_instance['display_name'] = 'factory'
        create_json_file(self.factory_location, 'byo_1.json', byo_instance)
        create_json_file(self.factory_location, 'byo_2.json', byo_instance)
        create_json_file(self.factory_location, 'byo_3.json', byo_instance)

    def test_get_hierarchy_instances(self):
        # Ensure all valid metadata can be found
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        metadata = r.json()
        assert isinstance(metadata, dict)
        assert len(metadata) == 1
        instances = metadata[METADATA_TEST_NAMESPACE]
        assert len(instances) == 3
        assert isinstance(instances, list)
        assert get_instance(instances, 'name', 'byo_1')
        assert get_instance(instances, 'name', 'byo_2')
        assert get_instance(instances, 'name', 'byo_3')
        byo_3 = get_instance(instances, 'name', 'byo_3')
        assert byo_3['display_name'] == 'factory'

    def test_create_instance(self):
        """Create a simple instance - not conflicting with factory instances. """

        valid = copy.deepcopy(valid_metadata_json)
        valid['name'] = 'valid'
        body = json.dumps(valid)

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  body=body,
                  method='POST',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 201
        assert r.headers.get('location') == r.request.path_url + '/valid'
        metadata = r.json()
        # Add expected "extra" fields to 'valid' so whole-object comparison is satisfied.
        # These are added during the pre_save() hook on the MockMetadataTest class instance.
        valid['for_update'] = False
        valid['special_property'] = valid['metadata']['required_test']
        assert metadata == valid

    def test_create_hierarchy_instance(self):
        """Attempts to create an instance from one in the hierarchy. """

        byo_instance = copy.deepcopy(byo_metadata_json)
        byo_instance['display_name'] = 'user'
        byo_instance['name'] = 'byo_2'
        body = json.dumps(byo_instance)

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  body=body,
                  method='POST',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 409
        assert "already exists" in r.text

        # Confirm the instance was not changed
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        metadata = r.json()
        assert isinstance(metadata, dict)
        assert len(metadata) == 1
        instances = metadata[METADATA_TEST_NAMESPACE]
        assert len(instances) == 3
        assert isinstance(instances, list)
        byo_2 = get_instance(instances, 'name', 'byo_2')
        assert byo_2['display_name'] == 'factory'

    def test_create_invalid_instance(self):
        """Create a simple instance - not conflicting with factory instances. """

        invalid = copy.deepcopy(invalid_metadata_json)
        invalid['name'] = 'invalid'
        body = json.dumps(invalid)

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  body=body,
                  method='POST',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 400
        assert "Validation failed for instance 'invalid'" in r.text

    def test_create_instance_missing_schema(self):
        """Attempt to create an instance using an invalid schema """

        missing_schema = copy.deepcopy(valid_metadata_json)
        missing_schema['name'] = 'missing_schema'
        missing_schema['schema_name'] = 'missing_schema'
        missing_schema.pop('display_name')
        body = json.dumps(missing_schema)

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  body=body,
                  method='POST',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404

        # Ensure the instance was not created.  REST can't be used for this check since a GET
        # would return 404 whether or not a stray file had been left behind by the failed
        # validation (the missing schema raises FileNotFoundError, which is now trapped), so
        # inspect the filesystem directly.
        assert not os.path.exists(
            os.path.join(self.namespace_location, 'missing_schema.json'))

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'missing_schema',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404

    def test_update_non_existent(self):
        """Attempt to update a non-existent instance. """

        # Try to update a non-existent instance - 404 expected...
        valid = copy.deepcopy(valid_metadata_json)
        valid['name'] = 'valid'
        valid['metadata']['number_range_test'] = 7
        body = json.dumps(valid)

        # Update (non-existent) instance
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'valid',
                  body=body,
                  method='PUT',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404

    def test_update_instance(self):
        """Update a simple instance. """

        # Create an instance, then update
        create_json_file(self.namespace_location, 'valid.json',
                         valid_metadata_json)
        valid = copy.deepcopy(valid_metadata_json)
        valid['name'] = 'valid'
        valid['metadata']['number_range_test'] = 7
        body = json.dumps(valid)

        # Update instance
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'valid',
                  body=body,
                  method='PUT',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        instance = r.json()
        assert instance['metadata']['number_range_test'] == 7

        # Confirm update via fetch
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'valid',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        instance = r.json()
        assert instance['metadata']['number_range_test'] == 7

    def test_invalid_update(self):
        """Update a simple instance with invalid metadata. """

        # Create an instance, then update with invalid metadata
        create_json_file(self.namespace_location, 'update_bad_md.json',
                         valid_metadata_json)

        # Fetch it to get the valid instance
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'update_bad_md',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        instance = r.json()

        # Now attempt the update with bad metadata and ensure previous still exists
        valid2 = copy.deepcopy(valid_metadata_json)
        valid2['name'] = 'update_bad_md'
        valid2['metadata']['number_range_test'] = 42  # exceeds the schema's allowed range
        body2 = json.dumps(valid2)

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'update_bad_md',
                  body=body2,
                  method='PUT',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 400

        # Fetch again and ensure it matches the previous instance
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'update_bad_md',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        instance2 = r.json()
        assert instance2 == instance

    def test_update_fields(self):

        # Create an instance, then update with a new field
        create_json_file(self.namespace_location, 'update_fields.json',
                         valid_metadata_json)
        valid = copy.deepcopy(valid_metadata_json)
        valid['metadata']['number_range_test'] = 7
        body = json.dumps(valid)

        # Update instance adding number_range_test
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'update_fields',
                  body=body,
                  method='PUT',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        instance = r.json()
        assert instance['metadata']['number_range_test'] == 7

        # Add a new field (per the schema) and remove another
        valid['metadata'].pop('number_range_test')
        valid['metadata']['string_length_test'] = "valid len"
        body = json.dumps(valid)

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'update_fields',
                  body=body,
                  method='PUT',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        instance = r.json()
        assert instance['metadata']['string_length_test'] == "valid len"
        assert 'number_range_test' not in instance['metadata']

    def test_update_hierarchy_instance(self):
        """Update a simple instance - that's conflicting with factory instances. """

        # Intentionally omit 'name' since this is an update
        byo_instance = copy.deepcopy(byo_metadata_json)
        byo_instance['display_name'] = 'user'
        byo_instance['metadata']['number_range_test'] = 7
        body = json.dumps(byo_instance)

        # Because this is considered an update, replacement is enabled.
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'byo_2',
                  body=body,
                  method='PUT',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200

        # Confirm the instances and ensure byo_2 is in USER area
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        metadata = r.json()
        assert isinstance(metadata, dict)
        assert len(metadata) == 1
        instances = metadata[METADATA_TEST_NAMESPACE]
        assert len(instances) == 3
        assert isinstance(instances, list)
        byo_2 = get_instance(instances, 'name', 'byo_2')
        assert byo_2['schema_name'] == byo_metadata_json['schema_name']
        assert byo_2['metadata']['number_range_test'] == 7

        # Attempt to rename the resource; a 400 is expected.
        byo_2['name'] = 'byo_2_renamed'
        body = json.dumps(byo_2)

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'byo_2',
                  body=body,
                  method='PUT',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 400
        assert "The attempt to rename instance" in r.text

        # Confirm no update occurred
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'byo_2',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 200
        instance = r.json()
        assert instance['name'] == 'byo_2'

    def test_delete_instance(self):
        """Create a simple instance - not conflicting with factory instances and delete it. """

        # First, attempt to delete a non-existent resource; a 404 is expected.
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'missing',
                  method='DELETE',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404

        create_json_file(self.namespace_location, 'valid.json',
                         valid_metadata_json)

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'valid',
                  method='DELETE',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 204
        assert len(r.text) == 0

        # Confirm deletion
        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'valid',
                  method='DELETE',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 404

    def test_delete_hierarchy_instance(self):
        """Create a simple instance - that conflicts with factory instances and delete it only if local. """

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'byo_2',
                  method='DELETE',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 403
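
        # 403 because the only copy of 'byo_2' lives in a read-only (factory)
        # location; deletion is permitted only once a user-level copy exists.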

        # create local instance, delete should succeed
        create_json_file(self.namespace_location, 'byo_2.json',
                         byo_metadata_json)

        r = fetch(self.request,
                  'elyra',
                  'metadata',
                  METADATA_TEST_NAMESPACE,
                  'byo_2',
                  method='DELETE',
                  base_url=self.base_url(),
                  headers=self.auth_headers())
        assert r.status_code == 204
        assert len(r.text) == 0
Ejemplo n.º 50
import re

import nbformat
from nbconvert.preprocessors import Preprocessor
from nbconvert import HTMLExporter
from traitlets.config import Config
from pathlib import Path

__all__ = ['read_nb', 'convert_nb', 'convert_all']

class HandleLinksPreprocessor(Preprocessor):
    "A preprocesser that replaces all the .ipynb by .html in links. "
    def preprocess_cell(self, cell, resources, index):
        if 'source' in cell and cell.cell_type == "markdown":
            cell.source = re.sub(r"\((.*)\.ipynb(.*)\)",r"(\1.html\2)",cell.source).replace('¶','')

        return cell, resources
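
# For example, the substitution above rewrites a markdown link like this
# (illustrative):
#     re.sub(r"\((.*)\.ipynb(.*)\)", r"(\1.html\2)", "see (intro.ipynb)")
#     # -> 'see (intro.html)'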

exporter = HTMLExporter(Config())
exporter.exclude_input_prompt=True
exporter.exclude_output_prompt=True
# Load the template that deals with hidden cells.
exporter.template_file = 'jekyll.tpl'
path = Path(__file__).parent
exporter.template_path.append(str(path))
# Preprocessor that converts `.ipynb` links to `.html` (disabled by default)
#exporter.register_preprocessor(HandleLinksPreprocessor, enabled=True)

def read_nb(fname):
    "Read the notebook in `fname`."
    with open(fname,'r') as f: return nbformat.reads(f.read(), as_version=4)

def convert_nb(fname, dest_path='.'):
    "Convert a notebook `fname` to html file in `dest_path`."
    # The original example is truncated here; a minimal completion, consistent
    # with the helpers above, might look like:
    nb = read_nb(fname)
    body, _ = exporter.from_notebook_node(nb)
    dest = Path(dest_path)/Path(fname).with_suffix('.html').name
    dest.write_text(body, encoding='utf-8')
Ejemplo n.º 51
 def default_config(self):
     c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
     c.merge(super(MarkdownExporter,self).default_config)
     return c
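
# For context, `Config.merge` folds the argument's sections into the caller,
# combining the subclass's settings with the parent's defaults.  A minimal
# standalone sketch of that behavior (illustrative only):

from traitlets.config import Config

c = Config({'ExtractOutputPreprocessor': {'enabled': True}})
parent_defaults = Config({'CSSHTMLHeaderPreprocessor': {'enabled': True}})
c.merge(parent_defaults)  # sections from the argument are folded into `c`
assert c.ExtractOutputPreprocessor.enabled
assert c.CSSHTMLHeaderPreprocessor.enabled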