def test_build_artifacts_logs_and_failures(self):
    """Broken artifact builder entry points are logged, not raised.

    Covers both direct registry processing and the ArtifactBuilder
    front-end, which must report the same import failure.
    """
    with pretty_logging(stream=mocks.StringIO()) as stream:
        self.registry.process_package('app')
    log = stream.getvalue()
    # builder target that cannot be imported
    self.assertIn(
        "unable to import the target builder for the entry point "
        "'not_exist.js = calmjs_testing_dummy:not_exist' from package "
        "'app 1.0'", log)
    # builder that imports but has the wrong call signature
    self.assertIn(
        "the builder referenced by the entry point "
        "'bad.js = calmjs_testing_dummy:bad_builder' from package "
        "'app 1.0' has an incompatible signature", log)
    # try again using the artifact builder
    from calmjs.registry import _inst
    # swap the global registry instance's record for this test and
    # restore it afterwards via addCleanup
    _inst.records.pop('calmjs.artifacts', None)
    self.addCleanup(_inst.records.pop, 'calmjs.artifacts')
    _inst.records['calmjs.artifacts'] = self.registry
    builder = ArtifactBuilder('calmjs.artifacts')
    with pretty_logging(stream=mocks.StringIO()) as stream:
        # build fails overall, hence False
        self.assertFalse(builder(['app']))
    log = stream.getvalue()
    self.assertIn(
        "unable to import the target builder for the entry point "
        "'not_exist.js = calmjs_testing_dummy:not_exist' from package "
        "'app 1.0'", log)
def test_set_node_path(self):
    """NODE_PATH given to the driver is injected into the call env,
    overriding any NODE_PATH supplied per-call."""
    stub_mod_call(self, cli)
    stub_base_which(self)
    node_path = mkdtemp(self)
    driver = cli.PackageManagerDriver(
        node_path=node_path, pkg_manager_bin='mgr')
    # ensure env is passed into the call.
    with pretty_logging(stream=mocks.StringIO()):
        driver.pkg_manager_install()
    self.assertEqual(self.call_args, ((['mgr', 'install'], ), {
        'env': finalize_env({'NODE_PATH': node_path}),
    }))
    # will be overridden by instance settings.
    with pretty_logging(stream=mocks.StringIO()):
        driver.pkg_manager_install(env={
            'PATH': '.',
            'MGR_ENV': 'dev',
            'NODE_PATH': '/tmp/somewhere/else/node_mods',
        })
    # the instance node_path wins; other caller keys pass through
    self.assertEqual(self.call_args, ((['mgr', 'install'], ), {
        'env': finalize_env({
            'NODE_PATH': node_path,
            'MGR_ENV': 'dev',
            'PATH': '.'
        }),
    }))
def test_which_with_node_modules(self): driver = base.BaseDriver() # ensure that NODE_PATH is initially None driver.node_path = None driver.working_dir = mkdtemp(self) # initially should be empty, since no node_modules in either # directories that it should check with pretty_logging(stream=mocks.StringIO()) as s: self.assertIsNone(driver.which_with_node_modules()) # should not generate extra log messages. self.assertNotIn('will attempt', s.getvalue()) # having the NODE_PATH defined will result in such p1 = mkdtemp(self) p2 = mkdtemp(self) driver.node_path = pathsep.join([p1, p2]) with pretty_logging(stream=mocks.StringIO()) as s: self.assertIsNone(driver.which_with_node_modules()) # should not generate extra log messages, binary still not # assigned. self.assertNotIn('will attempt', s.getvalue()) driver.binary = 'dummy' with pretty_logging(stream=mocks.StringIO()) as s: self.assertIsNone(driver.which_with_node_modules()) # now the log should show what attempted. log = s.getvalue() self.assertIn( "'BaseDriver' instance will attempt to locate 'dummy' binary from " "its NODE_PATH of", log) self.assertIn(p1, log) self.assertIn(p2, log) self.assertIn("'BaseDriver' instance located 2 possible paths", log) # try again with working directory driver.node_path = None dwd_wd_nm = join(driver.working_dir, 'node_modules') os.mkdir(dwd_wd_nm) with pretty_logging(stream=mocks.StringIO()) as s: self.assertIsNone(driver.which_with_node_modules()) log = s.getvalue() # now the log should show what attempted. self.assertIn( "'BaseDriver' instance will attempt to locate 'dummy' binary from", log, ) self.assertIn(dwd_wd_nm, log) self.assertIn("located through the working directory", log) self.assertIn("'BaseDriver' instance located 1 possible paths", log)
def test_assemble_standard_emptied(self):
    """Missing modules stubbed with 'empty:' produce a warning (not an
    ERROR) and 'empty:' path entries in both generated configs."""
    with pretty_logging(logger='calmjs.rjs', stream=mocks.StringIO()) as s:
        build_js, config_js = self.assemble_spec_config(
            stub_missing_with_empty=1)
    self.assertNotIn('ERROR', s.getvalue())
    self.assertIn(
        "source file(s) referenced modules that are missing in the "
        "build directory: 'jquery', 'some.pylike.module', 'underscore'",
        s.getvalue())
    self.assertEqual(build_js['paths'], {
        'jquery': 'empty:',
        'some.pylike.module': 'empty:',
        'underscore': 'empty:',
    })
    # config_js retains the real module paths alongside the stubs
    self.assertEqual(config_js['paths'], {
        'module1': 'module1.js?',
        'module2': 'module2.js?',
        'module3': 'module3.js?',
        'jquery': 'empty:',
        'some.pylike.module': 'empty:',
        'underscore': 'empty:',
    })
def test_registry_load_working_set(self):
    """JinjaTemplateRegistry loads templates only: no records are
    exported to the calmjs toolchain, but templates are reachable via
    the registry.templates mapping."""
    # do note these mocking sets are for the registry; actual
    # filenames is not overridden (uses pkg_resources directly)
    working_set = mocks.WorkingSet(
        {'nunja.tmpl': [
            'nunja.testing.templates = nunja.testing:mold',
        ]},
        dist=Distribution(project_name='nunjatesting', version='0.0'))
    with pretty_logging(logger='nunja', stream=mocks.StringIO()) as stream:
        registry = JinjaTemplateRegistry(
            'nunja.tmpl', _working_set=working_set)
    self.assertIn('7 templates', stream.getvalue())
    self.assertNotIn('scripts', stream.getvalue())
    # to prevent the export of names into the calmjs toolchain, the
    # standard record retrieval provides nothing.
    self.assertEqual({}, registry.get_records_for_package('nunjatesting'))
    self.assertEqual(
        registry.get_record('nunja.testing.templates/basic'), {})
    # records are available via an alternative method.
    self.assertEqual([
        'nunja.testing.templates/basic/template.nja',
        'nunja.testing.templates/include_by_name/empty.nja',
        'nunja.testing.templates/include_by_name/template.nja',
        'nunja.testing.templates/include_by_value/template.nja',
        'nunja.testing.templates/itemlist/template.nja',
        'nunja.testing.templates/noinit/template.nja',
        'nunja.testing.templates/problem/template.nja',
    ], sorted(registry.templates.keys()))
def test_registry_load_entry_point_missing_attrs(self):
    """Entry points lacking the module:attr form are logged as
    incompatible and yield no records for the package."""
    working_set = mocks.WorkingSet(
        {
            'nunja.mold': [
                # no attrs at all
                'nunja.testing.mold1 = nunja.testing',
                # attr present but presumably of the wrong shape
                'nunja.testing.mold2 = nunja:testing.mold',
            ]
        },
        dist=Distribution(project_name='nunjatesting', version='0.0'))
    with pretty_logging(logger='nunja', stream=mocks.StringIO()) as stream:
        registry = MoldRegistry('nunja.mold', _working_set=working_set)
    msg = stream.getvalue()
    self.assertIn(
        "entry_point 'nunja.testing.mold1 = nunja.testing' "
        "from package 'nunjatesting 0.0' incompatible ",
        msg,
    )
    self.assertIn(
        "entry_point 'nunja.testing.mold2 = nunja:testing.mold' "
        "from package 'nunjatesting 0.0' incompatible ",
        msg,
    )
    records = registry.get_records_for_package('nunjatesting')
    self.assertEqual(records, {})
def test_root_runtime_bad_names(self):
    """Entry points with names argparse cannot accept are rejected
    with a log entry and excluded from the -h command listing."""
    working_set = mocks.WorkingSet({
        'calmjs.runtime': [
            'bad name = calmjs.npm:npm.runtime',
            'bad.name = calmjs.npm:npm.runtime',
            'badname:likethis = calmjs.npm:npm.runtime',
        ]
    })
    stderr = mocks.StringIO()
    with pretty_logging(
            logger='calmjs.runtime', level=DEBUG, stream=stderr):
        rt = runtime.Runtime(working_set=working_set)
    err = stderr.getvalue()
    self.assertIn("bad 'calmjs.runtime' entry point", err)
    stub_stdouts(self)
    # -h exits after printing help
    with self.assertRaises(SystemExit):
        rt(['-h'])
    out = sys.stdout.getvalue()
    # this results in unnatural argparsing situation
    self.assertNotIn('bad name', out)
    # reserved for disambiguation
    self.assertNotIn('bad.name', out)
    self.assertNotIn('badname:likethis', out)
    # command listing naturally not available.
    self.assertNotIn('npm', out)
def test_registry_graceful_fail(self):
    """Unresolvable registry records return None and log, rather than
    raising, for both unknown names and broken entry points."""
    working_set = mocks.WorkingSet({
        'calmjs.registry': [
            'failure = calmjs.testing.no_such_module:NoClass',
        ]
    })
    registry = calmjs.registry.Registry(
        'calmjs.registry', _working_set=working_set)
    # a name with no entry point at all
    with pretty_logging(stream=mocks.StringIO()) as stream:
        self.assertIsNone(registry.get_record('calmjs.module'))
    self.assertIn("'calmjs.module' does not resolve", stream.getvalue())
    # an entry point whose target module does not exist
    with pretty_logging(stream=mocks.StringIO()) as stream:
        self.assertIsNone(registry.get_record('failure'))
    self.assertIn("ImportError 'failure", stream.getvalue())
def test_update_artifact_metadata(self):
    """Metadata is written for packages that declare artifacts; a
    package without declarations only logs a message."""
    # inject dummy module and add cleanup
    mod = ModuleType('calmjs_testing_dummy')
    mod.complete = generic_builder
    self.addCleanup(sys.modules.pop, 'calmjs_testing_dummy')
    sys.modules['calmjs_testing_dummy'] = mod
    working_dir = utils.mkdtemp(self)
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'calmjs',
        ])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'artifact.js = calmjs_testing_dummy:complete',
        ])),
    ), 'app', '1.0', working_dir=working_dir)
    # mock a version of calmjs within that environment too
    utils.make_dummy_dist(
        self, (('entry_points.txt', ''), ),
        'calmjs', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    registry.update_artifact_metadata('app', {})
    self.assertTrue(exists(registry.metadata.get('app')))
    with pretty_logging(stream=mocks.StringIO()) as s:
        registry.update_artifact_metadata('calmjs', {})
    # "has not declare" [sic] — matches the message the library emits
    self.assertIn(
        "package 'calmjs' has not declare any artifacts", s.getvalue())
def test_pkg_manager_init_exists_and_overwrite(self):
    """init must not clobber an existing pkgdef file unless
    overwrite=True is passed, and must log when it declines.

    Fix: the original bound the return value of ``json.dump`` (always
    ``None``) to ``result`` for no reason; the dead assignment is gone.
    """
    self.setup_requirements_json()
    cwd = mkdtemp(self)
    driver = cli.PackageManagerDriver(
        pkg_manager_bin='mgr', pkgdef_filename='requirements.json',
        dep_keys=('require', ),
        working_dir=cwd,
    )
    target = join(cwd, 'requirements.json')
    # seed an existing file that must survive overwrite=False
    with open(target, 'w') as fd:
        json.dump({"require": {}}, fd)
    with pretty_logging(stream=mocks.StringIO()) as err:
        driver.pkg_manager_init(
            'calmpy.pip', interactive=False, overwrite=False)
    self.assertIn('not overwriting existing ', err.getvalue())
    self.assertIn('requirements.json', err.getvalue())
    with open(target) as fd:
        result = json.load(fd)
    # still the seeded content, not the generated one
    self.assertNotEqual(result, {"require": {"setuptools": "25.1.6"}})
    # now permit the overwrite and verify the generated content
    driver.pkg_manager_init('calmpy.pip', interactive=False, overwrite=True)
    with open(target) as fd:
        result = json.load(fd)
    self.assertEqual(result, {
        "require": {
            "setuptools": "25.1.6"
        },
        "name": "calmpy.pip",
    })
def test_prepare_compile_assemble_verify_loaders_not_found(self):
    """With verify_imports on and no loaderplugin source maps, a
    'text!' require that resolves to no module is reported."""
    working_dir = utils.mkdtemp(self)
    mock_text_loader(working_dir)
    src_dir = utils.mkdtemp(self)
    index_file = join(src_dir, 'index.js')
    with open(index_file, 'w') as fd:
        # the text! prefixed import that cannot be satisfied
        fd.write('var hello = require("text!hello/world.txt");\n')
    webpack = toolchain.WebpackToolchain()
    spec = Spec(**{
        'build_dir': self.build_dir,
        'export_target': join(working_dir, 'export.js'),
        webpack.webpack_bin_key: join(self.build_dir, 'webpack'),
        LOADERPLUGIN_SOURCEPATH_MAPS: {},
        'transpile_sourcepath': {
            'index': index_file,
        },
        'working_dir': working_dir,
        'verify_imports': True,
    })
    with pretty_logging(stream=mocks.StringIO()) as s:
        webpack.prepare(spec)
        webpack.compile(spec)
        webpack.assemble(spec)
    self.assertIn(
        "not in modules: %s" % (['text!hello/world.txt'], ),
        s.getvalue())
def test_normcase_registration(self):
    """Under case-insensitive path normalization (nt's normcase),
    entry points differing only by case collide on the same path and
    the later registration is ignored with an explanatory log."""
    # create an empty working set for a clean-slate test.
    cwd = utils.mkdtemp(self)
    mock_ws = WorkingSet([])
    dist_ = Distribution(cwd, project_name='pkg', version='1.0')
    dist_.egg_info = cwd  # just lazy
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    # case sensitive test; have to patch the normcase at artifact
    # module with the nt version
    from ntpath import normcase as nt_normcase
    utils.stub_item_attr_value(self, artifact, 'normcase', nt_normcase)
    # using named case for case sensitivity test.
    c1 = EntryPoint.parse('case.js = dummy_builder:builder1')
    c1.dist = dist_
    c2 = EntryPoint.parse('Case.js = dummy_builder:builder2')
    c2.dist = dist_
    # use the error one
    ct = join(cwd, 'calmjs_artifacts', 'Case.js')
    with pretty_logging(stream=mocks.StringIO()) as stream:
        registry.register_entry_point(c1)
        registry.register_entry_point(c2)
    log = stream.getvalue()
    self.assertIn(
        "entry point 'Case.js = dummy_builder:builder2' from package "
        "'pkg 1.0' resolves to the path '%s' which was already "
        "registered to entry point 'case.js = dummy_builder:builder1'; "
        "conflicting entry point registration will be ignored." % ct,
        log)
    self.assertIn(
        "the file mapping error is caused by this platform's case-"
        "insensitive filename", log)
def test_conflict_registration(self):
    """Two entry points resolving to the identical artifact path: the
    second registration is ignored and the conflict is logged."""
    # create an empty working set for a clean-slate test.
    cwd = utils.mkdtemp(self)
    mock_ws = WorkingSet([])
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    # using named case for case sensitivity test.
    st = join(cwd, 'calmjs_artifacts', 'Simple.js')
    dist_ = Distribution(cwd, project_name='pkg', version='1.0')
    dist_.egg_info = cwd  # just lazy
    s1 = EntryPoint.parse('Simple.js = dummy_builder:builder1')
    s1.dist = dist_
    s2 = EntryPoint.parse('Simple.js = dummy_builder:builder2')
    s2.dist = dist_
    with pretty_logging(stream=mocks.StringIO()) as stream:
        registry.register_entry_point(s1)
        # normal registry usage shouldn't be able to do this.
        registry.register_entry_point(s2)
    log = stream.getvalue()
    self.assertIn(
        "entry point 'Simple.js = dummy_builder:builder2' from package "
        "'pkg 1.0' resolves to the path '%s' which was already "
        "registered to entry point 'Simple.js = dummy_builder:builder1'; "
        "conflicting entry point registration will be ignored." % st,
        log)
def test_pkg_manager_init_merge(self):
    """init with merge=True keeps existing requirement keys while
    adding the generated ones.

    Fix: the original asserted against the return value of
    ``json.dump`` — which is always ``None`` — so the
    ``assertNotEqual`` could never fail. The file is now read back
    after init and its merged content asserted directly.
    """
    self.setup_requirements_json()
    cwd = mkdtemp(self)
    driver = cli.PackageManagerDriver(
        pkg_manager_bin='mgr', pkgdef_filename='requirements.json',
        dep_keys=('require', ),
        working_dir=cwd,
    )
    target = join(cwd, 'requirements.json')
    # seed a file holding a pre-existing requirement to merge with
    with open(target, 'w') as fd:
        json.dump({"require": {"calmpy": "1.0.0"}}, fd)
    driver.pkg_manager_init('calmpy.pip', merge=True, overwrite=True)
    with open(target) as fd:
        result = json.load(fd)
    # both the seeded and the generated requirements must be present
    self.assertEqual(result, {
        "require": {
            "calmpy": "1.0.0",
            "setuptools": "25.1.6",
        },
        "name": "calmpy.pip",
    })
    stub_mod_call(self, cli)
    stub_base_which(self)
    with pretty_logging(stream=mocks.StringIO()):
        # ensure the return value is True, assuming successful
        self.assertTrue(
            driver.pkg_manager_install('calmpy.pip', overwrite=True))
def test_iter_builders_side_effect_build_issue(self):
    """A file occupying the artifact build directory path makes the
    yielded toolchain abort in before_prepare; later advice never runs.

    Fix: ``open(join(root), 'w')`` — single-argument ``join`` is a
    no-op — simplified to ``open(root, 'w')``.
    """
    mod = ModuleType('calmjs_testing_dummy')
    mod.complete = generic_builder
    self.addCleanup(sys.modules.pop, 'calmjs_testing_dummy')
    sys.modules['calmjs_testing_dummy'] = mod
    working_dir = utils.mkdtemp(self)
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'artifact.js = calmjs_testing_dummy:complete',
        ])),
    ), 'app', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    registry.update_artifact_metadata('app', {})
    root = join(working_dir, 'app-1.0.egg-info', 'calmjs_artifacts')
    # clog the build directory so build cannot happen
    with open(root, 'w'):
        pass
    ep, toolchain, spec = next(registry.iter_builders_for('app'))
    check = []
    spec.advise('after_prepare', check.append, True)
    with pretty_logging(stream=mocks.StringIO()) as stream:
        with self.assertRaises(ToolchainAbort):
            toolchain(spec)
    self.assertIn(
        "an advice in group 'before_prepare' triggered an abort",
        stream.getvalue())
    # should have stopped at before_prepare
    self.assertFalse(check)
def test_spec_missing_export_path_handling(self):
    """Processing a package whose spec lacks export_target only logs."""
    with pretty_logging(stream=mocks.StringIO()) as log_stream:
        self.registry.process_package('blank')
    self.assertIn(
        "failed to produce a spec with the expected export_target",
        log_stream.getvalue())
def test_resolve_child_module_registries_lineage_malformed_loop(self):
    """A registry made its own (grand)child raises TypeError with the
    resolved lineage, and the suspicious parent/child name lengths are
    logged."""
    working_set = mocks.WorkingSet({})
    root = BaseModuleRegistry('root', _working_set=working_set)
    parent = ChildModuleRegistry(
        'root.child', _parent=root, _working_set=working_set)
    child = ChildModuleRegistry(
        'root.child.child', _parent=parent, _working_set=working_set)
    grandchild = ChildModuleRegistry(
        'root.child.child.child', _parent=child, _working_set=working_set)
    # force a bad loop
    parent.parent = grandchild
    with pretty_logging(stream=mocks.StringIO()) as log:
        with self.assertRaises(TypeError) as e:
            resolve_child_module_registries_lineage(parent)
    self.assertEqual(
        "registry 'root.child' was already recorded in the lineage, "
        "indicating that it may be some (grand)child of itself, which is "
        "an illegal reference in the registry system; previously resolved "
        "lineage is: ['root.child.child', 'root.child.child.child', "
        "'root.child']", str(e.exception))
    self.assertIn(
        "the parent registry 'root.child.child.child' somehow has a "
        "longer name than its child registry 'root.child'; the underlying "
        "registry class may be constructed in an invalid manner",
        log.getvalue())
def test_iter_builders_verify_export_target(self):
    """Builders whose export target fails verify_export_target are
    dropped from iter_builders_for with a rejection log entry."""
    mod = ModuleType('calmjs_testing_dummy')
    mod.complete = generic_builder
    self.addCleanup(sys.modules.pop, 'calmjs_testing_dummy')
    sys.modules['calmjs_testing_dummy'] = mod
    working_dir = utils.mkdtemp(self)
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'artifact.js = calmjs_testing_dummy:complete',
            'invalid.js = calmjs_testing_dummy:complete',
        ])),
    ), 'app', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])

    class FakeArtifactRegistry(ArtifactRegistry):
        # reject any target whose path mentions invalid.js
        def verify_export_target(self, export_target):
            return 'invalid.js' not in export_target

    registry = FakeArtifactRegistry(
        'calmjs.artifacts', _working_set=mock_ws)
    # the invalid.js should be filtered out
    with pretty_logging(stream=mocks.StringIO()) as stream:
        self.assertEqual(1, len(list(registry.iter_builders_for('app'))))
    self.assertIn("invalid.js' has been rejected", stream.getvalue())
def test_get_bin_version_no_bin(self):
    """_get_bin_version returns None and logs when the binary fails."""
    stub_mod_check_output(self, cli, fake_error(OSError))
    stub_base_which(self)
    with pretty_logging(stream=mocks.StringIO()) as log_stream:
        version = cli._get_bin_version('some_app')
    self.assertIsNone(version)
    self.assertIn("failed to execute 'some_app'", log_stream.getvalue())
def test_registry_reserved(self):
    """[calmjs.reserved] declarations pin a registry name to one
    package; conflicting registrations from other packages are refused
    and logged."""
    # calmjs reserves r1 for itself and r3 for an.external, and
    # registers plain ModuleRegistry for all three names
    make_dummy_dist(self, (
        ('entry_points.txt',
         '[calmjs.reserved]\n'
         'calmjs.r1 = calmjs\n'
         'calmjs.r3 = an.external\n'
         '\n'
         '[calmjs.registry]\n'
         'calmjs.r1 = calmjs.module:ModuleRegistry\n'
         'calmjs.r2 = calmjs.module:ModuleRegistry\n'
         'calmjs.r3 = calmjs.module:ModuleRegistry\n'),
    ), 'calmjs', '1.0')
    # an.external tries to claim all three with its custom registry
    make_dummy_dist(self, (
        ('requires.txt', 'calmjs',),
        ('entry_points.txt',
         '[calmjs.reserved]\n'
         'calmjs.r1 = an.external\n'
         'calmjs.r2 = calmjs\n'
         'calmjs.r3 = calmjs\n'
         '\n'
         '[calmjs.registry]\n'
         'calmjs.r1 = calmjs.testing.module3.module:CustomModuleRegistry\n'
         'calmjs.r2 = calmjs.testing.module3.module:CustomModuleRegistry\n'
         'calmjs.r3 = calmjs.testing.module3.module:CustomModuleRegistry\n'
         ),
    ), 'an.external', '2.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    with pretty_logging(stream=mocks.StringIO()) as stream:
        registry = calmjs.registry.Registry(
            'calmjs.registry', _working_set=working_set)
    from calmjs.testing.module3.module import CustomModuleRegistry
    from calmjs.module import ModuleRegistry
    r1 = registry.get('calmjs.r1')
    r2 = registry.get('calmjs.r2')
    r3 = registry.get('calmjs.r3')
    # since this one is reserved to calmjs, not registered
    self.assertFalse(isinstance(r1, CustomModuleRegistry))
    self.assertTrue(isinstance(r1, ModuleRegistry))
    # whatever this is.
    self.assertTrue(isinstance(r2, ModuleRegistry))
    # this one is reserved to an.external
    self.assertTrue(isinstance(r3, CustomModuleRegistry))
    log = stream.getvalue()
    self.assertIn(
        "registry 'calmjs.r1' for 'calmjs.registry' is reserved for "
        "package 'calmjs'", log)
    self.assertIn(
        "registry 'calmjs.r3' for 'calmjs.registry' is reserved for "
        "package 'an.external'", log)
    self.assertIn(
        "registry 'calmjs.r2' for 'calmjs.registry' is already registered",
        log)
def test_no_op_default(self):
    """Instantiating the base registry directly logs NotImplemented."""
    mock_ws = mocks.WorkingSet({__name__: [
        'calmjs.testing.module1 = calmjs.testing.module1',
    ]})
    with pretty_logging(stream=mocks.StringIO()) as stream:
        base.BaseModuleRegistry(__name__, _working_set=mock_ws)
    self.assertIn('NotImplemented', stream.getvalue())
def test_install_arguments(self):
    """Extra args are appended to the pkg manager invocation."""
    stub_mod_call(self, cli)
    stub_base_which(self)
    driver = cli.PackageManagerDriver(pkg_manager_bin='mgr')
    with pretty_logging(stream=mocks.StringIO()):
        driver.pkg_manager_install(args=('--pedantic', ))
    expected = ((['mgr', 'install', '--pedantic'], ), {})
    self.assertEqual(expected, self.call_args)
def test_no_declaration(self):
    """A package with no artifact entry points is reported in the log."""
    with pretty_logging(stream=mocks.StringIO()) as log_stream:
        self.registry.process_package('undeclared')
    self.assertIn(
        "package 'undeclared' has not declared any entry points for the "
        "'calmjs.artifacts' registry for artifact construction",
        log_stream.getvalue())
def test_standard_construction(self):
    """Plain construction records the name and emits no log output."""
    # this mock WorkingSet.find will always return a distribution
    mock_ws = mocks.WorkingSet({})
    with pretty_logging(stream=mocks.StringIO()) as log_stream:
        registry = calmjs.registry.Registry(
            'calmjs.registry', _working_set=mock_ws)
    self.assertEqual(registry.registry_name, 'calmjs.registry')
    self.assertEqual(log_stream.getvalue(), '')
def test_prepare_base(self):
    """prepare_export_location creates the parent dir and logs it."""
    root = utils.mkdtemp(self)
    target = join(root, 'artifacts', 'export.js')
    with pretty_logging(stream=mocks.StringIO()) as log_stream:
        self.assertTrue(prepare_export_location(target))
    self.assertTrue(exists(join(root, 'artifacts')))
    self.assertIn("artifacts", log_stream.getvalue())
def test_paths_unset(self):
    """Without env_path or cwd configured, neither reaches the call."""
    stub_mod_call(self, cli)
    stub_base_which(self)
    driver = cli.PackageManagerDriver(pkg_manager_bin='mgr')
    with pretty_logging(stream=mocks.StringIO()):
        driver.pkg_manager_install()
    _, call_kwargs = self.call_args
    self.assertNotIn('PATH', call_kwargs)
    self.assertNotIn('cwd', call_kwargs)
def test_install_failure(self):
    """A failing binary invocation logs the failure and re-raises."""
    stub_mod_call(self, cli, fake_error(IOError))
    stub_base_which(self)
    driver = cli.PackageManagerDriver(pkg_manager_bin='mgr')
    with pretty_logging(stream=mocks.StringIO()) as log_stream:
        with self.assertRaises(IOError):
            driver.mgr_install()
    self.assertIn(
        "invocation of the 'mgr' binary failed", log_stream.getvalue())
def test_artifact_generation_failure(self):
    """A builder that produces no artifact is reported in the log."""
    with pretty_logging(stream=mocks.StringIO()) as log_stream:
        self.registry.process_package('nothing')
    self.assertIn(
        "the entry point "
        "'nothing.js = calmjs_testing_dummy:nothing_builder' from package "
        "'nothing 1.0' failed to generate an artifact",
        log_stream.getvalue())
def test_registry_load_working_set(self):
    """MoldRegistry generates both script and text! template records
    for the package, grouped into molds retrievable by mold_id."""
    # do note these mocking sets are for the registry; actual
    # filenames is not overridden (uses pkg_resources directly)
    working_set = mocks.WorkingSet(
        {'nunja.mold': [
            'nunja.testing.molds = nunja.testing:mold',
        ]},
        dist=Distribution(project_name='nunjatesting', version='0.0'))
    with pretty_logging(logger='nunja', stream=mocks.StringIO()) as stream:
        registry = MoldRegistry('nunja.mold', _working_set=working_set)
    records = registry.get_records_for_package('nunjatesting')
    keys = [
        'nunja.testing.molds/include_by_name/index',
        'nunja.testing.molds/include_by_value/index',
        'nunja.testing.molds/itemlist/index',
        'nunja.testing.molds/noinit/index',
        'nunja.testing.molds/problem/index',
        'text!nunja.testing.molds/basic/template.nja',
        'text!nunja.testing.molds/include_by_name/empty.nja',
        'text!nunja.testing.molds/include_by_name/template.nja',
        'text!nunja.testing.molds/include_by_value/template.nja',
        'text!nunja.testing.molds/itemlist/template.nja',
        'text!nunja.testing.molds/noinit/template.nja',
        'text!nunja.testing.molds/problem/template.nja',
    ]
    self.assertEqual(sorted(records.keys()), keys)
    self.assertIn('7 templates', stream.getvalue())
    self.assertIn('5 scripts', stream.getvalue())
    self.assertIn('generated 6 molds', stream.getvalue())
    # select directly by mold_id through get_record
    self.assertEqual(
        sorted(registry.get_record('nunja.testing.molds/basic').keys()),
        ['text!nunja.testing.molds/basic/template.nja'],
    )
    self.assertEqual(
        sorted(registry.get_record('nunja.testing.molds/itemlist').keys()),
        [
            'nunja.testing.molds/itemlist/index',
            'text!nunja.testing.molds/itemlist/template.nja',
        ],
    )
    self.assertEqual(
        sorted(registry.get_record(
            'nunja.testing.molds/include_by_name').keys()),
        [
            'nunja.testing.molds/include_by_name/index',
            'text!nunja.testing.molds/include_by_name/empty.nja',
            'text!nunja.testing.molds/include_by_name/template.nja',
        ],
    )
def test_env_path_not_exist(self):
    """A nonexistent env_path must not be prepended to PATH."""
    stub_mod_call(self, cli)
    stub_base_which(self)
    bad_path = '/no/such/path/for/sure/at/here'
    driver = cli.PackageManagerDriver(
        pkg_manager_bin='mgr', env_path=bad_path)
    with pretty_logging(stream=mocks.StringIO()):
        driver.pkg_manager_install()
    _, call_kwargs = self.call_args
    leading_path = call_kwargs['env']['PATH'].split(pathsep)[0]
    self.assertNotEqual(leading_path, bad_path)