def test_root_runtime_bad_names(self):
    """Entry points with malformed names are dropped and logged."""

    working_set = mocks.WorkingSet({'calmjs.runtime': [
        'bad name = calmjs.npm:npm.runtime',
        'bad.name = calmjs.npm:npm.runtime',
        'badname:likethis = calmjs.npm:npm.runtime',
    ]})
    stderr = mocks.StringIO()
    with pretty_logging(
            logger='calmjs.runtime', level=DEBUG, stream=stderr):
        rt = runtime.Runtime(working_set=working_set)
    self.assertIn("bad 'calmjs.runtime' entry point", stderr.getvalue())

    stub_stdouts(self)
    with self.assertRaises(SystemExit):
        rt(['-h'])
    out = sys.stdout.getvalue()
    # None of the malformed names may surface in the help output;
    # '.' and ':' are reserved for disambiguation, and a bare space
    # makes for an unnatural argparsing situation.
    for rejected in ('bad name', 'bad.name', 'badname:likethis'):
        self.assertNotIn(rejected, out)
    # With every entry point rejected, the command listing is
    # naturally not available.
    self.assertNotIn('npm', out)
def setup_runtime(self):
    """Build two dummy dists with bower.json data and return a Runtime
    wired to the bower runtime entry point."""

    for pkg_name, pkg_version, deps in (
            ('example.package1', '1.0', {'jquery': '~3.1.0'}),
            ('example.package2', '2.0', {'underscore': '~1.8.3'})):
        make_dummy_dist(self, (
            ('bower.json', json.dumps({
                'name': 'site',
                'dependencies': deps,
            })),
        ), pkg_name, pkg_version)

    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    # Stub out the underlying data needed for the cli for the tests
    # to test against our custom data for reproducibility.
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    stub_mod_check_interactive(self, [cli], True)
    # Of course, apply a mock working set for the runtime instance
    # so it can use the bower runtime.
    return runtime.Runtime(working_set=mocks.WorkingSet({
        'calmjs.runtime': [
            'bower = calmjs.bower:bower.runtime',
        ],
    }))
def setup_runtime(self):
    """Return a Runtime whose mock working set exposes the npm runtime
    under the command name 'cmd'."""
    # TODO should really improve the test case to provide custom
    # runtime instances separate from actual data.
    entry_points = {'calmjs.runtime': ['cmd = calmjs.npm:npm.runtime']}
    return runtime.Runtime(
        working_set=mocks.WorkingSet(entry_points), prog='calmjs')
def test_root_runtime_errors_ignored(self):
    """Unimportable or non-runtime entry points are skipped, while the
    valid ones remain available."""

    stub_stdouts(self)
    rt = runtime.Runtime(working_set=mocks.WorkingSet({
        'calmjs.runtime': [
            'foo = calmjs.nosuchmodule:no.where',
            'bar = calmjs.npm:npm',
            'npm = calmjs.npm:npm.runtime',
        ],
    }))
    with self.assertRaises(SystemExit):
        rt(['-h'])
    listing = sys.stdout.getvalue()
    # The broken entry point must not be listed; the good one must be.
    self.assertNotIn('foo', listing)
    self.assertIn('npm', listing)
def setup_runtime(self):
    """Build three dummy dists with package.json data and return a
    Runtime wired to the npm runtime under the keyword 'foo'."""

    package1_json = json.dumps({
        'name': 'site',
        'dependencies': {'jquery': '~3.1.0'},
    })
    package2_json = json.dumps({
        'name': 'site',
        'dependencies': {'underscore': '~1.8.3'},
    })
    package3_json = json.dumps({
        'dependencies': {'backbone': '~1.3.2'},
    })

    make_dummy_dist(
        self, (('package.json', package1_json),),
        'example.package1', '1.0')
    make_dummy_dist(
        self, (('package.json', package2_json),),
        'example.package2', '2.0')
    # The third package depends on the first two.
    make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'example.package1',
            'example.package2',
        ])),
        ('package.json', package3_json),
    ), 'example.package3', '2.0')

    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    # Stub out the underlying data needed for the cli for the tests
    # to test against our custom data for reproducibility.
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    stub_mod_check_interactive(self, [cli], True)
    # Of course, apply a mock working set for the runtime instance
    # so it can use the npm runtime, however we will use a different
    # keyword.  Note that the runtime is invoked using foo.
    return runtime.Runtime(working_set=mocks.WorkingSet({
        'calmjs.runtime': [
            'foo = calmjs.npm:npm.runtime',
        ],
    }))
def test_duplication_and_runtime_errors(self):
    """
    Duplicated entry point names

    Naturally, there may be situations where different packages
    have registered entry_points with the same name.  It will be
    great if that can be addressed.
    """

    self.setup_dupe_runtime()

    # Four dists fighting over the command names 'bar' and 'baz'.
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'bar = calmjs.testing.utils:foo_runtime\n'
    ),), 'example1.foo', '1.0')

    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'bar = calmjs.testing.utils:foo_runtime\n'
    ),), 'example2.foo', '1.0')

    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'bar = calmjs.testing.utils:runtime_foo\n'
        'baz = calmjs.testing.utils:runtime_foo\n'
    ),), 'example3.foo', '1.0')

    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'bar = calmjs.testing.utils:runtime_foo\n'
        'baz = calmjs.testing.utils:runtime_foo\n'
    ),), 'example4.foo', '1.0')

    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])

    stderr = mocks.StringIO()
    with pretty_logging(
            logger='calmjs.runtime', level=DEBUG, stream=stderr):
        rt = runtime.Runtime(working_set=working_set)
    msg = stderr.getvalue()

    # The duplicates must be logged as ignored.
    self.assertIn(
        "duplicated registration of command 'baz' via entry point "
        "'baz = calmjs.testing.utils:runtime_foo' ignored; ", msg)
    self.assertIn(
        "a calmjs runtime command named 'bar' already registered.", msg)
    self.assertIn(
        "'bar = calmjs.testing.utils:foo_runtime' from 'example", msg)
    self.assertIn(
        "'bar = calmjs.testing.utils:runtime_foo' from 'example", msg)
    # Registration order is non-deterministic, so fallback is too
    self.assertIn("fallback command 'calmjs.testing.utils:", msg)
    self.assertIn("is already registered.", msg)

    # Try to use it
    stub_stdouts(self)
    with self.assertRaises(SystemExit):
        rt(['-h'])
    out = sys.stdout.getvalue()
    self.assertIn('bar', out)
    self.assertIn('baz', out)

    # Both fallbacks should be registered, to ensure disambiguation,
    # as the load order can be influenced randomly by dict ordering
    # or even the filesystem file load order.
    foo_runtime = 'calmjs.testing.utils:foo_runtime'
    runtime_foo = 'calmjs.testing.utils:runtime_foo'
    self.assertIn(runtime_foo, out)
    self.assertIn(foo_runtime, out)

    # see that the full one can be invoked and actually invoke the
    # underlying runtime
    stub_stdouts(self)
    with self.assertRaises(SystemExit):
        rt([foo_runtime, '-h'])
    out = sys.stdout.getvalue()
    self.assertIn(foo_runtime, out)
    self.assertIn("run 'npm install' with generated 'package.json';", out)

    stub_stdouts(self)
    with self.assertRaises(SystemExit):
        rt([runtime_foo, '-h'])
    out = sys.stdout.getvalue()
    self.assertIn(runtime_foo, out)
    self.assertIn("run 'npm install' with generated 'package.json';", out)

    # Time to escalate the problems one can cause...
    with self.assertRaises(RuntimeError):
        # yeah instances of root runtimes are NOT meant for reuse
        # by other runtime instances or argparsers, so this will
        # fail.
        rt.init_argparser(ArgumentParser())

    stderr = mocks.StringIO()
    with pretty_logging(
            logger='calmjs.runtime', level=DEBUG, stream=stderr):
        rt.argparser = None
        rt.init()
    # A forced reinit shouldn't cause a major issue, but it will
    # definitely result in a distinct lack of named commands.
    # Fix: assert against the log captured by the FRESH stream; the
    # original checked the stale `msg` from the initial construction,
    # which made this assertion vacuously true.
    msg = stderr.getvalue()
    self.assertNotIn(
        "Runtime instance has been used or initialized improperly.", msg)

    stub_stdouts(self)
    with self.assertRaises(SystemExit):
        rt(['-h'])
    out = sys.stdout.getvalue()
    self.assertNotIn('bar', out)
    self.assertNotIn('baz', out)

    # Now for the finale, where we really muck with the internals.
    stderr = mocks.StringIO()
    with pretty_logging(
            logger='calmjs.runtime', level=DEBUG, stream=stderr):
        # This normally shouldn't happen due to naming restriction,
        # i.e. where names with "." or ":" are disallowed so that
        # they are reserved for fallbacks; although if some other
        # forces are at work, like this...
        rt.runtimes[foo_runtime] = runtime.DriverRuntime(None)
        rt.runtimes[runtime_foo] = runtime.DriverRuntime(None)
        # Now, if one were to force a bad init to happen with
        # (hopefully forcibly) mismatched runtime instances, the
        # main runtime instance will simply explode into the logger
        # in a fit of critical level agony.
        rt.argparser = None
        rt.init()  # EXPLOSION
    msg = stderr.getvalue()
    self.assertIn("CRITICAL", msg)
    self.assertIn(
        "Runtime instance has been used or initialized improperly.", msg)