def test_runtime_cli_method_none_with_empty_various(self):
    """With no bundled sources, 'none' fails unless --empty patches config."""
    # this time, use the empty option
    utils.remember_cwd(self)
    utils.stub_stdouts(self)
    current_dir = utils.mkdtemp(self)
    export_target = join(current_dir, 'export_target.js')
    os.chdir(current_dir)
    with self.assertRaises(SystemExit) as e:
        # this should fail
        runtime.main([
            'rjs', 'service', 'site',
            '--bundle-map-method=none',
            '--export-target=' + export_target,
            '--source-registry=' + self.registry_name,
        ])
    self.assertEqual(e.exception.args[0], 1)
    os.chdir(current_dir)
    with self.assertRaises(SystemExit) as e:
        # this time, apply empty, which should automatically patch
        # the configuration
        runtime.main([
            'rjs', 'service', 'site',
            '--empty',
            '--bundle-map-method=none',
            '--export-target=' + export_target,
            '--source-registry=' + self.registry_name,
        ])
    self.assertEqual(e.exception.args[0], 0)
def test_npm_binary_not_found_debugger(self):
    """Binary failure drops into the debugger with -dd or --debugger."""
    from calmjs import utils

    def fake_post_mortem(*a, **kw):
        # stand-in for pdb.post_mortem; just leave the marker on stdout
        sys.stdout.write('(Pdb) ')

    remember_cwd(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    rt = self.setup_runtime()
    # stub_stdin(self, u'quit\n')
    stub_stdouts(self)
    # ensure the binary is not found.
    stub_mod_call(self, cli, fake_error(IOError))
    stub_item_attr_value(self, utils, 'post_mortem', fake_post_mortem)
    rt(['-dd', 'foo', '--install', 'example.package2'])
    self.assertIn("ERROR", sys.stderr.getvalue())
    self.assertIn(
        "invocation of the 'npm' binary failed;", sys.stderr.getvalue())
    self.assertIn("terminating due to exception", sys.stderr.getvalue())
    self.assertIn("Traceback ", sys.stderr.getvalue())
    self.assertIn("(Pdb)", sys.stdout.getvalue())
    # reset the captured streams to verify the --debugger flag alone
    # also triggers the post-mortem hook
    stub_stdouts(self)
    self.assertNotIn("(Pdb)", sys.stdout.getvalue())
    rt(['foo', '--install', 'example.package2', '--debugger'])
    self.assertIn("(Pdb)", sys.stdout.getvalue())
def test_filtered(self):
    """An unrelated parser error must still be reported via stderr."""
    stub_stdouts(self)
    argparser = ArgumentParser()
    with self.assertRaises(SystemExit):
        argparser.error('some random other error')
    captured = sys.stderr.getvalue()
    self.assertIn('some random other error', captured)
def test_integration_choices_in_list(self):
    """StoreDelimitedListBase validates every delimited item against choices."""
    argparser = ArgumentParser(prog='prog', add_help=False)
    argparser.add_argument(
        '-p', '--params', choices=['1', '2', '3'],
        action=StoreDelimitedListBase)
    parsed, extras = argparser.parse_known_args(['-p', '3'])
    self.assertEqual(parsed.params, ['3'])
    parsed, extras = argparser.parse_known_args(['-p', '3,2'])
    self.assertEqual(parsed.params, ['3', '2'])
    parsed, extras = argparser.parse_known_args(['-p', '3,2,1'])
    self.assertEqual(parsed.params, ['3', '2', '1'])
    # duplicates are permitted; choices only constrain membership
    parsed, extras = argparser.parse_known_args(['-p', '3,3,3'])
    self.assertEqual(parsed.params, ['3', '3', '3'])
    stub_stdouts(self)
    # any out-of-choices item in the list aborts parsing
    with self.assertRaises(SystemExit):
        argparser.parse_known_args(['-p', '3,2,1,0'])
    self.assertIn("(choose from '1', '2', '3')", sys.stderr.getvalue())
    with self.assertRaises(SystemExit):
        argparser.parse_known_args(['-p', '0'])
    # a custom separator is honoured...
    argparser.add_argument(
        '--dot', choices=['a', 'b', 'c'],
        action=StoreDelimitedListBase, sep='.')
    parsed, extras = argparser.parse_known_args(['--dot', 'a.b.c'])
    self.assertEqual(parsed.dot, ['a', 'b', 'c'])
    # ...so a comma no longer splits, making 'a,b,c' a single bad item
    with self.assertRaises(SystemExit):
        argparser.parse_known_args(['--dot', 'a,b,c'])
def test_npm_init_existing_merge_overwrite(self):
    """overwrite=True supersedes interactive and writes the merge directly."""
    stub_stdouts(self)
    tmpdir = mkdtemp(self)
    # Write an initial thing
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        json.dump({'dependencies': {
            'jquery': '~3.0.0',
            'underscore': '~1.8.0',
        }, 'devDependencies': {
            'sinon': '~1.17.0'
        }, 'name': 'dummy'}, fd, indent=0)
    os.chdir(tmpdir)
    # Overwrite will supercede interactive.
    self.assertTrue(npm.npm_init(
        'foo', merge=True, overwrite=True, interactive=True))
    with open(join(tmpdir, 'package.json')) as fd:
        # written with indentation, so a single line is not valid JSON
        with self.assertRaises(ValueError):
            json.loads(fd.readline())
        fd.seek(0)
        result = json.load(fd)
    # Merge results should be written when user agrees.
    self.assertEqual(result, {
        'dependencies': {
            'jquery': '~1.11.0',
            'underscore': '~1.8.0',
        },
        'devDependencies': {
            'sinon': '~1.17.0'
        },
        'name': 'foo',
    })
def test_runtime_cli_compile_explicit_registry_site(self):
    """Explicit registry method with an undeclared registry yields nothing."""
    utils.stub_stdouts(self)
    current_dir, target_file = self.setup_runtime_main_env()
    os.chdir(current_dir)
    # Invoke the thing through the main runtime
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'rjs', 'site',
            '--source-registry-method=explicit',
            '--export-target=' + target_file,
        ])
    self.assertEqual(e.exception.args[0], 0)
    with open(target_file) as fd:
        contents = fd.read()
    # As the registry is NOT declared for that package, it should
    # result in nothing.
    self.assertNotIn('framework/lib', contents)
    self.assertIn(
        'no module registry declarations found using packages',
        sys.stderr.getvalue(),
    )
    self.assertIn("'site'", sys.stderr.getvalue())
    self.assertIn(
        "using acquisition method 'explicit'", sys.stderr.getvalue())
def test_karma_runtime_multiple_artifacts_multi_args(self):
    """Repeated --artifact flags should all be located by the runtime."""
    stub_stdouts(self)
    extra_artifact = join(mkdtemp(self), 'lib.js')
    with open(extra_artifact, 'w') as fd:
        fd.write(dedent("""
            'use strict';
            var Lib = function(args) {
            };
            Lib.prototype.add2 = function (i) {
                return i + i;
            };
        """))
    # use the full blown runtime
    rt = KarmaRuntime(self.driver)
    # the artifact in our case is identical to the source file
    artifact = resource_filename('calmjs.dev', 'main.js')
    rt([
        'run',
        '--artifact', artifact,
        '--artifact', extra_artifact,
        '--test-registry', 'calmjs.dev.module.tests',
        '--test-with-package', 'calmjs.dev', '-vv',
    ])
    logs = sys.stderr.getvalue()
    # both artifacts must be reported as found in the verbose log
    self.assertIn("specified artifact '%s' found" % artifact, logs)
    self.assertIn("specified artifact '%s' found" % extra_artifact, logs)
def test_karma_runtime_integration_ignore_error(self):
    """The -I flag lets the run continue after karma test failure."""
    stub_stdouts(self)
    target = join(mkdtemp(self), 'target')
    build_dir = mkdtemp(self)
    stub_item_attr_value(
        self, mocks, 'dummy', ToolchainRuntime(NullToolchain()),
    )
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'null = calmjs.testing.mocks:dummy\n'
    ),), 'example.package', '1.0')
    working_set = WorkingSet([self._calmjs_testing_tmpdir])
    rt = KarmaRuntime(self.driver, working_set=working_set)
    result = rt([
        '-I', 'null',
        '--export-target', target,
        '--build-dir', build_dir,
    ])
    self.assertIn('karma_config_path', result)
    self.assertTrue(exists(result['karma_config_path']))
    # abort-on-failure must have been disabled by -I
    self.assertFalse(result.get('karma_abort_on_test_failure'))
    self.assertIn(
        "karma exited with return code 1; continuing as specified",
        sys.stderr.getvalue()
    )
    # ensure coverage isn't run at all.
    coverage_report_dir = join(build_dir, 'coverage')
    self.assertFalse(exists(coverage_report_dir))
def create_coverage_report(self, report_type):
    """Run karma with coverage for the given report type.

    Shared helper: executes the karma runtime with coverage enabled
    over the calmjs.dev artifact and returns (coverage_dir,
    artifact_fn) for report-type specific assertions by the caller.
    """
    stub_stdouts(self)
    self.addCleanup(
        root_registry.records.pop, 'calmjs.dev.module.tests', None)
    build_dir = mkdtemp(self)
    coverage_dir = join(mkdtemp(self), 'coverage')
    # manipulate the registry to remove the fail test
    reg = root_registry.get('calmjs.dev.module.tests')
    reg.records['calmjs.dev.tests'].pop('calmjs/dev/tests/test_fail', '')
    # use the full blown runtime
    rt = KarmaRuntime(self.driver)
    # the artifact in our case is identical to the source file
    artifact_fn = resource_filename('calmjs.dev', 'main.js')
    result = rt([
        '--artifact', artifact_fn, 'run',
        '--build-dir', build_dir,
        '--test-registry', 'calmjs.dev.module.tests',
        '--test-with-package', 'calmjs.dev',
        '--coverage', '--cover-artifact',
        '--cover-report-type', report_type,
        '--cover-report-dir', coverage_dir,
    ])
    self.assertIn('karma_config_path', result)
    self.assertEqual(result['artifact_paths'], [artifact_fn])
    self.assertTrue(exists(result['karma_config_path']))
    # should exit cleanly
    self.assertNotIn(
        "karma exited with return code 1", sys.stderr.getvalue())
    self.assertIn(artifact_fn, result['karma_config']['preprocessors'])
    self.assertTrue(exists(coverage_dir))
    return coverage_dir, artifact_fn
def test_standard_manual_tests_fail_run_continued(self):
    """A failed test run still completes when abort-on-failure is off."""
    stub_stdouts(self)
    main = resource_filename('calmjs.dev', 'main.js')
    test_fail = resource_filename('calmjs.dev.tests', 'test_fail.js')
    spec = Spec(
        # null toolchain does not prepare this
        transpile_sourcepath={
            'calmjs/dev/main': main,
        },
        test_module_paths_map={
            'calmjs/test_fail': test_fail,
        },
        # register warning
        karma_abort_on_test_failure=False,
    )
    toolchain = NullToolchain()
    with pretty_logging(
            logger='calmjs.dev', stream=mocks.StringIO()) as log:
        self.driver.run(toolchain, spec)
    self.assertNotEqual(spec['karma_return_code'], 0)
    # linked continued
    self.assertIn('link', spec)
    self.assertIn(
        "karma exited with return code 1; continuing as specified",
        log.getvalue()
    )
def test_artifact_verify_fail_at_missing_artifact(self):
    """Verification fails by default for a package whose artifact is absent."""
    # missing packages should also fail by default
    stub_stdouts(self)
    karma_rt = self.setup_karma_artifact_runtime()
    self.assertFalse(karma_rt(['missing']))
    err = sys.stderr.getvalue()
    for fragment in ('artifact not found:', 'missing.js'):
        self.assertIn(fragment, err)
def test_npm_all_the_actions(self):
    """Combining --install --view --init performs all three npm actions."""
    remember_cwd(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    stub_stdouts(self)
    stub_mod_call(self, cli)
    stub_base_which(self, which_npm)
    rt = self.setup_runtime()
    rt([
        'foo', '--install', '--view', '--init',
        'example.package1', 'example.package2'
    ])
    # inside stdout
    result = json.loads(sys.stdout.getvalue())
    self.assertEqual(result['dependencies']['jquery'], '~3.1.0')
    self.assertEqual(result['dependencies']['underscore'], '~1.8.3')
    # the same merged metadata was also initialized to disk
    with open(join(tmpdir, 'package.json')) as fd:
        result = json.load(fd)
    self.assertEqual(result['dependencies']['jquery'], '~3.1.0')
    self.assertEqual(result['dependencies']['underscore'], '~1.8.3')
    # not foo install, but npm install since entry point specified
    # the actual runtime instance.
    self.assertEqual(self.call_args, (([which_npm, 'install'], ), {}))
def test_standalone_main_version(self):
    """The -V flag prints version information and raises SystemExit."""
    stub_stdouts(self)
    # the default call method does NOT call sys.exit; the SystemExit
    # here comes from the version action itself.
    with self.assertRaises(SystemExit):
        yarn.yarn.runtime(['-V'])
    stdout = sys.stdout.getvalue()
    for token in ('calmjs', 'from'):
        self.assertIn(token, stdout)
def test_filtered(self):
    """Errors not handled by the parser land on stderr before exiting."""
    stub_stdouts(self)
    p = ArgumentParser()
    msg = 'some random other error'
    with self.assertRaises(SystemExit):
        p.error(msg)
    self.assertIn(msg, sys.stderr.getvalue())
def test_npm_init_write_name_merge(self):
    """Merging over a different name derives the name from package_json."""
    stub_stdouts(self)
    stub_stdin(self, 'Y')
    tmpdir = mkdtemp(self)
    # Write an initial thing
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        json.dump({'dependencies': {
            'jquery': '~1.8.9',
            'underscore': '~1.8.0',
        }, 'devDependencies': {
            'sinon': '~1.17.0'
        }, 'name': 'something_else'}, fd, indent=0)
    os.chdir(tmpdir)
    self.assertTrue(npm.npm_init('named', merge=True))
    with open(join(tmpdir, 'package.json')) as fd:
        # written with indentation, so one line alone is invalid JSON
        with self.assertRaises(ValueError):
            json.loads(fd.readline())
        fd.seek(0)
        result = json.load(fd)
    # Merge results should be written when user agrees.
    self.assertEqual(result, {
        'dependencies': {
            'jquery': '~3.0.0',
            'underscore': '~1.8.0',
        },
        'devDependencies': {
            'sinon': '~1.17.0'
        },
        # name derived from the package_json field.
        'name': 'named-js',
    })
def test_runtime_cli_compile_explicit_site_legacy_flag(self):
    """Legacy *-map-method flags still work but emit deprecation warnings."""
    # same as previous test, but use the legacy flags.
    utils.stub_stdouts(self)
    current_dir, target_file = self.setup_runtime_main_env()
    os.chdir(current_dir)
    # Invoke the thing through the main runtime
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'rjs', 'site',
            '--source-map-method=explicit',
            '--bundle-map-method=none',
            '--export-target=' + target_file,
            '--source-registry=' + self.registry_name,
        ])
    self.assertEqual(e.exception.args[0], 0)
    with open(target_file) as fd:
        contents = fd.read()
    # Since the package has no sources, and we disabled bundling of
    # sources (none works here because no code to automatically get
    # r.js to look for them), it should generate an empty bundle.
    self.assertEqual(contents, '(function () {}());')
    err = sys.stderr.getvalue()
    self.assertIn("flag '--source-map-method' is deprecated", err)
    self.assertIn("flag '--bundle-map-method' is deprecated", err)
def test_yarn_init_existing_merge_interactive_yes(self):
    """Answering 'Y' at the interactive prompt writes the merged result."""
    stub_stdouts(self)
    stub_stdin(self, 'Y')
    tmpdir = mkdtemp(self)
    # Write an initial thing
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        json.dump({'dependencies': {
            'jquery': '~3.0.0',
            'underscore': '~1.8.0',
        }, 'devDependencies': {
            'sinon': '~1.17.0'
        }, 'name': 'dummy'}, fd, indent=0)
    os.chdir(tmpdir)
    self.assertTrue(yarn.yarn_init('foo', merge=True))
    with open(join(tmpdir, 'package.json')) as fd:
        # written with indentation, so one line alone is invalid JSON
        with self.assertRaises(ValueError):
            json.loads(fd.readline())
        fd.seek(0)
        result = json.load(fd)
    # Merge results should be written when user agrees.
    self.assertEqual(result, {
        'dependencies': {
            'jquery': '~1.11.0',
            'underscore': '~1.8.0',
        },
        'devDependencies': {
            'sinon': '~1.17.0'
        },
        'name': 'foo',
    })
def test_runtime_cli_method_none_with_empty_various(self):
    """With no bundled sources, 'none' fails unless --empty patches config."""
    # this time, use the empty option
    utils.remember_cwd(self)
    utils.stub_stdouts(self)
    current_dir = utils.mkdtemp(self)
    export_target = join(current_dir, 'export_target.js')
    os.chdir(current_dir)
    with self.assertRaises(SystemExit) as e:
        # this should fail
        runtime.main([
            'rjs', 'service', 'site',
            '--bundlepath-method=none',
            '--export-target=' + export_target,
            '--source-registry=' + self.registry_name,
        ])
    self.assertEqual(e.exception.args[0], 1)
    os.chdir(current_dir)
    with self.assertRaises(SystemExit) as e:
        # this time, apply empty, which should automatically patch
        # the configuration
        runtime.main([
            'rjs', 'service', 'site',
            '--empty',
            '--bundlepath-method=none',
            '--export-target=' + export_target,
            '--source-registry=' + self.registry_name,
        ])
    self.assertEqual(e.exception.args[0], 0)
def test_yarn_init_write_name_merge(self):
    """Merging over a different name derives the name from package_json."""
    stub_stdouts(self)
    stub_stdin(self, 'Y')
    tmpdir = mkdtemp(self)
    # Write an initial thing
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        json.dump({'dependencies': {
            'jquery': '~1.8.9',
            'underscore': '~1.8.0',
        }, 'devDependencies': {
            'sinon': '~1.17.0'
        }, 'name': 'something_else'}, fd, indent=0)
    os.chdir(tmpdir)
    self.assertTrue(yarn.yarn_init('named', merge=True))
    with open(join(tmpdir, 'package.json')) as fd:
        # written with indentation, so one line alone is invalid JSON
        with self.assertRaises(ValueError):
            json.loads(fd.readline())
        fd.seek(0)
        result = json.load(fd)
    # Merge results should be written when user agrees.
    self.assertEqual(result, {
        'dependencies': {
            'jquery': '~3.0.0',
            'underscore': '~1.8.0',
        },
        'devDependencies': {
            'sinon': '~1.17.0'
        },
        # name derived from the package_json field.
        'name': 'named-js',
    })
def test_npm_init_existing_interactive_merge_no(self):
    """Answering 'N' at the interactive prompt leaves the file untouched."""
    stub_stdouts(self)
    stub_stdin(self, 'N')
    tmpdir = mkdtemp(self)
    # Write an initial thing
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        json.dump({'dependencies': {
            'jquery': '~3.0.0',
            'underscore': '~1.8.0',
        }, 'devDependencies': {
            'sinon': '~1.17.0'
        }, 'name': 'dummy'}, fd, indent=0)
    os.chdir(tmpdir)
    self.assertFalse(npm.npm_init('foo', merge=True, interactive=True))
    with open(join(tmpdir, 'package.json')) as fd:
        # written with indentation, so one line alone is invalid JSON
        with self.assertRaises(ValueError):
            json.loads(fd.readline())
        fd.seek(0)
        result = json.load(fd)
    # Should not have written anything if user said no.
    self.assertEqual(result, {
        'dependencies': {
            'jquery': '~3.0.0',
            'underscore': '~1.8.0',
        },
        'devDependencies': {
            'sinon': '~1.17.0'
        },
        'name': 'dummy',
    })
def test_prepare_spec_artifacts(self):
    """prepare_spec_artifacts resolves relative paths and drops missing ones."""
    stub_stdouts(self)
    remember_cwd(self)
    tmpdir = mkdtemp(self)
    fake = join(tmpdir, 'fake.js')
    real = join(tmpdir, 'real.js')
    os.chdir(tmpdir)
    with open(real, 'w') as fd:
        fd.write('')
    with pretty_logging(
            logger='calmjs.dev', stream=mocks.StringIO()) as log:
        # note the relative paths
        spec = Spec(artifact_paths=['real.js', 'fake.js'])
        prepare_spec_artifacts(spec)
    # note that the full path is now specified.
    self.assertEqual(spec['artifact_paths'], [real])
    # message text matches the exact log emitted for the missing file
    self.assertIn('does not exists', log.getvalue())
    self.assertIn(fake, log.getvalue())
    # should still work with full paths.
    spec = Spec(artifact_paths=[real, fake])
    prepare_spec_artifacts(spec)
    self.assertEqual(spec['artifact_paths'], [real])
def test_standalone_main_version(self):
    """Requesting -V on the yarn runtime exits after printing versions."""
    stub_stdouts(self)
    # note that the default call method itself does NOT sys.exit; the
    # SystemExit comes out of the version action.
    self.assertRaises(SystemExit, yarn.yarn.runtime, ['-V'])
    captured = sys.stdout.getvalue()
    self.assertIn('calmjs', captured)
    self.assertIn('from', captured)
def test_root_runtime_bad_names(self):
    """Malformed entry point names are rejected and hidden from help."""
    working_set = mocks.WorkingSet({
        'calmjs.runtime': [
            'bad name = calmjs.npm:npm.runtime',
            'bad.name = calmjs.npm:npm.runtime',
            'badname:likethis = calmjs.npm:npm.runtime',
        ]
    })
    stderr = mocks.StringIO()
    with pretty_logging(
            logger='calmjs.runtime', level=DEBUG, stream=stderr):
        rt = runtime.Runtime(working_set=working_set)
    err = stderr.getvalue()
    self.assertIn("bad 'calmjs.runtime' entry point", err)
    stub_stdouts(self)
    with self.assertRaises(SystemExit):
        rt(['-h'])
    out = sys.stdout.getvalue()
    # this results in unnatural argparsing situation
    self.assertNotIn('bad name', out)
    # reserved for disambiguation
    self.assertNotIn('bad.name', out)
    self.assertNotIn('badname:likethis', out)
    # command listing naturally not available.
    self.assertNotIn('npm', out)
def test_yarn_init_existing_interactive_merge_no(self):
    """Declining the overwrite prompt leaves the existing file untouched."""
    stub_stdouts(self)
    stub_stdin(self, 'N')
    tmpdir = mkdtemp(self)
    # Write an initial thing
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        json.dump({'dependencies': {
            'jquery': '~3.0.0',
            'underscore': '~1.8.0',
        }, 'devDependencies': {
            'sinon': '~1.17.0'
        }, 'name': 'dummy'}, fd, indent=0)
    os.chdir(tmpdir)
    self.assertFalse(yarn.yarn_init(
        'foo', merge=True, callback=prompt_overwrite_json))
    with open(join(tmpdir, 'package.json')) as fd:
        # written with indentation, so one line alone is invalid JSON
        with self.assertRaises(ValueError):
            json.loads(fd.readline())
        fd.seek(0)
        result = json.load(fd)
    # Should not have written anything if user said no.
    self.assertEqual(result, {
        'dependencies': {
            'jquery': '~3.0.0',
            'underscore': '~1.8.0',
        },
        'devDependencies': {
            'sinon': '~1.17.0'
        },
        'name': 'dummy',
    })
def test_runtime_cli_compile_explicit_registry_site(self):
    """Explicit registry method with an undeclared registry yields nothing."""
    utils.stub_stdouts(self)
    current_dir, target_file = self.setup_runtime_main_env()
    os.chdir(current_dir)
    # Invoke the thing through the main runtime
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'rjs', 'site',
            '--source-registry-method=explicit',
            '--export-target=' + target_file,
        ])
    self.assertEqual(e.exception.args[0], 0)
    with open(target_file) as fd:
        contents = fd.read()
    # As the registry is NOT declared for that package, it should
    # result in nothing.
    self.assertNotIn('framework/lib', contents)
    self.assertIn(
        'no module registry declarations found using packages',
        sys.stderr.getvalue(),
    )
    self.assertIn("'site'", sys.stderr.getvalue())
    self.assertIn(
        "using acquisition method 'explicit'", sys.stderr.getvalue())
def test_karma_test_runner_standalone_artifact(self):
    """
    What's the purpose of tests if they can't be executed any time,
    anywhere, against anything?
    """
    utils.stub_stdouts(self)
    current_dir = utils.mkdtemp(self)
    export_target = join(current_dir, 'example_package.js')
    # first, generate our bundle.
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'rjs', 'example.package', '--export-target', export_target])
    self.assertTrue(exists(export_target))
    # leverage the karma run command to run the tests provided by
    # the example.package against the resulting artifact.
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'karma', 'run',
            '--test-package', 'example.package',
            # TODO make this argument optional
            '--test-registry', self.registry_name + '.tests',
            '--artifact', export_target,
            # this is critical
            '--toolchain-package', 'calmjs.rjs',
        ])
    # tests should pass against the resultant bundle
    self.assertEqual(e.exception.args[0], 0)
def test_karma_test_runner_standalone_artifact(self):
    """
    What's the purpose of tests if they can't be executed any time,
    anywhere, against anything?
    """
    utils.stub_stdouts(self)
    current_dir = utils.mkdtemp(self)
    export_target = join(current_dir, 'example_package.js')
    # first, generate our bundle.
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'rjs', 'example.package', '--export-target', export_target])
    self.assertTrue(exists(export_target))
    # leverage the karma run command to run the tests provided by
    # the example.package against the resulting artifact.
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'karma', 'run',
            '--test-package', 'example.package',
            # TODO make this argument optional
            '--test-registry', self.registry_name + '.tests',
            '--artifact', export_target,
            # this is critical
            '--toolchain-package', 'calmjs.rjs',
        ])
    # tests should pass against the resultant bundle
    self.assertEqual(e.exception.args[0], 0)
def test_bower_all_the_actions(self):
    """Combining --install --view --init performs all three bower actions."""
    remember_cwd(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    stub_stdouts(self)
    stub_mod_call(self, cli)
    stub_base_which(self, which_bower)
    rt = self.setup_runtime()
    rt([
        'bower', '--install', '--view', '--init',
        'example.package1', 'example.package2'
    ])
    # inside stdout
    result = json.loads(sys.stdout.getvalue())
    self.assertEqual(result['dependencies']['jquery'], '~3.1.0')
    self.assertEqual(result['dependencies']['underscore'], '~1.8.3')
    with open(join(tmpdir, 'bower.json')) as fd:
        result = json.load(fd)
    self.assertEqual(result['dependencies']['jquery'], '~3.1.0')
    self.assertEqual(result['dependencies']['underscore'], '~1.8.3')
    args, kwargs = self.call_args
    self.assertEqual(args, (['bower', 'install'], ))
    env = kwargs.pop('env', {})
    self.assertEqual(kwargs, {})
    # have to do both, due to that this is an actual integration
    # test and values will differ between environments
    # NOTE(review): this compares finalize_env(env) against itself, so
    # it can only fail if finalize_env is non-deterministic; it was
    # presumably meant to compare against an expected environment —
    # confirm the intended assertion.
    self.assertEqual(finalize_env(env), finalize_env(env))
def test_karma_runtime_run_toolchain_auto_test_registry(self):
    """Toolchain advice drives automatic test registry resolution."""

    def cleanup():
        # drop the registries this test mutates/creates
        root_registry.records.pop('calmjs.dev.module.tests', None)
        root_registry.records.pop(CALMJS_TOOLCHAIN_ADVICE, None)

    self.addCleanup(cleanup)
    stub_stdouts(self)
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.toolchain.advice]\n'
        'calmjs.dev.toolchain:KarmaToolchain'
        ' = calmjs.tests.test_toolchain:dummy\n'
    ),), 'example.package', '1.0')
    # in the main distribution we did not define this to avoid
    # potential contamination of test data by this package with the
    # rest of the framework, so we stub that function
    _called = []

    def fake_flatten_module_registry_names(package_names):
        _called.extend(package_names)
        return ['calmjs.dev.module']

    from calmjs.dev import toolchain
    stub_item_attr_value(
        self, toolchain, 'flatten_module_registry_names',
        fake_flatten_module_registry_names
    )
    working_set = WorkingSet([self._calmjs_testing_tmpdir])
    root_registry.records[CALMJS_TOOLCHAIN_ADVICE] = AdviceRegistry(
        CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
    # manipulate the registry to remove the fail test
    reg = root_registry.get('calmjs.dev.module.tests')
    reg.records['calmjs.dev.tests'].pop('calmjs/dev/tests/test_fail', '')
    # use the full blown runtime
    rt = KarmaRuntime(self.driver)
    # the artifact in our case is identical to the source file
    artifact = resource_filename('calmjs.dev', 'main.js')
    result = rt([
        'run', '--artifact', artifact,
        '--test-with-package', 'calmjs.dev',
        '--toolchain-package', 'example.package',
    ])
    self.assertIn('calmjs.dev', _called)
    self.assertIn('karma_config_path', result)
    self.assertEqual(result['artifact_paths'], [artifact])
    # the spec key is written.
    self.assertEqual(result['dummy'], ['dummy'])
    self.assertEqual(
        result['calmjs_module_registry_names'], ['calmjs.dev.module'])
    self.assertIn(
        'calmjs/dev/tests/test_main', result['test_module_paths_map'])
def test_calmjs_artifact_package_generation(self):
    """The artifact build subcommand produces every declared artifact."""
    utils.stub_stdouts(self)
    with self.assertRaises(SystemExit) as e:
        runtime.main(['artifact', 'build', 'example.package'])
    self.assertEqual(e.exception.args[0], 0)
    registry = get_registry('calmjs.artifacts')
    # every builder registered for the package must have written its
    # export target to disk
    for e, t, spec in registry.iter_builders_for('example.package'):
        self.assertTrue(exists(spec['export_target']))
def test_calmjs_main_runtime_console_version(self):
    """The npm subcommand -V reports two version lines."""
    stub_stdouts(self)
    with self.assertRaises(SystemExit) as e:
        runtime.main(['npm', '-V'])
    self.assertEqual(e.exception.args[0], 0)
    # reports both versions.
    value = sys.stdout.getvalue()
    self.assertEqual(2, len(value.strip().splitlines()))
def test_calmjs_main_console_version_broken(self):
    """-V with an empty working set degrades to placeholder output."""
    stub_stdouts(self)
    # an empty working set means no distribution metadata is available
    stub_item_attr_value(
        self, runtime, 'default_working_set',
        pkg_resources.WorkingSet([mkdtemp(self)]))
    # make sure the bad case doesn't just blow up...
    with self.assertRaises(SystemExit) as e:
        runtime.main(['-V'])
    self.assertEqual(e.exception.args[0], 0)
    self.assertIn('? ? from ?', sys.stdout.getvalue())
def test_stub_stdouts(self):
    """stub_stdouts swaps the std streams; doCleanups restores them."""
    original_out = sys.stdout
    original_err = sys.stderr
    utils.stub_stdouts(self)
    # both streams replaced while the stub is active
    self.assertIsNot(original_out, sys.stdout)
    self.assertIsNot(original_err, sys.stderr)
    self.doCleanups()
    # and both restored afterwards
    self.assertIs(original_out, sys.stdout)
    self.assertIs(original_err, sys.stderr)
def test_stub_stdouts(self):
    """The stdout/stderr stubs are undone by the registered cleanups."""
    saved = (sys.stdout, sys.stderr)
    utils.stub_stdouts(self)
    self.assertIsNot(saved[0], sys.stdout)
    self.assertIsNot(saved[1], sys.stderr)
    self.doCleanups()
    self.assertIs(saved[0], sys.stdout)
    self.assertIs(saved[1], sys.stderr)
def test_standalone_reuse_main(self):
    """The runtime can be reused directly without exiting the process."""
    stub_stdouts(self)
    # the default call method does NOT call sys.exit.
    yarn.yarn.runtime(['calmjs', '-vv'])
    payload = json.loads(sys.stdout.getvalue())
    self.assertEqual({}, payload['dependencies'])
    # -vv raises verbosity enough for DEBUG level output
    self.assertIn('DEBUG', sys.stderr.getvalue())
def test_artifact_verify_fail_exit_first(self):
    """The -x flag aborts on first failure without blowing up."""
    stub_stdouts(self)
    karma_rt = self.setup_karma_artifact_runtime()
    # should not explode if the abort is triggered
    self.assertFalse(karma_rt(['calmjs.dev', '-x']))
    self.assertIn(
        'terminating due to expected unrecoverable condition',
        sys.stderr.getvalue())
def test_standalone_reuse_main(self):
    """Calling the runtime object directly works and honours -vv."""
    stub_stdouts(self)
    # note: the default call method does NOT invoke sys.exit.
    yarn.yarn.runtime(['calmjs', '-vv'])
    output = json.loads(sys.stdout.getvalue())
    self.assertEqual(output['dependencies'], {})
    stderr_text = sys.stderr.getvalue()
    self.assertIn('DEBUG', stderr_text)
def test_read_dist_line_io_error(self):
    """read_dist_line_list returns [] when the metadata read raises IOError."""
    stub_stdouts(self)
    # a None value makes MockProvider emulate an IOError on access
    provider = MockProvider({'list.txt': None})
    dist_ = pkg_resources.Distribution(
        metadata=provider, project_name='dummydist', version='0.0.0')
    self.assertEqual(
        [], calmjs_dist.read_dist_line_list(dist_, 'list.txt'))
def test_artifact_verify_success(self):
    """With the failing test removed, artifact verification passes."""
    stub_stdouts(self)
    karma_rt = self.setup_karma_artifact_runtime()
    # manipulate the registry to drop the known-failing test module
    registry = root_registry.get('calmjs.dev.module.tests')
    registry.records['calmjs.dev.tests'].pop(
        'calmjs/dev/tests/test_fail', '')
    # should finally pass
    self.assertTrue(karma_rt(['calmjs.dev']))
def test_base_version(self):
    """A bare BaseRuntime has no package info but -V still exits cleanly."""
    # The version information should be missing but shouldn't result
    # in catastrophic errors.
    stub_stdouts(self)
    rt = runtime.BaseRuntime()
    with self.assertRaises(SystemExit):
        rt(['-V'])
    out = sys.stdout.getvalue()
    self.doCleanups()
    self.assertEqual(out, 'no package information available.')
def test_artifact_verify_fail_continue(self):
    """A failing test reports failure yet continues through the run."""
    # since there is a failure test case
    stub_stdouts(self)
    karma_rt = self.setup_karma_artifact_runtime()
    self.assertFalse(karma_rt(['calmjs.dev']))
    err = sys.stderr.getvalue()
    self.assertIn('continuing as specified', err)
    self.assertNotIn(
        "no artifacts or tests defined for package 'calmjs.dev'", err)
def test_artifact_verify_extra_artifacts_with_build_dir(self):
    """Extra --artifact files are tested with, and ordered before, the rest."""
    # this one is provided only as convenience; this may be useful
    # for builders that construct a partial artifacts but using a
    # test rule that doesn't provide some requirements, or for
    # testing whether inclusion of that other artifact will cause
    # interference with the expected functionality of the artifact
    # to be tested with.
    extra_js = join(mkdtemp(self), 'extra.js')
    extra_test = join(mkdtemp(self), 'test_extra.js')
    with open(extra_js, 'w') as fd:
        fd.write('var extra = {value: "artifact"};')
    with open(extra_test, 'w') as fd:
        fd.write(dedent("""
            'use strict';
            describe('emulated extra test', function() {
                it('extra artifact provided', function() {
                    expect(window.extra.value).to.equal("artifact");
                });
            });
        """.strip()))
    build_dir = mkdtemp(self)
    stub_stdouts(self)
    rt = self.setup_karma_artifact_runtime()
    # remove the fail test.
    reg = root_registry.get('calmjs.dev.module.tests')
    reg.records['calmjs.dev.tests'].pop('calmjs/dev/tests/test_fail', '')
    # inject our extra test to ensure the artifact that got added
    # still gets tested.
    reg.records['calmjs.dev.tests'][
        'calmjs/dev/tests/test_extra'] = extra_test
    self.assertTrue(rt([
        '-vv', '--artifact', extra_js, '--build-dir', build_dir,
        '-u', 'calmjs.dev', 'calmjs.dev',
    ]))
    stderr = sys.stderr.getvalue()
    self.assertIn("specified artifact '%s' found" % extra_js, stderr)
    self.assertIn("artifact.js' found", stderr)
    with codecs.open(
            join(build_dir, 'karma.conf.js'), encoding='utf8') as fd:
        rawconf = es5(fd.read())
    # manually and lazily extract the configuration portion
    config = json.loads(str(
        rawconf.children()[0].expr.right.elements[0].expr.args.items[0]))
    # the extra specified artifact must be before the rest.
    self.assertEqual(config['files'][0], extra_js)
def test_prompt_basic(self):
    """prompt_overwrite_json shows a diff and returns False on 'n'."""
    stub_stdouts(self)
    stub_stdin(self, 'n')
    result = ui.prompt_overwrite_json(
        {'a': 1, 'b': 1}, {'a': 1, 'b': 2}, self.tmpjson)
    self.assertFalse(result)
    stdout = sys.stdout.getvalue()
    self.assertIn("'test.json'", stdout)
    self.assertIn(self.tmpjson, stdout)
    # diff-style markers for the changed key
    self.assertIn('- "b": 1', stdout)
    self.assertIn('+ "b": 2', stdout)
def test_karma_test_runner_basic(self):
    """karma wrapping of the rjs toolchain exits 0 and writes the bundle."""
    utils.stub_stdouts(self)
    workdir = utils.mkdtemp(self)
    bundle = join(workdir, 'example_package.js')
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'karma', 'rjs', 'example.package',
            '--export-target=' + bundle,
        ])
    self.assertEqual(e.exception.args[0], 0)
    self.assertTrue(exists(bundle))
def test_karma_test_runner_basic(self):
    """A basic karma-wrapped rjs build succeeds and emits the target."""
    utils.stub_stdouts(self)
    target = join(utils.mkdtemp(self), 'example_package.js')
    argv = [
        'karma', 'rjs', 'example.package', '--export-target=' + target]
    with self.assertRaises(SystemExit) as e:
        runtime.main(argv)
    self.assertEqual(0, e.exception.args[0])
    self.assertTrue(exists(target))
def test_read_dist_line_io_error(self):
    """A metadata IOError makes read_dist_line_list yield an empty list."""
    stub_stdouts(self)
    # the None value instructs MockProvider to emulate an IOError
    metadata = MockProvider({'list.txt': None})
    dummydist = pkg_resources.Distribution(
        metadata=metadata, project_name='dummydist', version='0.0.0')
    results = calmjs_dist.read_dist_line_list(dummydist, 'list.txt')
    self.assertEqual([], results)
def test_npm_init_existing_malform(self):
    """A malformed existing package.json is ignored with a warning."""
    remember_cwd(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    rt = self.setup_runtime()
    # create an existing malformed file
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        fd.write('not a json')
    stub_stdouts(self)
    rt(['foo', '--init', 'example.package2'])
    self.assertIn("ignoring existing malformed", sys.stderr.getvalue())
def test_npm_interrupted(self):
    """A KeyboardInterrupt during the call aborts with a CRITICAL message."""
    remember_cwd(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    rt = self.setup_runtime()
    stub_stdouts(self)
    # simulate the user interrupting the binary invocation
    stub_mod_call(self, cli, fake_error(KeyboardInterrupt))
    rt(['foo', '--install', 'example.package2'])
    self.assertIn("CRITICAL", sys.stderr.getvalue())
    self.assertIn(
        "termination requested; aborted.", sys.stderr.getvalue())
def test_critical_log_exception(self):
    """Unexpected exceptions are logged at CRITICAL with their message."""
    remember_cwd(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    rt = self.setup_runtime()
    stub_stdouts(self)
    # simulate an unexpected failure raised by the binary invocation
    stub_mod_call(self, cli, fake_error(RuntimeError('fake error')))
    rt(['foo', '--install', 'example.package2'])
    self.assertIn(
        "CRITICAL calmjs.runtime RuntimeError: fake error",
        sys.stderr.getvalue())
def test_artifact_verify_fail_at_replacement(self):
    """Replacing test packages with one that has no tests fails the run."""
    # failure happening because there are no tests found when
    # execution for them are set up.
    stub_stdouts(self)
    rt = self.setup_karma_artifact_runtime()
    self.assertFalse(rt([
        '-vv', 'calmjs.dev', '--test-with-package', 'missing'
    ]))
    # though mostly the tests is for the capturing of these messages
    self.assertIn("spec['test_package_names'] was", sys.stderr.getvalue())
    self.assertIn("calmjs.dev'] replaced with", sys.stderr.getvalue())
    self.assertIn("missing']", sys.stderr.getvalue())
def test_artifact_verify_fail_at_python_deps_missing(self):
    """A builder entry point on an uninstalled package fails verification."""
    # entry_point referenced a package not installed, it should fail
    # too.
    stub_stdouts(self)
    rt = self.setup_karma_artifact_runtime()
    self.assertFalse(rt(['depsmissing']))
    self.assertIn(
        "unable to import the target builder for the "
        "entry point 'artifact.js = not_installed:tester' from "
        "package 'depsmissing 1.0'",
        sys.stderr.getvalue(),
    )
def test_get_dist_package_read_error(self):
    # NOTE(review): this definition is shadowed by an identical method
    # of the same name defined immediately below, so this copy never
    # runs; consider removing one of the two.
    """A provider IO error on metadata read must yield None, not raise."""
    stub_stdouts(self)  # quiet stdout from distutils logs
    provider = MockProvider({
        self.pkgname: None,  # a None value emulates an IO error
    })
    dummy_dist = pkg_resources.Distribution(
        metadata=provider,
        project_name='dummydist',
        version='0.0.0',
    )
    # the reader should swallow the failure and return nothing
    self.assertIsNone(calmjs_dist.read_dist_egginfo_json(dummy_dist))
def test_get_dist_package_read_error(self):
    # NOTE(review): an identical method of this name is defined just
    # above; this later copy is the one that actually executes.
    """A provider IO error on metadata read must yield None, not raise."""
    stub_stdouts(self)  # quiet stdout from distutils logs
    provider = MockProvider({
        self.pkgname: None,  # a None value emulates an IO error
    })
    dummy_dist = pkg_resources.Distribution(
        metadata=provider, project_name='dummydist', version='0.0.0')
    # the reader should swallow the failure and return nothing
    self.assertIsNone(calmjs_dist.read_dist_egginfo_json(dummy_dist))
def test_npm_view(self):
    """Both the explicit --view flag and the bare invocation dump JSON."""
    stub_stdouts(self)
    runtime_inst = self.setup_runtime()
    runtime_inst(['foo', '--view', 'example.package1', 'example.package2'])
    viewed = json.loads(sys.stdout.getvalue())
    self.assertEqual(viewed['dependencies']['jquery'], '~3.1.0')
    self.assertEqual(viewed['dependencies']['underscore'], '~1.8.3')
    # omitting the flag should produce the same output
    stub_stdouts(self)
    runtime_inst(['foo', 'example.package1', 'example.package2'])
    viewed = json.loads(sys.stdout.getvalue())
    self.assertEqual(viewed['dependencies']['jquery'], '~3.1.0')
    self.assertEqual(viewed['dependencies']['underscore'], '~1.8.3')
def test_npm_binary_not_found(self):
    """A missing npm binary is reported as an error, not a crash."""
    remember_cwd(self)
    workdir = mkdtemp(self)
    os.chdir(workdir)
    runtime_inst = self.setup_runtime()
    stub_stdouts(self)
    # make any invocation of the binary raise IOError, as if absent
    stub_mod_call(self, cli, fake_error(IOError))
    runtime_inst(['foo', '--install', 'example.package2'])
    captured = sys.stderr.getvalue()
    self.assertIn("ERROR", captured)
    self.assertIn("invocation of the 'npm' binary failed;", captured)
def test_yarn_install_package_json_no_overwrite_interactive(self):
    """
    Most of these package_json testing will be done in the next test
    class specific for ``yarn init``.
    """
    # exercise the init call implied by the install
    stub_mod_call(self, cli)
    stub_stdouts(self)
    stub_stdin(self, 'n\n')
    stub_check_interactive(self, True)
    workdir = mkdtemp(self)
    os.chdir(workdir)
    # assemble the pre-made distribution metadata
    app = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {
                'jquery': '~1.11.0'
            },
        })),
    ), 'foo', '1.9.0')
    working_set = WorkingSet()
    working_set.add(app, self._calmjs_testing_tmpdir)
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    # plant a fake package.json for the overwrite prompt to hit
    with open(join(workdir, 'package.json'), 'w') as stream:
        json.dump({}, stream)
    # capture the logging explicitly, as the conditions determining
    # how the errors get outputted differ between test harnesses;
    # the captured stream is verified further below.
    with pretty_logging(stream=StringIO()) as stderr:
        # this call is backed entirely by the stubs above
        yarn.yarn_install('foo', callback=prompt_overwrite_json)
    self.assertIn(
        "Overwrite '%s'? (Yes/No) [No] " % join(workdir, 'package.json'),
        sys.stdout.getvalue())
    # Ensure the error message.  Normally this is printed through
    # stderr via distutils custom logger and our handler bridge for
    # that which is tested elsewhere.
    self.assertIn("not continuing with 'yarn install'", stderr.getvalue())
    with open(join(workdir, 'package.json')) as stream:
        contents = stream.read()
    # declining the overwrite (the default answer) leaves the file alone
    self.assertEqual(contents, '{}')
def test_runtime_cli_bundle_method_force_empty(self):
    # NOTE(review): this method is shadowed by a later definition of
    # the same name that uses the newer --sourcepath-method flag
    # spelling, so this copy never runs; consider removing it.
    utils.stub_stdouts(self)
    current_dir, target_file = self.setup_runtime_main_env()
    os.chdir(current_dir)
    build_dir = utils.mkdtemp(self)
    slim_target = join(current_dir, 'widget_slim.js')
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'rjs', 'widget',
            '--build-dir=' + build_dir,
            '--empty',
            '--source-map-method=all',
            '--bundle-map-method=none',
            '--export-target=' + slim_target,
        ])
    self.assertEqual(e.exception.args[0], 0)
    # the bundled files must not have been copied into the build dir
    self.assertFalse(exists(join(build_dir, 'underscore.js')))
    self.assertFalse(exists(join(build_dir, 'jquery.js')))
    with open(join(build_dir, 'build.js')) as fd:
        # drop the non-JSON header and footer lines
        build_js = json.loads(''.join(fd.readlines()[1:-1]))
    with open(join(build_dir, 'config.js')) as fd:
        # drop the non-JSON header and footer lines
        config_js = json.loads(''.join(fd.readlines()[4:-10]))
    self.assertEqual(build_js['paths'], {
        # jquery is absent because no sources actually poke into it,
        # whereas in the previous test it showed up via extras_calmjs
        'underscore': 'empty:',
    })
    self.assertEqual(sorted(build_js['include']), [
        'framework/lib',
        'widget/core',
        'widget/datepicker',
        'widget/richedit',
    ])
    self.assertEqual(config_js['paths'], {
        'framework/lib': 'framework/lib.js?',
        'widget/core': 'widget/core.js?',
        'widget/datepicker': 'widget/datepicker.js?',
        'widget/richedit': 'widget/richedit.js?',
        # picked up by the source analysis when the empty option is
        # applied
        'underscore': 'empty:',
    })
    self.assertEqual(config_js['include'], [])
def test_runtime_cli_bundle_method_force_empty(self):
    # NOTE(review): an earlier definition of this same name exists
    # above (using the older --source-map-method flag spelling); this
    # later copy is the one that actually executes.
    utils.stub_stdouts(self)
    current_dir, target_file = self.setup_runtime_main_env()
    os.chdir(current_dir)
    build_dir = utils.mkdtemp(self)
    slim_target = join(current_dir, 'widget_slim.js')
    with self.assertRaises(SystemExit) as e:
        runtime.main([
            'rjs', 'widget',
            '--build-dir=' + build_dir,
            '--empty',
            '--sourcepath-method=all',
            '--bundlepath-method=none',
            '--export-target=' + slim_target,
        ])
    self.assertEqual(e.exception.args[0], 0)
    # the bundled files must not have been copied into the build dir
    self.assertFalse(exists(join(build_dir, 'underscore.js')))
    self.assertFalse(exists(join(build_dir, 'jquery.js')))
    with open(join(build_dir, 'build.js')) as fd:
        # drop the non-JSON header and footer lines
        build_js = json.loads(''.join(fd.readlines()[1:-1]))
    with open(join(build_dir, 'config.js')) as fd:
        # drop the non-JSON header and footer lines
        config_js = json.loads(''.join(fd.readlines()[4:-10]))
    self.assertEqual(build_js['paths'], {
        # jquery is absent because no sources actually poke into it,
        # whereas in the previous test it showed up via extras_calmjs
        'underscore': 'empty:',
    })
    self.assertEqual(sorted(build_js['include']), [
        'framework/lib',
        'widget/core',
        'widget/datepicker',
        'widget/richedit',
    ])
    self.assertEqual(config_js['paths'], {
        'framework/lib': 'framework/lib.js?',
        'widget/core': 'widget/core.js?',
        'widget/datepicker': 'widget/datepicker.js?',
        'widget/richedit': 'widget/richedit.js?',
        # picked up by the source analysis when the empty option is
        # applied
        'underscore': 'empty:',
    })
    self.assertEqual(config_js['include'], [])
def test_prompt_non_interactive_null(self):
    """A null validator in non-interactive mode aborts and returns None."""
    stub_stdouts(self)
    stub_check_interactive(self, False)
    outcome = self.do_prompt(
        'How are you?', 'I am fine thank you.\n',
        choices=(
            ('a', 'A'),
            ('b', 'B'),
            ('c', 'C'),
        ),
        # the explicit validator negates the choices
        validator=ui.null_validator,
    )
    self.assertIs(outcome, None)
    self.assertEqual(self.stdout.getvalue(), 'How are you? Aborted.\n')
def test_prompt_non_interactive_choices(self):
    """Non-interactive prompting with choices resolves to the default key."""
    stub_stdouts(self)
    stub_check_interactive(self, False)
    outcome = self.do_prompt(
        'What are you?', 'c',
        choices=(
            ('a', 'A'),
            ('b', 'B'),
            ('c', 'C'),
        ),
        default_key=0,
    )
    self.assertEqual(outcome, 'A')
    self.assertEqual(
        self.stdout.getvalue(), 'What are you? (a/b/c) [a] a\n')