def setUp(self):
    """Install three dummy dists into a stubbed default working set.

    Creates 'foo', 'underscore' and 'named' distributions, each with a
    package.json declaring dependencies, then stubs
    ``dist.default_working_set`` and interactive detection.
    """
    # save working directory
    remember_cwd(self)
    # All the pre-made setup.
    stub_mod_call(self, cli)
    app = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'jquery': '~1.11.0'},
        })),
    ), 'foo', '1.9.0')
    underscore = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'underscore': '~1.8.0'},
        })),
    ), 'underscore', '1.8.0')
    named = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'jquery': '~3.0.0'},
            'name': 'named-js',
        })),
    ), 'named', '2.0.0')
    # register all three dists against the testing tmpdir
    working_set = WorkingSet()
    working_set.add(app, self._calmjs_testing_tmpdir)
    working_set.add(underscore, self._calmjs_testing_tmpdir)
    working_set.add(named, self._calmjs_testing_tmpdir)
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    stub_check_interactive(self, True)
def setup_runtime(self):
    """Create two bower.json-bearing dists and return a bower Runtime.

    Stubs ``dist.default_working_set`` and interactive checks, then
    builds the Runtime against a mock working set exposing the bower
    runtime entry point.
    """
    make_dummy_dist(self, (('bower.json', json.dumps({
        'name': 'site',
        'dependencies': {
            'jquery': '~3.1.0',
        },
    })), ), 'example.package1', '1.0')
    make_dummy_dist(self, (('bower.json', json.dumps({
        'name': 'site',
        'dependencies': {
            'underscore': '~1.8.3',
        },
    })), ), 'example.package2', '2.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    # Stub out the underlying data needed for the cli for the tests
    # to test against our custom data for reproducibility.
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    stub_mod_check_interactive(self, [cli], True)
    # Of course, apply a mock working set for the runtime instance
    # so it can use the bower runtime.
    working_set = mocks.WorkingSet({
        'calmjs.runtime': [
            'bower = calmjs.bower:bower.runtime',
        ],
    })
    return runtime.Runtime(working_set=working_set)
def test_relocated_distribution(self):
    """Resolution of a namespaced entry point under a relocated dist.

    Before the module directory exists the resolver returns None and
    logs "does not exist"; after creating the directory the expected
    path is returned with no logging.
    """
    root = mkdtemp(self)
    dummyns_path = join(root, 'dummyns')
    make_dummy_dist(self, ((
        'namespace_packages.txt',
        'dummyns\n',
    ), (
        'entry_points.txt',
        '[dummyns]\n'
        'dummyns = dummyns:attr\n',
    ),), 'dummyns', '1.0', working_dir=root)
    working_set = pkg_resources.WorkingSet([
        root,
        self.ds_egg_root,
    ])
    # activate this as the working set
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    dummyns_ep = next(working_set.iter_entry_points('dummyns'))
    with pretty_logging(stream=StringIO()) as fd:
        p = indexer.resource_filename_mod_entry_point(
            'dummyns', dummyns_ep)
        # since the actual location is not created)
        self.assertIsNone(p)
        self.assertIn("does not exist", fd.getvalue())
    # retry with the module directory created at the expected location
    os.mkdir(dummyns_path)
    with pretty_logging(stream=StringIO()) as fd:
        p = indexer.resource_filename_mod_entry_point(
            'dummyns', dummyns_ep)
        self.assertEqual(normcase(p), normcase(dummyns_path))
        self.assertEqual('', fd.getvalue())
def test_normcase_registration(self):
    """Conflicting artifact names on case-insensitive platforms.

    Patches ``artifact.normcase`` with the Windows (nt) variant so
    'case.js' and 'Case.js' collide; the second registration must be
    ignored with a logged explanation.
    """
    # create an empty working set for a clean-slate test.
    cwd = utils.mkdtemp(self)
    mock_ws = WorkingSet([])
    dist_ = Distribution(cwd, project_name='pkg', version='1.0')
    dist_.egg_info = cwd  # just lazy
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    # case sensitive test; have to patch the normcase at artifact
    # module with the nt version
    from ntpath import normcase as nt_normcase
    utils.stub_item_attr_value(self, artifact, 'normcase', nt_normcase)
    # using named case for case sensitivity test.
    c1 = EntryPoint.parse('case.js = dummy_builder:builder1')
    c1.dist = dist_
    c2 = EntryPoint.parse('Case.js = dummy_builder:builder2')
    c2.dist = dist_
    # use the error one
    ct = join(cwd, 'calmjs_artifacts', 'Case.js')
    with pretty_logging(stream=mocks.StringIO()) as stream:
        registry.register_entry_point(c1)
        registry.register_entry_point(c2)
    log = stream.getvalue()
    self.assertIn(
        "entry point 'Case.js = dummy_builder:builder2' from package "
        "'pkg 1.0' resolves to the path '%s' which was already "
        "registered to entry point 'case.js = dummy_builder:builder1'; "
        "conflicting entry point registration will be ignored." % ct,
        log)
    self.assertIn(
        "the file mapping error is caused by this platform's case-"
        "insensitive filename", log)
def test_standard(self):
    """Entry point resolves to the module dir when it properly exists."""
    d_egg_root = join(mkdtemp(self), 'dummyns')
    make_dummy_dist(self, ((
        'namespace_packages.txt',
        'dummyns\n',
    ), (
        'entry_points.txt',
        '[dummyns]\n'
        'dummyns = dummyns:attr\n',
    ),), 'dummyns', '1.0', working_dir=d_egg_root)
    working_set = pkg_resources.WorkingSet([
        d_egg_root,
        self.ds_egg_root,
    ])
    # ensure the working_set is providing the distributions being
    # mocked here so that resource_filename will resolve correctly
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    moddir = join(d_egg_root, 'dummyns')
    os.makedirs(moddir)
    # make this also a proper thing
    with open(join(moddir, '__init__.py'), 'w') as fd:
        fd.write('')
    dummyns_ep = next(working_set.iter_entry_points('dummyns'))
    p = indexer.resource_filename_mod_entry_point('dummyns', dummyns_ep)
    # finally, this should work.
    self.assertEqual(normcase(p), normcase(moddir))
def test_map_registry_name_to_test(self):
    """Mapping of registry names to their test registry counterparts.

    Builds a root/child/grandchild registry chain, stubs ``dist.get``
    to resolve them, and checks the '.tests' name derivation for
    missing, standard and nested registries.
    """
    working_set = WorkingSet({})
    root = base.BaseModuleRegistry(
        'root.module', _working_set=working_set)
    child = ChildModuleRegistry(
        'root.module.child', _parent=root, _working_set=working_set)
    grandchild = ChildModuleRegistry(
        'root.module.child.child', _parent=child, _working_set=working_set)
    stub_item_attr_value(self, dist, 'get', {
        r.registry_name: r for r in [root, child, grandchild]}.get)
    # no assumptions are made about missing registries
    self.assertEqual([
        'missing.module.child.tests',
    ], list(dist.map_registry_name_to_test(['missing.module.child'])))
    # standard registry
    self.assertEqual([
        'root.module.tests',
    ], list(dist.map_registry_name_to_test(['root.module'])))
    # grandchild registry
    self.assertEqual([
        'root.module.tests.child.child',
    ], list(dist.map_registry_name_to_test(['root.module.child.child'])))
def test_denormalized_package_names(self):
    """Artifact lookup works for both raw and safe_name'd package names."""
    working_dir = utils.mkdtemp(self)
    utils.make_dummy_dist(self, (('entry_points.txt', '\n'.join([
        '[calmjs.artifacts]',
        'full.js = calmjs_testbuild:full',
    ])), ), 'de_normal_name', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    # stub the default working set in calmjs.dist for the resolver
    # to work.
    utils.stub_item_attr_value(self, dist, 'default_working_set', mock_ws)
    # still specify the working set.
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    self.assertEqual(
        1, len(list(registry.iter_records_for('de_normal_name'))))
    # also test internal consistency
    self.assertIn('de_normal_name', registry.compat_builders['full'])
    self.assertIn('de_normal_name', registry.packages)
    default = registry.get_artifact_filename('de_normal_name', 'full.js')
    normal = registry.get_artifact_filename(safe_name('de_normal_name'),
                                            'full.js')
    # both spellings must resolve to the same artifact file
    self.assertEqual(default, normal)
def test_normcase_registration(self):
    """Conflicting artifact names on case-insensitive platforms.

    Same scenario as the sibling test of this name: with the nt
    ``normcase`` patched in, 'Case.js' collides with 'case.js' and the
    later registration is ignored with a logged explanation.
    """
    # create an empty working set for a clean-slate test.
    cwd = utils.mkdtemp(self)
    mock_ws = WorkingSet([])
    dist_ = Distribution(cwd, project_name='pkg', version='1.0')
    dist_.egg_info = cwd  # just lazy
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    # case sensitive test; have to patch the normcase at artifact
    # module with the nt version
    from ntpath import normcase as nt_normcase
    utils.stub_item_attr_value(self, artifact, 'normcase', nt_normcase)
    # using named case for case sensitivity test.
    c1 = EntryPoint.parse('case.js = dummy_builder:builder1')
    c1.dist = dist_
    c2 = EntryPoint.parse('Case.js = dummy_builder:builder2')
    c2.dist = dist_
    # use the error one
    ct = join(cwd, 'calmjs_artifacts', 'Case.js')
    with pretty_logging(stream=mocks.StringIO()) as stream:
        registry.register_entry_point(c1)
        registry.register_entry_point(c2)
    log = stream.getvalue()
    self.assertIn(
        "entry point 'Case.js = dummy_builder:builder2' from package "
        "'pkg 1.0' resolves to the path '%s' which was already "
        "registered to entry point 'case.js = dummy_builder:builder1'; "
        "conflicting entry point registration will be ignored." % ct,
        log
    )
    self.assertIn(
        "the file mapping error is caused by this platform's case-"
        "insensitive filename", log
    )
def test_karma_runtime_integration_ignore_error(self):
    """With -I, a karma failure is reported but execution continues.

    Also verifies no coverage report directory is produced.
    """
    stub_stdouts(self)
    target = join(mkdtemp(self), 'target')
    build_dir = mkdtemp(self)
    stub_item_attr_value(
        self, mocks, 'dummy',
        ToolchainRuntime(NullToolchain()),
    )
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'null = calmjs.testing.mocks:dummy\n'
    ),), 'example.package', '1.0')
    working_set = WorkingSet([self._calmjs_testing_tmpdir])
    rt = KarmaRuntime(self.driver, working_set=working_set)
    result = rt([
        '-I', 'null', '--export-target', target,
        '--build-dir', build_dir,
    ])
    self.assertIn('karma_config_path', result)
    self.assertTrue(exists(result['karma_config_path']))
    # abort flag must not be set when ignoring errors
    self.assertFalse(result.get('karma_abort_on_test_failure'))
    self.assertIn(
        "karma exited with return code 1; continuing as specified",
        sys.stderr.getvalue()
    )
    # ensure coverage isn't run at all.
    coverage_report_dir = join(build_dir, 'coverage')
    self.assertFalse(exists(coverage_report_dir))
def setUp(self):
    """Install three dummy dists into a stubbed default working set.

    Same fixture as the sibling setUp, but stubs interactive checks via
    ``stub_mod_check_interactive`` and records the npm driver's
    interactive flag for later restoration/verification.
    """
    # save working directory
    remember_cwd(self)
    # All the pre-made setup.
    stub_mod_call(self, cli)
    app = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'jquery': '~1.11.0'},
        })),
    ), 'foo', '1.9.0')
    underscore = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'underscore': '~1.8.0'},
        })),
    ), 'underscore', '1.8.0')
    named = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'jquery': '~3.0.0'},
            'name': 'named-js',
        })),
    ), 'named', '2.0.0')
    working_set = WorkingSet()
    working_set.add(app, self._calmjs_testing_tmpdir)
    working_set.add(underscore, self._calmjs_testing_tmpdir)
    working_set.add(named, self._calmjs_testing_tmpdir)
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    stub_mod_check_interactive(self, [cli], True)
    # also save this
    self.inst_interactive = npm.npm.cli_driver.interactive
def test_npm_binary_not_found_debugger(self):
    """Debugger entry on npm failure: -dd drops into pdb, --debugger too."""
    from calmjs import utils

    def fake_post_mortem(*a, **kw):
        # stand-in for pdb.post_mortem; just emit the prompt marker
        sys.stdout.write('(Pdb) ')

    remember_cwd(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    rt = self.setup_runtime()
    # stub_stdin(self, u'quit\n')
    stub_stdouts(self)
    # ensure the binary is not found.
    stub_mod_call(self, cli, fake_error(IOError))
    stub_item_attr_value(self, utils, 'post_mortem', fake_post_mortem)
    rt(['-dd', 'foo', '--install', 'example.package2'])
    self.assertIn("ERROR", sys.stderr.getvalue())
    self.assertIn(
        "invocation of the 'npm' binary failed;", sys.stderr.getvalue())
    self.assertIn("terminating due to exception", sys.stderr.getvalue())
    self.assertIn("Traceback ", sys.stderr.getvalue())
    self.assertIn("(Pdb)", sys.stdout.getvalue())
    # reset captured streams, then retry with the explicit flag
    stub_stdouts(self)
    self.assertNotIn("(Pdb)", sys.stdout.getvalue())
    rt(['foo', '--install', 'example.package2', '--debugger'])
    self.assertIn("(Pdb)", sys.stdout.getvalue())
def test_karma_runtime_run_toolchain_auto_test_registry(self):
    """'run' subcommand picks up tests via the auto-derived registry.

    Stubs ``flatten_module_registry_names`` so package name flattening
    is deterministic, registers a toolchain advice dist, and verifies
    the resulting spec keys.
    """
    def cleanup():
        # drop the registries this test injects into the root registry
        root_registry.records.pop('calmjs.dev.module.tests', None)
        root_registry.records.pop(CALMJS_TOOLCHAIN_ADVICE, None)

    self.addCleanup(cleanup)
    stub_stdouts(self)
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.toolchain.advice]\n'
        'calmjs.dev.toolchain:KarmaToolchain'
        ' = calmjs.tests.test_toolchain:dummy\n'
    ),), 'example.package', '1.0')
    # in the main distribution we did not define this to avoid
    # potential contamination of test data by this package with the
    # rest of the framework, so we stub that function
    _called = []

    def fake_flatten_module_registry_names(package_names):
        _called.extend(package_names)
        return ['calmjs.dev.module']

    from calmjs.dev import toolchain
    stub_item_attr_value(
        self, toolchain, 'flatten_module_registry_names',
        fake_flatten_module_registry_names
    )
    working_set = WorkingSet([self._calmjs_testing_tmpdir])
    root_registry.records[
        CALMJS_TOOLCHAIN_ADVICE] = AdviceRegistry(
        CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
    # manipulate the registry to remove the fail test
    reg = root_registry.get('calmjs.dev.module.tests')
    reg.records['calmjs.dev.tests'].pop('calmjs/dev/tests/test_fail', '')
    # use the full blown runtime
    rt = KarmaRuntime(self.driver)
    # the artifact in our case is identical to the source file
    artifact = resource_filename('calmjs.dev', 'main.js')
    result = rt([
        'run', '--artifact', artifact,
        '--test-with-package', 'calmjs.dev',
        '--toolchain-package', 'example.package',
    ])
    self.assertIn('calmjs.dev', _called)
    self.assertIn('karma_config_path', result)
    self.assertEqual(result['artifact_paths'], [artifact])
    # the spec key is written.
    self.assertEqual(result['dummy'], ['dummy'])
    self.assertEqual(
        result['calmjs_module_registry_names'], ['calmjs.dev.module'])
    self.assertIn(
        'calmjs/dev/tests/test_main', result['test_module_paths_map'])
def test_artifact_test_simulation(self):
    """Artifact test registry wiring without actually running karma."""
    # don't actually run karma, since we are not setting up the full
    # integration environment for this isolated test - also keep the
    # spec reference here and have the helper return it so the
    # simplified verification can be done.
    spec = Spec(karma_advice_group=None)

    def generic_tester(package_names, export_target):
        # builder stand-in: record the export target on the shared spec
        spec['export_target'] = export_target
        return KarmaToolchain(), spec,

    tester_mod = ModuleType('calmjs_dev_tester')
    tester_mod.generic = generic_tester
    self.addCleanup(sys.modules.pop, 'calmjs_dev_tester')
    sys.modules['calmjs_dev_tester'] = tester_mod
    working_dir = utils.mkdtemp(self)
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts.tests]',
            'artifact.js = calmjs_dev_tester:generic',
        ])),
    ), 'app', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    utils.stub_item_attr_value(self, dist, 'default_working_set', mock_ws)
    registry = ArtifactTestRegistry(
        'calmjs.artifacts.tests', _working_set=mock_ws)
    artifact_name = registry.get_artifact_filename('app', 'artifact.js')
    with self.assertRaises(ToolchainCancel) as e:
        # file not exist yet will cancel the execution
        registry.prepare_export_location(artifact_name)
    self.assertIn("missing export_target '", str(e.exception))
    self.assertIn("artifact.js'", str(e.exception))
    mkdir(dirname(artifact_name))
    with open(artifact_name, 'w') as fd:
        fd.write('console.log("test artifact");\n')
    # no longer raise an exception
    registry.prepare_export_location(artifact_name)
    self.assertNotIn('before_prepare', spec._advices)
    registry.process_package('app')
    # cheat a bit by probing some private bits to see that the
    # relevant advice is planted but not executed
    self.assertEqual(1, len(spec._advices['before_prepare']))
    # for whatever reason, instance methods are not identities of
    # itself thus `is` cannot be used as the validation operator.
    self.assertEqual(
        spec._advices['before_prepare'][0][0],
        registry.prepare_export_location,
    )
def setUp(self):
    """Create a build dir containing a fake webpack binary and stub
    the toolchain's version lookup so no real binary is probed."""
    self.build_dir = utils.mkdtemp(self)
    # touch an empty file standing in for the webpack executable
    fake_binary = join(self.build_dir, 'webpack')
    open(fake_binary, 'w').close()
    # report a fixed version instead of invoking the binary
    utils.stub_item_attr_value(
        self, toolchain, 'get_bin_version', lambda p, kw: (1, 0, 0))
def test_calmjs_main_console_version_broken(self):
    """-V against an empty working set still exits 0 with placeholders."""
    stub_stdouts(self)
    # point the runtime at a working set with no calmjs dist installed
    stub_item_attr_value(
        self, runtime, 'default_working_set',
        pkg_resources.WorkingSet([mkdtemp(self)]))
    # make sure the bad case doesn't just blow up...
    with self.assertRaises(SystemExit) as e:
        runtime.main(['-V'])
    self.assertEqual(e.exception.args[0], 0)
    self.assertIn('? ? from ?', sys.stdout.getvalue())
def setup_fake_webpack(self):
    """Stub the dev module's version lookup and return the path to a
    fake webpack binary placed in a fresh temporary directory."""
    # pretend a webpack of version 1.0.0 is available
    stub_item_attr_value(
        self, dev, 'get_bin_version', lambda p, kw: (1, 0, 0))
    fake_binary = join(mkdtemp(self), 'webpack')
    open(fake_binary, 'w').close()
    return fake_binary
def test_stub_item_attr_value(self):
    """stub_item_attr_value replaces an attribute and restores the
    original object when cleanups run."""
    sentinel = object()

    class Dummy(object):
        foo = sentinel

    utils.stub_item_attr_value(self, Dummy, 'foo', None)
    # the stub is in effect...
    self.assertIsNone(Dummy.foo)
    self.doCleanups()
    # ...and the exact original object is back after cleanup
    self.assertIs(Dummy.foo, sentinel)
def test_stub_item_attr_value(self):
    """stub_item_attr_value swaps an attribute and cleanup restores it."""
    marker = object()

    class Dummy(object):
        foo = marker

    utils.stub_item_attr_value(self, Dummy, 'foo', None)
    self.assertIsNone(Dummy.foo)
    # running the registered cleanups restores the original object
    self.doCleanups()
    self.assertIs(Dummy.foo, marker)
def test_norm_args(self):
    """norm_args substitutes sys.argv[1:] only when given None;
    any provided list (even empty) is passed through unchanged."""
    cases = [
        # (stubbed sys.argv, [(input, expected), ...])
        (['script'], [(None, []), ([], []), (['arg'], ['arg'])]),
        (['script', '-h'], [(None, ['-h']), ([], []), (['arg'], ['arg'])]),
    ]
    for argv, expectations in cases:
        stub_item_attr_value(self, sys, 'argv', argv)
        for given, expected in expectations:
            self.assertEqual(runtime.norm_args(given), expected)
def test_rmtree_win32(self):
    """On win32, a failed rmtree is retried with the \\\\?\\ long path."""
    utils.stub_item_attr_value(self, sys, 'platform', 'win32')
    removed = []

    def fake_rmtree(path):
        # record each attempted path and always fail
        removed.append(path)
        raise IOError('fake')

    utils.stub_item_attr_value(self, utils, 'rmtree_', fake_rmtree)
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        utils.rmtree('C:\\Windows')
    # plain path first, then the extended-length path prefix retry
    self.assertEqual(removed, ['C:\\Windows', '\\\\?\\C:\\Windows'])
def setup_requirements_json(self):
    """Install a mock 'calmpy.pip' dist whose metadata carries a
    requirements.json, stub it as the default working set, return it."""
    # what kind of bizzaro world do the following users live in?
    requirements = {"require": {"setuptools": "25.1.6"}}
    provider = MockProvider({
        'requirements.json': json.dumps(requirements),
    })
    # seriously lolwat?
    mocked_dist = pkg_resources.Distribution(
        metadata=provider, project_name='calmpy.pip', version='0.0.0')
    working_set = pkg_resources.WorkingSet()
    working_set.add(mocked_dist)
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    return working_set
def test_yarn_install_package_json_no_overwrite_interactive(self): """ Most of these package_json testing will be done in the next test class specific for ``yarn init``. """ # Testing the implied init call stub_mod_call(self, cli) stub_stdouts(self) stub_stdin(self, 'n\n') stub_check_interactive(self, True) tmpdir = mkdtemp(self) os.chdir(tmpdir) # All the pre-made setup. app = make_dummy_dist(self, ( ('requires.txt', '\n'.join([])), ('package.json', json.dumps({ 'dependencies': { 'jquery': '~1.11.0' }, })), ), 'foo', '1.9.0') working_set = WorkingSet() working_set.add(app, self._calmjs_testing_tmpdir) stub_item_attr_value(self, dist, 'default_working_set', working_set) # We are going to have a fake package.json with open(join(tmpdir, 'package.json'), 'w') as fd: json.dump({}, fd) # capture the logging explicitly as the conditions which # determines how the errors are outputted differs from different # test harnesses. Verify that later. with pretty_logging(stream=StringIO()) as stderr: # This is faked. yarn.yarn_install('foo', callback=prompt_overwrite_json) self.assertIn( "Overwrite '%s'? (Yes/No) [No] " % join(tmpdir, 'package.json'), sys.stdout.getvalue()) # Ensure the error message. Normally this is printed through # stderr via distutils custom logger and our handler bridge for # that which is tested elsewhere. self.assertIn("not continuing with 'yarn install'", stderr.getvalue()) with open(join(tmpdir, 'package.json')) as fd: result = fd.read() # This should remain unchanged as no to overwrite is default. self.assertEqual(result, '{}')
def test_karma_runtime_integration_coverage(self):
    """--coverage produces a coverage report in the requested dir."""
    class DummyToolchain(NullToolchain):
        """
        Need this step to prepare some actual sources from this
        project, and we are cheating a bit due to the lack of actual
        registry setup.
        """

        def prepare(self, spec):
            # manually set up the source and the tests.
            main = resource_filename(
                'calmjs.dev', 'main.js')
            test_main = resource_filename(
                'calmjs.dev.tests', 'test_main.js')
            spec.update(dict(
                transpile_sourcepath={
                    'calmjs/dev/main': main,
                },
                test_module_paths_map={
                    'calmjs/test_main': test_main,
                },
            ))

    stub_stdouts(self)
    target = join(mkdtemp(self), 'target')
    build_dir = mkdtemp(self)
    coverage_report_dir = join(build_dir, 'coverage')
    # ensure this does not already exist
    self.assertFalse(exists(coverage_report_dir))
    stub_item_attr_value(
        self, mocks, 'dummy',
        ToolchainRuntime(DummyToolchain()),
    )
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'null = calmjs.testing.mocks:dummy\n'
    ),), 'example.package', '1.0')
    working_set = WorkingSet([self._calmjs_testing_tmpdir])
    rt = KarmaRuntime(self.driver, working_set=working_set)
    result = rt([
        '--coverage', '--cover-report-dir', coverage_report_dir,
        'null', '--export-target', target, '--build-dir', build_dir,
    ])
    # ensure coverage report created
    self.assertTrue(result['coverage_enable'])
    self.assertTrue(exists(coverage_report_dir))
def setup_requirements_json(self):
    """Install a mock calmpy.pip dist with requirements.json metadata
    as the stubbed default working set; return the working set."""
    # what kind of bizzaro world do the following users live in?
    requirements = {"require": {"setuptools": "25.1.6"}}
    mock_provider = MockProvider({
        'requirements.json': json.dumps(requirements),
    })
    # seriously lolwat?
    mock_dist = pkg_resources.Distribution(metadata=mock_provider,
                                           project_name='calmpy.pip',
                                           version='0.0.0')
    working_set = pkg_resources.WorkingSet()
    working_set.add(mock_dist)
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    return working_set
def test_yarn_install_package_json_no_overwrite_interactive(self): """ Most of these package_json testing will be done in the next test class specific for ``yarn init``. """ # Testing the implied init call stub_mod_call(self, cli) stub_stdouts(self) stub_stdin(self, 'n\n') stub_check_interactive(self, True) tmpdir = mkdtemp(self) os.chdir(tmpdir) # All the pre-made setup. app = make_dummy_dist(self, ( ('requires.txt', '\n'.join([])), ('package.json', json.dumps({ 'dependencies': {'jquery': '~1.11.0'}, })), ), 'foo', '1.9.0') working_set = WorkingSet() working_set.add(app, self._calmjs_testing_tmpdir) stub_item_attr_value(self, dist, 'default_working_set', working_set) # We are going to have a fake package.json with open(join(tmpdir, 'package.json'), 'w') as fd: json.dump({}, fd) # capture the logging explicitly as the conditions which # determines how the errors are outputted differs from different # test harnesses. Verify that later. with pretty_logging(stream=StringIO()) as stderr: # This is faked. yarn.yarn_install('foo', callback=prompt_overwrite_json) self.assertIn( "Overwrite '%s'? (Yes/No) [No] " % join(tmpdir, 'package.json'), sys.stdout.getvalue()) # Ensure the error message. Normally this is printed through # stderr via distutils custom logger and our handler bridge for # that which is tested elsewhere. self.assertIn("not continuing with 'yarn install'", stderr.getvalue()) with open(join(tmpdir, 'package.json')) as fd: result = fd.read() # This should remain unchanged as no to overwrite is default. self.assertEqual(result, '{}')
def setup_runtime(self):
    """Create three dummy dists (one with requires.txt chaining the
    other two) and return a Runtime exposing npm under the name 'foo'."""
    make_dummy_dist(self, (('package.json', json.dumps({
        'name': 'site',
        'dependencies': {
            'jquery': '~3.1.0',
        },
    })), ), 'example.package1', '1.0')
    make_dummy_dist(self, (('package.json', json.dumps({
        'name': 'site',
        'dependencies': {
            'underscore': '~1.8.3',
        },
    })), ), 'example.package2', '2.0')
    make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'example.package1',
            'example.package2',
        ])),
        ('package.json', json.dumps({
            'dependencies': {
                'backbone': '~1.3.2',
            },
        })),
    ), 'example.package3', '2.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    # Stub out the underlying data needed for the cli for the tests
    # to test against our custom data for reproducibility.
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    stub_mod_check_interactive(self, [cli], True)
    # Of course, apply a mock working set for the runtime instance
    # so it can use the npm runtime, however we will use a different
    # keyword.  Note that the runtime is invoked using foo.
    working_set = mocks.WorkingSet({
        'calmjs.runtime': [
            'foo = calmjs.npm:npm.runtime',
        ],
    })
    return runtime.Runtime(working_set=working_set)
def test_rmtree_test(self):
    """rmtree is quiet on missing paths and warns when removal fails."""
    path = mkdtemp(self)
    utils.rmtree(path)
    self.assertFalse(exists(path))
    # removing an already-removed path must not warn
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        utils.rmtree(path)
    self.assertFalse(w)
    # force the underlying rmtree to fail to trigger the warning path
    utils.stub_item_attr_value(
        self, utils, 'rmtree_', utils.fake_error(IOError))
    path2 = mkdtemp(self)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        utils.rmtree(path2)
    self.assertIn("rmtree failed to remove", str(w[-1].message))
def test_auto_self_reference(self):
    """A registry entry point naming itself yields the registry's own
    identity; same-named entries in other registries do not."""
    # ensure that the identity is returned
    working_set = mocks.WorkingSet({
        'calmjs.registry': [
            # correct self-referential definition
            'calmjs.registry = calmjs.registry:Registry',
            'calmjsregistry = calmjs.registry:Registry',
        ],
        'calmjsregistry': [
            # unrelated self-referential definition
            'calmjs.registry = calmjs.registry:Registry',
            # incorrect self-referential type
            'calmjsregistry = calmjs.module:ModuleRegistry',
        ],
    })
    # stub out real working sets because usage of standard APIs
    stub_item_attr_value(self, calmjs.registry, 'working_set', working_set)
    stub_item_attr_value(self, calmjs.base, 'working_set', working_set)
    with pretty_logging(stream=mocks.StringIO()) as stream:
        registry = calmjs.registry.Registry('calmjs.registry')
        self.assertFalse(registry.records)
        mismatched = registry.get('calmjsregistry')  # not the same name
        self.assertTrue(isinstance(mismatched, calmjs.registry.Registry))
        self.assertIsNot(mismatched, registry)
        # correct identity
        self.assertIs(registry, registry.get('calmjs.registry'))
        self.assertIn('calmjs.registry', registry.records)
        # unrelated registry also
        unrelated = mismatched.get('calmjs.registry')
        self.assertTrue(isinstance(unrelated, calmjs.registry.Registry))
        self.assertIsNot(unrelated, registry)
        mistyped = mismatched.get('calmjsregistry')
        # not a None
        self.assertTrue(mistyped)
        # also not identity, as they are not the same type.
        self.assertIsNot(mistyped, mismatched)
    self.assertIn(
        "registry 'calmjs.registry' has entry point 'calmjs.registry = "
        "calmjs.registry:Registry' which is the identity registration",
        stream.getvalue(),
    )
def test_auto_self_reference(self):
    """Duplicate of the sibling self-reference test: a registry entry
    point naming itself resolves to the registry's own identity."""
    # ensure that the identity is returned
    working_set = mocks.WorkingSet({
        'calmjs.registry': [
            # correct self-referential definition
            'calmjs.registry = calmjs.registry:Registry',
            'calmjsregistry = calmjs.registry:Registry',
        ],
        'calmjsregistry': [
            # unrelated self-referential definition
            'calmjs.registry = calmjs.registry:Registry',
            # incorrect self-referential type
            'calmjsregistry = calmjs.module:ModuleRegistry',
        ],
    })
    # stub out real working sets because usage of standard APIs
    stub_item_attr_value(self, calmjs.registry, 'working_set', working_set)
    stub_item_attr_value(self, calmjs.base, 'working_set', working_set)
    with pretty_logging(stream=mocks.StringIO()) as stream:
        registry = calmjs.registry.Registry('calmjs.registry')
        self.assertFalse(registry.records)
        mismatched = registry.get('calmjsregistry')  # not the same name
        self.assertTrue(isinstance(mismatched, calmjs.registry.Registry))
        self.assertIsNot(mismatched, registry)
        # correct identity
        self.assertIs(registry, registry.get('calmjs.registry'))
        self.assertIn('calmjs.registry', registry.records)
        # unrelated registry also
        unrelated = mismatched.get('calmjs.registry')
        self.assertTrue(isinstance(unrelated, calmjs.registry.Registry))
        self.assertIsNot(unrelated, registry)
        mistyped = mismatched.get('calmjsregistry')
        # not a None
        self.assertTrue(mistyped)
        # also not identity, as they are not the same type.
        self.assertIsNot(mistyped, mismatched)
    self.assertIn(
        "registry 'calmjs.registry' has entry point 'calmjs.registry = "
        "calmjs.registry:Registry' which is the identity registration",
        stream.getvalue(),
    )
def test_yarn_install_package_json_overwrite_interactive(self):
    """With overwrite=True, yarn_install replaces the package.json."""
    # Testing the implied init call
    stub_mod_call(self, cli)
    stub_stdin(self, 'y\n')  # accept the overwrite prompt
    stub_stdouts(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    # All the pre-made setup.
    app = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {
                'jquery': '~1.11.0'
            },
        })),
    ), 'foo', '1.9.0')
    working_set = WorkingSet()
    working_set.add(app, self._calmjs_testing_tmpdir)
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    # We are going to have a fake package.json
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        json.dump({}, fd)
    # This is faked.
    yarn.yarn_install('foo', overwrite=True)
    with open(join(tmpdir, 'package.json')) as fd:
        config = json.load(fd)
    # Overwritten
    self.assertEqual(config, {
        'dependencies': {
            'jquery': '~1.11.0'
        },
        'devDependencies': {},
        'name': 'foo',
    })
    # No log level set.
    self.assertEqual(sys.stdout.getvalue(), '')
    self.assertEqual(sys.stderr.getvalue(), '')
def test_missing_runtime_arg(self):
    """Invoking the karma runtime with no arguments prints usage help."""
    stub_stdouts(self)
    stub_item_attr_value(
        self, mocks, 'dummy',
        ToolchainRuntime(NullToolchain()),
    )
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'null = calmjs.testing.mocks:dummy\n'
    ),), 'example.package', '1.0')
    working_set = WorkingSet([self._calmjs_testing_tmpdir])
    rt = KarmaRuntime(self.driver, working_set=working_set)
    rt([])
    # standard help printed
    self.assertIn('usage:', sys.stdout.getvalue())
    self.assertIn(
        'karma testrunner integration for calmjs', sys.stdout.getvalue())
def test_karma_runtime_integration_default_abort_on_error(self):
    """Without -I, a karma failure aborts and yields a falsy result."""
    stub_stdouts(self)
    target = join(mkdtemp(self), 'target')
    build_dir = mkdtemp(self)
    stub_item_attr_value(
        self, mocks, 'dummy',
        ToolchainRuntime(NullToolchain()),
    )
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'null = calmjs.testing.mocks:dummy\n'
    ),), 'example.package', '1.0')
    working_set = WorkingSet([self._calmjs_testing_tmpdir])
    rt = KarmaRuntime(self.driver, working_set=working_set)
    result = rt(
        ['null', '--export-target', target, '--build-dir', build_dir])
    self.assertFalse(result)
def test_init_argparser_with_valid_toolchains(self):
    """Registered toolchain runtimes show up in the argparser help."""
    stub_item_attr_value(
        self, mocks, 'dummy',
        ToolchainRuntime(NullToolchain()),
    )
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'null = calmjs.testing.mocks:dummy\n'
    ),), 'example.package', '1.0')
    working_set = WorkingSet([self._calmjs_testing_tmpdir])
    runtime = KarmaRuntime(self.driver, working_set=working_set)
    argparser = runtime.argparser
    stream = mocks.StringIO()
    argparser.print_help(file=stream)
    self.assertIn('--test-registry', stream.getvalue())
    # the 'null' toolchain registered above must be listed
    self.assertIn('null', stream.getvalue())
def setUp(self):
    """Install a single 'foo' dist into a stubbed default working set,
    quiet stdout, and force interactive mode on."""
    remember_cwd(self)
    app = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'jquery': '~1.11.0'},
        })),
    ), 'foo', '1.9.0')
    working_set = WorkingSet()
    working_set.add(app, self._calmjs_testing_tmpdir)
    # Stub out the flatten_egginfo_json calls with one that uses our
    # custom working_set here.
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    # Quiet stdout from distutils logs
    stub_stdouts(self)
    # Force auto-detected interactive mode to True, because this is
    # typically executed within an interactive context.
    stub_check_interactive(self, True)
def setUp(self):
    """Install a single 'foo' dist into a stubbed default working set,
    quiet stdout, and force the cli module's interactive check on."""
    remember_cwd(self)
    app = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'jquery': '~1.11.0'},
        })),
    ), 'foo', '1.9.0')
    working_set = WorkingSet()
    working_set.add(app, self._calmjs_testing_tmpdir)
    # Stub out the flatten_egginfo_json calls with one that uses our
    # custom working_set here.
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    # Quiet stdout from distutils logs
    stub_stdouts(self)
    # Force auto-detected interactive mode to True, because this is
    # typically executed within an interactive context.
    stub_mod_check_interactive(self, [cli], True)
def test_write_config_override(self):
    """A custom karma_config_writer is used and older node versions warn."""
    nodejs_version = (6, 0, 0)

    def config_writer(karma_config, fd):
        # custom writer: inject a key and dump plain JSON
        karma_config['foo'] = 'bar'
        fd.write(json.dumps(karma_config))

    stub_item_attr_value(
        self, cli, 'get_node_version', lambda: nodejs_version)
    build_dir = mkdtemp(self)
    spec = Spec(
        build_dir=build_dir,
        karma_config={},
        karma_config_writer=config_writer,
    )
    driver = cli.KarmaDriver()
    with pretty_logging(
            logger='calmjs.dev', stream=mocks.StringIO()) as log:
        driver.write_config(spec)
    karma_conf_js = join(build_dir, 'karma.conf.js')
    self.assertIn("' with writer", log.getvalue())
    self.assertIn(karma_conf_js, log.getvalue())
    self.assertNotIn('WARNING', log.getvalue())
    # naturally, this is NOT a valid karma.conf.js since what was
    # written is just an ordinary JSON file.
    with open(karma_conf_js) as fd:
        self.assertEqual({'files': [], 'foo': 'bar'}, json.load(fd))
    # try writing again using a "lower" less supported nodejs version
    nodejs_version = (4, 9, 1)
    with pretty_logging(
            logger='calmjs.dev', stream=mocks.StringIO()) as log:
        driver.write_config(spec)
    self.assertIn('WARNING', log.getvalue())
    self.assertIn(
        "an 'Invalid config file' or 'Error: cannot find module",
        log.getvalue()
    )
def test_karma_runtime_integration_explicit_arguments(self):
    """Explicit registry/package flags propagate through the karma run."""
    stub_stdouts(self)
    target = join(mkdtemp(self), 'target')
    build_dir = mkdtemp(self)
    # expose a dummy toolchain runtime under calmjs.testing.mocks
    stub_item_attr_value(
        self, mocks, 'dummy',
        ToolchainRuntime(NullToolchain()),
    )
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'null = calmjs.testing.mocks:dummy\n'
    ),), 'example.package', '1.0')
    working_set = WorkingSet([self._calmjs_testing_tmpdir])
    rt = KarmaRuntime(self.driver, working_set=working_set)
    # -I: ignore karma exit status; nonexistent registry/package are
    # intentional to exercise the explicit-override warnings
    result = rt([
        '--test-registry', 'calmjs.no_such_registry',
        '--test-with-package', 'no_such_pkg',
        '-vv', '-I', 'null',
        '--export-target', target,
        '--build-dir', build_dir,
    ])
    self.assertIn('karma_config_path', result)
    self.assertTrue(exists(result['karma_config_path']))
    self.assertFalse(result.get('karma_abort_on_test_failure'))
    # karma fails (nothing to test) but the run continues due to -I
    self.assertIn(
        "karma exited with return code 1; continuing as specified",
        sys.stderr.getvalue()
    )
    self.assertIn(
        "spec has 'test_package_names' explicitly specified",
        sys.stderr.getvalue()
    )
    self.assertIn(
        "spec has 'calmjs_test_registry_names' explicitly specified",
        sys.stderr.getvalue()
    )
    self.assertIn(
        "karma driver to extract tests from packages ['no_such_pkg'] "
        "using registries ['calmjs.no_such_registry'] for testing",
        sys.stderr.getvalue()
    )
def test_nested_namespace(self):
    """resource_filename_mod_entry_point resolves a nested namespace."""
    self.called = None

    def recording_exists(path):
        # remember the path the indexer probed, then defer to the real check
        self.called = path
        return exists(path)

    ws = pkg_resources.WorkingSet([self.ds_egg_root])
    stub_item_attr_value(self, pkg_resources, 'working_set', ws)
    stub_item_attr_value(self, indexer, 'exists', recording_exists)
    ep = next(ws.iter_entry_points('dummyns.submod'))
    resolved = indexer.resource_filename_mod_entry_point(
        'dummyns.submod', ep)
    # the resolved location is exactly the one the exists stub saw
    self.assertEqual(resolved, self.called)
    with open(join(resolved, 'data.txt')) as fd:
        self.assertEqual(fd.read(), self.nested_data)
def test_relocated_distribution(self):
    """Resolution fails until the module dir exists at the new location."""
    root = mkdtemp(self)
    dummyns_path = join(root, 'dummyns')
    make_dummy_dist(self, (
        (
            'namespace_packages.txt',
            'dummyns\n',
        ),
        (
            'entry_points.txt',
            '[dummyns]\n'
            'dummyns = dummyns:attr\n',
        ),
    ), 'dummyns', '1.0', working_dir=root)
    working_set = pkg_resources.WorkingSet([
        root,
        self.ds_egg_root,
    ])
    # activate this as the working set
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    dummyns_ep = next(working_set.iter_entry_points('dummyns'))
    with pretty_logging(stream=StringIO()) as fd:
        p = indexer.resource_filename_mod_entry_point(
            'dummyns', dummyns_ep)
    # None, since the actual location has not been created yet
    self.assertIsNone(p)
    self.assertIn("does not exist", fd.getvalue())
    # retry with the module directory created at the expected location
    os.mkdir(dummyns_path)
    with pretty_logging(stream=StringIO()) as fd:
        p = indexer.resource_filename_mod_entry_point(
            'dummyns', dummyns_ep)
    # now it resolves cleanly with no log output
    self.assertEqual(normcase(p), normcase(dummyns_path))
    self.assertEqual('', fd.getvalue())
def test_yarn_install_package_json_overwrite_interactive(self):
    """yarn_install with overwrite=True replaces an existing package.json."""
    # Testing the implied init call
    stub_mod_call(self, cli)
    # 'y\n' answers the interactive overwrite prompt affirmatively
    stub_stdin(self, 'y\n')
    stub_stdouts(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    # All the pre-made setup.
    app = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'jquery': '~1.11.0'},
        })),
    ), 'foo', '1.9.0')
    working_set = WorkingSet()
    working_set.add(app, self._calmjs_testing_tmpdir)
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    # We are going to have a fake package.json
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        json.dump({}, fd)
    # This is faked.
    yarn.yarn_install('foo', overwrite=True)
    with open(join(tmpdir, 'package.json')) as fd:
        config = json.load(fd)
    # Overwritten with the metadata derived from the 'foo' dist
    self.assertEqual(config, {
        'dependencies': {'jquery': '~1.11.0'},
        'devDependencies': {},
        'name': 'foo',
    })
    # No log level set.
    self.assertEqual(sys.stdout.getvalue(), '')
    self.assertEqual(sys.stderr.getvalue(), '')
def test_nested_namespace(self):
    """A nested namespace entry point resolves via the stubbed indexer."""
    self.called = None

    def tracked_exists(candidate):
        # record what the indexer asks about before answering truthfully
        self.called = candidate
        return exists(candidate)

    working_set = pkg_resources.WorkingSet([self.ds_egg_root])
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    stub_item_attr_value(self, indexer, 'exists', tracked_exists)
    entry = next(working_set.iter_entry_points('dummyns.submod'))
    path = indexer.resource_filename_mod_entry_point(
        'dummyns.submod', entry)
    # the returned path must be the one the stub was queried with
    self.assertEqual(path, self.called)
    with open(join(path, 'data.txt')) as fd:
        data = fd.read()
    self.assertEqual(data, self.nested_data)
def test_no_calmjs_dev(self):
    """karma_webpack degrades gracefully when calmjs.dev is missing."""
    real_import = builtins.__import__

    def fake_import(name, *args, **kwargs):
        # simulate an environment without the calmjs.dev package
        if name == 'calmjs.dev':
            raise ImportError("No module named 'calmjs.dev'")
        return real_import(name, *args, **kwargs)

    stub_item_attr_value(self, builtins, '__import__', fake_import)
    spec = Spec()
    # just to cover the fake import above
    from calmjs.toolchain import Spec as Spec_
    self.assertIs(Spec, Spec_)
    with pretty_logging(stream=StringIO()) as stream:
        karma_webpack(spec)
    # no config is produced and the failure is logged, not raised
    self.assertNotIn('karma_config', spec)
    self.assertIn(
        "package 'calmjs.dev' not available; cannot apply webpack",
        stream.getvalue(),
    )
def test_no_calmjs_dev(self):
    """karma_requirejs logs and skips when calmjs.dev cannot be imported."""
    # keep a reference to the real import before patching it
    __import__ = builtins.__import__

    def import_(name, *a, **kw):
        # fake the absence of the calmjs.dev package only
        if name == 'calmjs.dev':
            raise ImportError("No module named 'calmjs.dev'")
        return __import__(name, *a, **kw)

    stub_item_attr_value(self, builtins, '__import__', import_)
    spec = Spec()
    # just to cover the fake import above
    from calmjs.toolchain import Spec as Spec_
    self.assertIs(Spec, Spec_)
    with pretty_logging(stream=StringIO()) as s:
        karma_requirejs(spec)
    # failure is logged rather than raised; spec left untouched
    self.assertNotIn('karma_config', spec)
    self.assertIn(
        "package 'calmjs.dev' not available; cannot apply requirejs",
        s.getvalue(),
    )
def test_standard(self):
    """Entry point resolution succeeds for a properly laid-out namespace."""
    d_egg_root = join(mkdtemp(self), 'dummyns')
    make_dummy_dist(self, (
        ('namespace_packages.txt', 'dummyns\n'),
        ('entry_points.txt', '[dummyns]\n'
                             'dummyns = dummyns:attr\n'),
    ), 'dummyns', '1.0', working_dir=d_egg_root)
    ws = pkg_resources.WorkingSet([d_egg_root, self.ds_egg_root])
    # resource_filename resolves through the active working_set, so
    # install the one holding our mocked distributions
    stub_item_attr_value(self, pkg_resources, 'working_set', ws)
    moddir = join(d_egg_root, 'dummyns')
    os.makedirs(moddir)
    # make this also a proper module with an __init__.py
    with open(join(moddir, '__init__.py'), 'w') as fd:
        fd.write('')
    ep = next(ws.iter_entry_points('dummyns'))
    resolved = indexer.resource_filename_mod_entry_point('dummyns', ep)
    # finally, this should work.
    self.assertEqual(normcase(resolved), normcase(moddir))
def test_denormalized_package_names(self):
    """Artifact lookups work for both raw and safe_name package names."""
    working_dir = utils.mkdtemp(self)
    # project name with underscores that safe_name would normalize
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'full.js = calmjs_testbuild:full',
        ])),
    ), 'de_normal_name', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    # stub the default working set in calmjs.dist for the resolver
    # to work.
    utils.stub_item_attr_value(self, dist, 'default_working_set', mock_ws)
    # still specify the working set.
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    self.assertEqual(
        1, len(list(registry.iter_records_for('de_normal_name'))))
    # also test internal consistency
    self.assertIn('de_normal_name', registry.compat_builders['full'])
    self.assertIn('de_normal_name', registry.packages)
    # both spellings must resolve to the same artifact file
    default = registry.get_artifact_filename('de_normal_name', 'full.js')
    normal = registry.get_artifact_filename(
        safe_name('de_normal_name'), 'full.js')
    self.assertEqual(default, normal)
def test_build_artifacts_success(self):
    """Artifacts build successfully and their metadata is recorded.

    Also verifies that the recorded 'calmjs_artifacts' metadata only
    grows across rebuilds with additional entry points.
    """
    # inject dummy module and add cleanup
    mod = ModuleType('calmjs_testing_dummy')
    mod.extra = generic_builder
    mod.complete = generic_builder
    mod.partial = generic_builder
    self.addCleanup(sys.modules.pop, 'calmjs_testing_dummy')
    sys.modules['calmjs_testing_dummy'] = mod

    working_dir = utils.mkdtemp(self)
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'calmjs',
        ])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'artifact.js = calmjs_testing_dummy:complete',
            'partial.js = calmjs_testing_dummy:partial',
        ])),
    ), 'app', '1.0', working_dir=working_dir)
    # mock a version of calmjs within that environment too
    utils.make_dummy_dist(self, (
        ('entry_points.txt', ''),
    ), 'calmjs', '1.0', working_dir=working_dir)

    # kw defaults to None rather than {} to avoid the shared mutable
    # default pitfall; the argument is accepted only to match the
    # signature of the stubbed get_bin_version_str and is ignored.
    def version(bin_path, version_flag='-v', kw=None):
        return '0.0.0'

    mock_ws = WorkingSet([working_dir])
    utils.stub_item_attr_value(self, dist, 'default_working_set', mock_ws)
    utils.stub_item_attr_value(
        self, artifact, 'get_bin_version_str', version)

    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    # quick check of the artifact metadata beforehand
    self.assertEqual({}, registry.get_artifact_metadata('app'))
    registry.process_package('app')
    complete = list(registry.resolve_artifacts_by_builder_compat(
        ['app'], 'complete'))
    partial = list(registry.resolve_artifacts_by_builder_compat(
        ['app'], 'partial'))
    self.assertEqual(len(complete), 1)
    self.assertEqual(len(partial), 1)
    self.assertEqual(basename(complete[0]), 'artifact.js')
    self.assertEqual(basename(partial[0]), 'partial.js')
    # the generic builder writes the package name as the content
    with open(complete[0]) as fd:
        self.assertEqual(fd.read(), 'app')
    with open(partial[0]) as fd:
        self.assertEqual(fd.read(), 'app')
    # full metadata: builder references, toolchain bases and versions
    self.assertEqual({
        'calmjs_artifacts': {
            'artifact.js': {
                'builder': 'calmjs_testing_dummy:complete',
                'toolchain_bases': [
                    {'calmjs.testing.artifact:ArtifactToolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                    {'calmjs.toolchain:NullToolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                    {'calmjs.toolchain:Toolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                ],
                'toolchain_bin': ['artifact', '0.0.0'],
            },
            'partial.js': {
                'builder': 'calmjs_testing_dummy:partial',
                'toolchain_bases': [
                    {'calmjs.testing.artifact:ArtifactToolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                    {'calmjs.toolchain:NullToolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                    {'calmjs.toolchain:Toolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                ],
                'toolchain_bin': ['artifact', '0.0.0'],
            },
        },
        'versions': [
            'app 1.0',
            'calmjs 1.0',
        ],
    }, registry.get_artifact_metadata('app'))

    # test that the 'calmjs_artifacts' listing only grows - the only
    # way to clean this is to remove and rebuild egg-info directly.
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'extra.js = calmjs_testing_dummy:extra',
        ])),
    ), 'app', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    utils.stub_item_attr_value(self, dist, 'default_working_set', mock_ws)
    utils.stub_item_attr_value(
        self, artifact, 'get_bin_version_str', version)
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    registry.process_package('app')
    self.assertEqual(3, len(registry.get_artifact_metadata('app')[
        'calmjs_artifacts']))
    self.assertIn('extra.js', registry.get_artifact_metadata('app')[
        'calmjs_artifacts'])

    # try again using the artifact builder
    from calmjs.registry import _inst
    _inst.records.pop('calmjs.artifacts', None)
    self.addCleanup(_inst.records.pop, 'calmjs.artifacts')
    _inst.records['calmjs.artifacts'] = registry
    builder = ArtifactBuilder('calmjs.artifacts')
    self.assertTrue(builder(['app']))
def setUp(self):
    """Register dummy artifact builders covering distinct failure modes.

    One dummy dist is created per scenario: a missing builder, a
    builder with the wrong signature, one returning malformed output,
    one returning a blank Spec, and a valid no-op builder.
    """
    # bad dummy builder
    def bad_builder():
        "Wrong function signature"

    # produces wrong output
    def malformed_builder(package_names, export_target):
        "does not produce an artifact"
        return NullToolchain()

    def blank_spec(package_names, export_target):
        "does not produce an artifact"
        return NullToolchain(), Spec()

    # nothing dummy builder
    def nothing_builder(package_names, export_target):
        "does not produce an artifact"
        return NullToolchain(), Spec(export_target=export_target)

    # inject dummy module and add cleanup
    mod = ModuleType('calmjs_testing_dummy')
    mod.bad_builder = bad_builder
    mod.nothing_builder = nothing_builder
    mod.malformed_builder = malformed_builder
    mod.blank_spec = blank_spec
    self.addCleanup(sys.modules.pop, 'calmjs_testing_dummy')
    sys.modules['calmjs_testing_dummy'] = mod

    working_dir = utils.mkdtemp(self)
    # 'app' also references a builder that does not exist at all
    utils.make_dummy_dist(self, (('entry_points.txt', '\n'.join([
        '[calmjs.artifacts]',
        'not_exist.js = calmjs_testing_dummy:not_exist',
        'bad.js = calmjs_testing_dummy:bad_builder',
        'nothing.js = calmjs_testing_dummy:nothing_builder',
    ])),), 'app', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (('entry_points.txt', '\n'.join([
        '[calmjs.artifacts]',
        'bad.js = calmjs_testing_dummy:bad_builder',
        'nothing.js = calmjs_testing_dummy:nothing_builder',
    ])),), 'bad', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (('entry_points.txt', '\n'.join([
        '[calmjs.artifacts]',
        'malformed.js = calmjs_testing_dummy:malformed_builder',
    ])),), 'malformed', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (('entry_points.txt', '\n'.join([
        '[calmjs.artifacts]',
        'blank.js = calmjs_testing_dummy:blank_spec',
    ])),), 'blank', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (('entry_points.txt', '\n'.join([
        '[calmjs.artifacts]',
        'nothing.js = calmjs_testing_dummy:nothing_builder',
    ])),), 'nothing', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    # route default_working_set lookups through the mocked set
    utils.stub_item_attr_value(self, dist,
                               'default_working_set', mock_ws)
    self.registry = ArtifactRegistry('calmjs.artifacts',
                                     _working_set=mock_ws)
def test_build_artifacts_success(self):
    """Artifacts build successfully and their metadata is recorded.

    Also checks that 'calmjs_artifacts' metadata only accumulates
    entries across rebuilds with additional entry points.
    """
    # inject dummy module and add cleanup
    mod = ModuleType('calmjs_testing_dummy')
    mod.extra = generic_builder
    mod.complete = generic_builder
    mod.partial = generic_builder
    self.addCleanup(sys.modules.pop, 'calmjs_testing_dummy')
    sys.modules['calmjs_testing_dummy'] = mod

    working_dir = utils.mkdtemp(self)
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'calmjs',
        ])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'artifact.js = calmjs_testing_dummy:complete',
            'partial.js = calmjs_testing_dummy:partial',
        ])),
    ), 'app', '1.0', working_dir=working_dir)
    # mock a version of calmjs within that environment too
    utils.make_dummy_dist(self, (
        ('entry_points.txt', ''),
    ), 'calmjs', '1.0', working_dir=working_dir)

    # kw defaults to None rather than {} to avoid the shared mutable
    # default pitfall; the argument only mirrors the signature of the
    # stubbed get_bin_version_str and is never used.
    def version(bin_path, version_flag='-v', kw=None):
        return '0.0.0'

    mock_ws = WorkingSet([working_dir])
    utils.stub_item_attr_value(self, dist, 'default_working_set', mock_ws)
    utils.stub_item_attr_value(
        self, artifact, 'get_bin_version_str', version)

    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    # quick check of the artifact metadata beforehand
    self.assertEqual({}, registry.get_artifact_metadata('app'))
    registry.process_package('app')
    complete = list(
        registry.resolve_artifacts_by_builder_compat(['app'], 'complete'))
    partial = list(
        registry.resolve_artifacts_by_builder_compat(['app'], 'partial'))
    self.assertEqual(len(complete), 1)
    self.assertEqual(len(partial), 1)
    self.assertEqual(basename(complete[0]), 'artifact.js')
    self.assertEqual(basename(partial[0]), 'partial.js')
    # the generic builder writes the package name as the content
    with open(complete[0]) as fd:
        self.assertEqual(fd.read(), 'app')
    with open(partial[0]) as fd:
        self.assertEqual(fd.read(), 'app')
    # full metadata: builder references, toolchain bases and versions
    self.assertEqual({
        'calmjs_artifacts': {
            'artifact.js': {
                'builder': 'calmjs_testing_dummy:complete',
                'toolchain_bases': [
                    {'calmjs.testing.artifact:ArtifactToolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                    {'calmjs.toolchain:NullToolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                    {'calmjs.toolchain:Toolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                ],
                'toolchain_bin': ['artifact', '0.0.0'],
            },
            'partial.js': {
                'builder': 'calmjs_testing_dummy:partial',
                'toolchain_bases': [
                    {'calmjs.testing.artifact:ArtifactToolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                    {'calmjs.toolchain:NullToolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                    {'calmjs.toolchain:Toolchain': {
                        'project_name': 'calmjs',
                        'version': '1.0',
                    }},
                ],
                'toolchain_bin': ['artifact', '0.0.0'],
            },
        },
        'versions': [
            'app 1.0',
            'calmjs 1.0',
        ],
    }, registry.get_artifact_metadata('app'))

    # test that the 'calmjs_artifacts' listing only grows - the only
    # way to clean this is to remove and rebuild egg-info directly.
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'extra.js = calmjs_testing_dummy:extra',
        ])),
    ), 'app', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    utils.stub_item_attr_value(self, dist, 'default_working_set', mock_ws)
    utils.stub_item_attr_value(
        self, artifact, 'get_bin_version_str', version)
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    registry.process_package('app')
    self.assertEqual(
        3, len(registry.get_artifact_metadata('app')['calmjs_artifacts']))
    self.assertIn(
        'extra.js',
        registry.get_artifact_metadata('app')['calmjs_artifacts'])

    # try again using the artifact builder
    from calmjs.registry import _inst
    _inst.records.pop('calmjs.artifacts', None)
    self.addCleanup(_inst.records.pop, 'calmjs.artifacts')
    _inst.records['calmjs.artifacts'] = registry
    builder = ArtifactBuilder('calmjs.artifacts')
    self.assertTrue(builder(['app']))
def test_basic(self):
    """Artifact filename lookup and dependency-ordered resolution."""
    working_dir = utils.mkdtemp(self)
    # 'base' has no requirements; lib1/lib2 require base; app1
    # requires both libs, forming a small dependency graph
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
        ])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'full.js = calmjs_testbuild:full',
            'base.lib.js = calmjs_testbuild:lib',
        ])),
    ), 'base', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'base',
        ])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'full.js = calmjs_testbuild:full',
            'lib1.lib.js = calmjs_testbuild:lib',
        ])),
    ), 'lib1', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'base',
        ])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'full.js = calmjs_testbuild:full',
            'lib2.lib.js = calmjs_testbuild_extended:lib',
        ])),
    ), 'lib2', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'lib1',
            'lib2',
        ])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'full.js = calmjs_testbuild:full',
            # this one doesn't provide a standalone library
        ])),
    ), 'app1', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    # stub the default working set in calmjs.dist for the resolver
    # to work.
    utils.stub_item_attr_value(self, dist, 'default_working_set', mock_ws)
    # still specify the working set.
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    self.assertNotEqual(len(list(registry.iter_records())), 0)
    # the artifact filename lives under the dist's egg-info directory
    self.assertEqual(
        normcase(join(
            working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
            'lib1.lib.js'
        )),
        normcase(registry.get_artifact_filename('lib1', 'lib1.lib.js')),
    )
    # unknown package or unknown rule resolves to nothing
    self.assertEqual([], list(registry.resolve_artifacts_by_builder_compat(
        ['no_such_package'], 'full')))
    self.assertEqual([], list(registry.resolve_artifacts_by_builder_compat(
        ['lib1'], 'no_such_rule')))
    self.assertPathsEqual([
        join(working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
             'full.js'),
    ], list(registry.resolve_artifacts_by_builder_compat(
        ['lib1'], 'full')))
    # dependencies=True yields requirements before their dependents
    self.assertPathsEqual([
        join(working_dir, 'base-1.0.egg-info', 'calmjs_artifacts',
             'base.lib.js'),
        join(working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
             'lib1.lib.js'),
    ], list(registry.resolve_artifacts_by_builder_compat(
        ['lib1'], 'lib', dependencies=True)))
    self.assertPathsEqual([
        join(working_dir, 'base-1.0.egg-info', 'calmjs_artifacts',
             'base.lib.js'),
        join(working_dir, 'lib2-1.0.egg-info', 'calmjs_artifacts',
             'lib2.lib.js'),
        join(working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
             'lib1.lib.js'),
    ], list(registry.resolve_artifacts_by_builder_compat(
        ['lib2', 'lib1'], 'lib', dependencies=True)))
    self.assertPathsEqual([
        join(working_dir, 'base-1.0.egg-info', 'calmjs_artifacts',
             'base.lib.js'),
        join(working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
             'lib1.lib.js'),
        join(working_dir, 'lib2-1.0.egg-info', 'calmjs_artifacts',
             'lib2.lib.js'),
    ], list(registry.resolve_artifacts_by_builder_compat(
        ['app1'], 'lib', dependencies=True)))
    # reverse lookup maps the artifact path back to its entry point
    entry_point = registry.belongs_to(join(
        working_dir, 'base-1.0.egg-info', 'calmjs_artifacts',
        'base.lib.js',
    ))
    self.assertEqual('base', entry_point.dist.project_name)
    self.assertEqual('base.lib.js', entry_point.name)
def test_basic(self):
    """Artifact filename lookup plus dependency-aware resolution order."""
    working_dir = utils.mkdtemp(self)
    # dists form a diamond: base <- lib1, base <- lib2, app1 <- lib1+lib2
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'full.js = calmjs_testbuild:full',
            'base.lib.js = calmjs_testbuild:lib',
        ])),
    ), 'base', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'base',
        ])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'full.js = calmjs_testbuild:full',
            'lib1.lib.js = calmjs_testbuild:lib',
        ])),
    ), 'lib1', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'base',
        ])),
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'full.js = calmjs_testbuild:full',
            'lib2.lib.js = calmjs_testbuild_extended:lib',
        ])),
    ), 'lib2', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(
        self, (
            ('requires.txt', '\n'.join([
                'lib1',
                'lib2',
            ])),
            (
                'entry_points.txt', '\n'.join([
                    '[calmjs.artifacts]',
                    'full.js = calmjs_testbuild:full',
                    # this one doesn't provide a standalone library
                ])),
        ), 'app1', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    # stub the default working set in calmjs.dist for the resolver
    # to work.
    utils.stub_item_attr_value(self, dist, 'default_working_set', mock_ws)
    # still specify the working set.
    registry = ArtifactRegistry('calmjs.artifacts', _working_set=mock_ws)
    self.assertNotEqual(len(list(registry.iter_records())), 0)
    # artifact filenames resolve under the dist's egg-info directory
    self.assertEqual(
        normcase(
            join(working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
                 'lib1.lib.js')),
        normcase(registry.get_artifact_filename('lib1', 'lib1.lib.js')),
    )
    # unknown package or unknown rule resolves to nothing
    self.assertEqual([], list(
        registry.resolve_artifacts_by_builder_compat(
            ['no_such_package'], 'full')))
    self.assertEqual([], list(
        registry.resolve_artifacts_by_builder_compat(
            ['lib1'], 'no_such_rule')))
    self.assertPathsEqual([
        join(working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
             'full.js'),
    ], list(registry.resolve_artifacts_by_builder_compat(['lib1'], 'full')))
    # dependencies=True yields requirements before their dependents
    self.assertPathsEqual([
        join(working_dir, 'base-1.0.egg-info', 'calmjs_artifacts',
             'base.lib.js'),
        join(working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
             'lib1.lib.js'),
    ], list(
        registry.resolve_artifacts_by_builder_compat(
            ['lib1'], 'lib', dependencies=True)))
    self.assertPathsEqual([
        join(working_dir, 'base-1.0.egg-info', 'calmjs_artifacts',
             'base.lib.js'),
        join(working_dir, 'lib2-1.0.egg-info', 'calmjs_artifacts',
             'lib2.lib.js'),
        join(working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
             'lib1.lib.js'),
    ], list(
        registry.resolve_artifacts_by_builder_compat(
            ['lib2', 'lib1'], 'lib', dependencies=True)))
    self.assertPathsEqual([
        join(working_dir, 'base-1.0.egg-info', 'calmjs_artifacts',
             'base.lib.js'),
        join(working_dir, 'lib1-1.0.egg-info', 'calmjs_artifacts',
             'lib1.lib.js'),
        join(working_dir, 'lib2-1.0.egg-info', 'calmjs_artifacts',
             'lib2.lib.js'),
    ], list(
        registry.resolve_artifacts_by_builder_compat(
            ['app1'], 'lib', dependencies=True)))
    # reverse lookup maps an artifact path back to its entry point
    entry_point = registry.belongs_to(
        join(
            working_dir, 'base-1.0.egg-info', 'calmjs_artifacts',
            'base.lib.js',
        ))
    self.assertEqual('base', entry_point.dist.project_name)
    self.assertEqual('base.lib.js', entry_point.name)
def setUp(self):
    """Stub the binary version lookup to a fixed (1, 0, 0) for all tests."""
    def fixed_version(path, kw):
        # any binary path reports version 1.0.0
        return (1, 0, 0)

    utils.stub_item_attr_value(
        self, toolchain, 'get_bin_version', fixed_version)
def setUp(self):
    """Create dummy dists whose artifact builders each misbehave uniquely.

    Covers: nonexistent builder reference, wrong builder signature,
    malformed return value, blank Spec, and a valid no-op builder.
    """
    # bad dummy builder
    def bad_builder():
        "Wrong function signature"

    # produces wrong output
    def malformed_builder(package_names, export_target):
        "does not produce an artifact"
        return NullToolchain()

    def blank_spec(package_names, export_target):
        "does not produce an artifact"
        return NullToolchain(), Spec()

    # nothing dummy builder
    def nothing_builder(package_names, export_target):
        "does not produce an artifact"
        return NullToolchain(), Spec(export_target=export_target)

    # inject dummy module and add cleanup
    mod = ModuleType('calmjs_testing_dummy')
    mod.bad_builder = bad_builder
    mod.nothing_builder = nothing_builder
    mod.malformed_builder = malformed_builder
    mod.blank_spec = blank_spec
    self.addCleanup(sys.modules.pop, 'calmjs_testing_dummy')
    sys.modules['calmjs_testing_dummy'] = mod

    working_dir = utils.mkdtemp(self)
    # 'app' additionally references a builder that does not exist
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'not_exist.js = calmjs_testing_dummy:not_exist',
            'bad.js = calmjs_testing_dummy:bad_builder',
            'nothing.js = calmjs_testing_dummy:nothing_builder',
        ])),
    ), 'app', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'bad.js = calmjs_testing_dummy:bad_builder',
            'nothing.js = calmjs_testing_dummy:nothing_builder',
        ])),
    ), 'bad', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'malformed.js = calmjs_testing_dummy:malformed_builder',
        ])),
    ), 'malformed', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'blank.js = calmjs_testing_dummy:blank_spec',
        ])),
    ), 'blank', '1.0', working_dir=working_dir)
    utils.make_dummy_dist(self, (
        ('entry_points.txt', '\n'.join([
            '[calmjs.artifacts]',
            'nothing.js = calmjs_testing_dummy:nothing_builder',
        ])),
    ), 'nothing', '1.0', working_dir=working_dir)
    mock_ws = WorkingSet([working_dir])
    # route default_working_set lookups through the mocked set
    utils.stub_item_attr_value(self, dist,
                               'default_working_set', mock_ws)
    self.registry = ArtifactRegistry(
        'calmjs.artifacts', _working_set=mock_ws)
def test_prepare_assemble(self):
    """prepare() then assemble() emits build.js/config.js from the spec."""
    tmpdir = utils.mkdtemp(self)
    with open(join(tmpdir, 'r.js'), 'w'):
        # mock a r.js file.
        pass
    spec = Spec(
        # this is not written
        export_target=join(tmpdir, 'bundle.js'),
        build_dir=tmpdir,
        transpiled_modpaths={
            'example/module': '/path/to/src/example/module'
        },
        bundled_modpaths={
            'bundled_pkg': '/path/to/bundled/index',
            'bundled_empty': 'empty:',
        },
        plugins_modpaths={'loader/plugin!resource/name': '/resource/name'},
        transpiled_targets={
            'example/module': '/path/to/src/example/module.js',
        },
        bundled_targets={
            'bundled_pkg': '/path/to/bundled/index.js',
            'bundled_txt': '/path/to/bundled/txt',
            'bundled_dir': '/path/to/bundled/dir.js',
            'bundled_empty': 'empty:',
        },
        plugins_targets={
            'resource/name': '/resource/name',
        },
        export_module_names=[
            'example/module',
            'bundled_dir',
            'bundled_pkg',
            'bundled_txt',
            'bundled_empty',
            'loader/plugin!resource/name',
        ],
    )
    # we are going to fake the is_file checks; anything ending in
    # 'dir.js' is treated as not-a-file
    utils.stub_item_attr_value(
        self, toolchain, 'isfile', lambda x: not x.endswith('dir.js'))
    rjs = toolchain.RJSToolchain()
    spec[rjs.rjs_bin_key] = join(tmpdir, 'r.js')
    rjs.prepare(spec)
    # skip the compile step as those entries are manually applied.
    with pretty_logging(logger='calmjs.rjs', stream=mocks.StringIO()) as s:
        # the parser will try to load the file
        rjs.assemble(spec)
    # the fake source files do not exist, so the parser complains
    self.assertIn('No such file or directory', s.getvalue())
    self.assertIn(
        join(*('path/to/src/example/module.js'.split('/'))),
        s.getvalue(),
    )
    self.assertTrue(exists(join(tmpdir, 'build.js')))
    self.assertTrue(exists(join(tmpdir, 'config.js')))
    with open(join(tmpdir, 'build.js')) as fd:
        # strip off the header and footer as this is for r.js
        build_js = json.loads(''.join(fd.readlines()[1:-1]))
    with open(join(tmpdir, 'config.js')) as fd:
        # strip off the header and footer as this is for r.js
        config_js = json.loads(''.join(fd.readlines()[4:-10]))
    self.assertEqual(build_js['paths'], {
        'bundled_empty': 'empty:',
    })
    self.assertEqual(build_js['include'], [
        'example/module',
        'bundled_dir',
        'bundled_pkg',
        'bundled_txt',
        'bundled_empty',
        'loader/plugin!resource/name',
    ])
    # NOTE(review): entries that passed the isfile stub gain a '?'
    # suffix in config paths — presumably an r.js cache-bust marker;
    # confirm against the toolchain implementation.
    self.assertEqual(
        config_js['paths'], {
            'example/module': '/path/to/src/example/module.js?',
            'bundled_pkg': '/path/to/bundled/index.js?',
            'bundled_txt': '/path/to/bundled/txt',
            'bundled_dir': '/path/to/bundled/dir.js',
            'resource/name': '/resource/name',
        })
    self.assertEqual(config_js['include'], [])
def test_prepare_assemble(self):
    """prepare() then assemble() writes build.js/config.js for r.js."""
    tmpdir = utils.mkdtemp(self)
    with open(join(tmpdir, 'r.js'), 'w'):
        # mock a r.js file.
        pass
    spec = Spec(
        # this is not written
        export_target=join(tmpdir, 'bundle.js'),
        build_dir=tmpdir,
        transpiled_modpaths={
            'example/module': '/path/to/src/example/module'
        },
        bundled_modpaths={
            'bundled_pkg': '/path/to/bundled/index',
            'bundled_empty': 'empty:',
        },
        plugins_modpaths={
            'loader/plugin!resource/name': '/resource/name'
        },
        transpiled_targetpaths={
            'example/module': '/path/to/src/example/module.js',
        },
        bundled_targetpaths={
            'bundled_pkg': '/path/to/bundled/index.js',
            'bundled_txt': '/path/to/bundled/txt',
            'bundled_dir': '/path/to/bundled/dir.js',
            'bundled_empty': 'empty:',
        },
        plugins_targetpaths={
            'resource/name': '/resource/name',
        },
        export_module_names=[
            'example/module',
            'bundled_dir',
            'bundled_pkg',
            'bundled_txt',
            'bundled_empty',
            'loader/plugin!resource/name',
        ],
    )
    # we are going to fake the is_file checks; anything ending in
    # 'dir.js' is treated as not-a-file
    utils.stub_item_attr_value(
        self, toolchain, 'isfile', lambda x: not x.endswith('dir.js'))
    rjs = toolchain.RJSToolchain()
    spec[rjs.rjs_bin_key] = join(tmpdir, 'r.js')
    rjs.prepare(spec)
    # skip the compile step as those entries are manually applied.
    with pretty_logging(logger='calmjs.rjs', stream=mocks.StringIO()) as s:
        # the parser will try to load the file
        rjs.assemble(spec)
    # the fake source files do not exist, so the parser complains
    self.assertIn('No such file or directory', s.getvalue())
    self.assertIn(
        join(*('path/to/src/example/module.js'.split('/'))),
        s.getvalue(),
    )
    self.assertTrue(exists(join(tmpdir, 'build.js')))
    self.assertTrue(exists(join(tmpdir, 'config.js')))
    with open(join(tmpdir, 'build.js')) as fd:
        # strip off the header and footer as this is for r.js
        build_js = json.loads(''.join(fd.readlines()[1:-1]))
    with open(join(tmpdir, 'config.js')) as fd:
        # strip off the header and footer as this is for r.js
        config_js = json.loads(''.join(fd.readlines()[4:-10]))
    self.assertEqual(build_js['paths'], {
        'bundled_empty': 'empty:',
    })
    self.assertEqual(build_js['include'], [
        'example/module',
        'bundled_dir',
        'bundled_pkg',
        'bundled_txt',
        'bundled_empty',
        'loader/plugin!resource/name',
    ])
    # NOTE(review): entries that passed the isfile stub gain a '?'
    # suffix in config paths — presumably an r.js cache-bust marker;
    # confirm against the toolchain implementation.
    self.assertEqual(config_js['paths'], {
        'example/module': '/path/to/src/example/module.js?',
        'bundled_pkg': '/path/to/bundled/index.js?',
        'bundled_txt': '/path/to/bundled/txt',
        'bundled_dir': '/path/to/bundled/dir.js',
        'resource/name': '/resource/name',
    })
    self.assertEqual(config_js['include'], [])