Example #1
    def setUp(self):
        # save working directory
        remember_cwd(self)

        # All the pre-made setup.
        stub_mod_call(self, cli)
        app = make_dummy_dist(self, (
            ('requires.txt', '\n'.join([])),
            ('package.json', json.dumps({
                'dependencies': {'jquery': '~1.11.0'},
            })),
        ), 'foo', '1.9.0')
        underscore = make_dummy_dist(self, (
            ('requires.txt', '\n'.join([])),
            ('package.json', json.dumps({
                'dependencies': {'underscore': '~1.8.0'},
            })),
        ), 'underscore', '1.8.0')
        named = make_dummy_dist(self, (
            ('requires.txt', '\n'.join([])),
            ('package.json', json.dumps({
                'dependencies': {'jquery': '~3.0.0'},
                'name': 'named-js',
            })),
        ), 'named', '2.0.0')
        working_set = WorkingSet()
        working_set.add(app, self._calmjs_testing_tmpdir)
        working_set.add(underscore, self._calmjs_testing_tmpdir)
        working_set.add(named, self._calmjs_testing_tmpdir)
        stub_item_attr_value(self, dist, 'default_working_set', working_set)
        stub_check_interactive(self, True)
Example #2
 def test_prompt_choices_canceled(self):
     # A validator that raises KeyboardInterrupt aborts the prompt
     stub_check_interactive(self, True)
     result = self.do_prompt(
         'How are you?', '', validator=fake_error(KeyboardInterrupt))
     self.assertIsNone(result)
     self.assertEqual(
         self.stdout.getvalue(),
         'How are you? Aborted.\n')
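
The fake_error helper used above is part of the calmjs test utilities; a minimal sketch of a validator factory with the behaviour this test relies on (raising the supplied exception as soon as it is called, which aborts the prompt) might look like the following. The body is an illustrative assumption, not the actual implementation.

def fake_error(exception):
    # Sketch only: return a "validator" that raises the given exception
    # (here KeyboardInterrupt) the moment the prompt invokes it, so the
    # prompt gives up and prints 'Aborted.'.
    def fake(*args, **kwargs):
        raise exception
    return fake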
Example #3
 def test_stub_check_interactive(self):
     from calmjs import ui
     original = ui.check_interactive
     self.assertIs(ui.check_interactive, original)
     utils.stub_check_interactive(self, None)
     self.assertIsNot(ui.check_interactive, original)
     # it now returns this typically invalid result for testing
     self.assertIsNone(ui.check_interactive())
     self.doCleanups()
     self.assertIs(ui.check_interactive, original)
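
The test above effectively documents the contract of stub_check_interactive: it swaps calmjs.ui.check_interactive for a stand-in that reports a fixed value, and registers a cleanup that restores the original. A minimal sketch with that behaviour is shown below; the real helper in the testing utils module may be implemented differently.

def stub_check_interactive(testcase, result):
    # Sketch only: make calmjs.ui.check_interactive report `result`
    # unconditionally, and restore the original function on cleanup.
    from calmjs import ui
    original = ui.check_interactive

    def restore():
        ui.check_interactive = original

    testcase.addCleanup(restore)
    ui.check_interactive = lambda *args, **kwargs: result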
Example #4
 def test_pkg_manager_cmd_production_flag_warnings_noninteractive(self):
     stub_check_interactive(self, False)
     driver = cli.PackageManagerDriver(pkg_manager_bin='mgr', devkey='blah')
     with pretty_logging(stream=mocks.StringIO()) as log:
         self.assertEqual(driver._prodev_flag(None, None, True), [])
     self.assertIn('WARNING', log.getvalue())
     self.assertIn(
         'undefined production flag may result in unexpected installation '
         'behavior', log.getvalue())
     self.assertIn("non-interactive", log.getvalue())
     self.assertIn("'blah' may be ignored", log.getvalue())
Example #5
    def test_yarn_install_package_json_no_overwrite_interactive(self):
        """
        Most of this package_json testing will be done in the next test
        class specific for ``yarn init``.
        """

        # Testing the implied init call
        stub_mod_call(self, cli)
        stub_stdouts(self)
        stub_stdin(self, 'n\n')
        stub_check_interactive(self, True)
        tmpdir = mkdtemp(self)
        os.chdir(tmpdir)

        # All the pre-made setup.
        app = make_dummy_dist(self, (
            ('requires.txt', '\n'.join([])),
            ('package.json',
             json.dumps({
                 'dependencies': {
                     'jquery': '~1.11.0'
                 },
             })),
        ), 'foo', '1.9.0')
        working_set = WorkingSet()
        working_set.add(app, self._calmjs_testing_tmpdir)
        stub_item_attr_value(self, dist, 'default_working_set', working_set)

        # We are going to have a fake package.json
        with open(join(tmpdir, 'package.json'), 'w') as fd:
            json.dump({}, fd)

        # Capture the logging explicitly, as the conditions that
        # determine how the errors are output differ between test
        # harnesses.  Verify that later.
        with pretty_logging(stream=StringIO()) as stderr:
            # This is faked.
            yarn.yarn_install('foo', callback=prompt_overwrite_json)

        self.assertIn(
            "Overwrite '%s'? (Yes/No) [No] " % join(tmpdir, 'package.json'),
            sys.stdout.getvalue())
        # Ensure the error message is present.  Normally this is printed
        # to stderr via the distutils custom logger and our handler bridge
        # for it, which is tested elsewhere.
        self.assertIn("not continuing with 'yarn install'", stderr.getvalue())

        with open(join(tmpdir, 'package.json')) as fd:
            result = fd.read()
        # This should remain unchanged as no to overwrite is default.
        self.assertEqual(result, '{}')
Example #6
 def test_prompt_non_interactive_choices(self):
     stub_stdouts(self)
     stub_check_interactive(self, False)
     result = self.do_prompt(
         'What are you?', 'c', choices=(
             ('a', 'A'),
             ('b', 'B'),
             ('c', 'C'),
         ),
         default_key=0,
     )
     self.assertEqual(result, 'A')
     self.assertEqual(
         self.stdout.getvalue(), 'What are you? (a/b/c) [a] a\n')
Example #7
 def test_prompt_non_interactive_null(self):
     stub_stdouts(self)
     stub_check_interactive(self, False)
     result = self.do_prompt(
         'How are you?', 'I am fine thank you.\n', choices=(
             ('a', 'A'),
             ('b', 'B'),
             ('c', 'C'),
         ),
         # explicit validator negates the choices
         validator=ui.null_validator,
     )
     self.assertIs(result, None)
     self.assertEqual(self.stdout.getvalue(), 'How are you? Aborted.\n')
Example #8
 def test_prompt_basic_choice_overridden(self):
     # Extra choices with a specific validator will not work
     stub_check_interactive(self, True)
     result = self.do_prompt(
         'How are you?', 'I am fine thank you.\n', choices=(
             ('a', 'A'),
             ('b', 'B'),
             ('c', 'C'),
         ),
         # explicit validator negates the choices
         validator=ui.null_validator,
     )
     self.assertEqual(result, 'I am fine thank you.')
     self.assertEqual(self.stdout.getvalue(), 'How are you? ')
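
Both of the preceding prompt tests pass ui.null_validator explicitly, which bypasses the validator that would otherwise be generated from the choices and accepts the typed answer as-is. A plausible minimal form of such a validator is simply the identity function (a sketch; the actual calmjs.ui.null_validator may differ):

def null_validator(value):
    # Sketch only: accept any answer unchanged; supplying this explicitly
    # overrides choice-based validation.
    return value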
Example #9
 def test_prompt_choices_only(self):
     # Choices only: invalid input is rejected and the prompt repeats
     stub_check_interactive(self, True)
     result = self.do_prompt(
         'Nice day today.\nHow are you?', 'I am fine thank you.\n',
         choices=(
             ('a', 'A'),
             ('b', 'B'),
             ('c', 'C'),
         ),
         default_key=1,
     )
     self.assertEqual(result, 'B')
     self.assertEqual(
         self.stdout.getvalue(),
         'Nice day today.\n'
         'How are you? (a/b/c) [b] '  # I am fine thank you.\n
         'Invalid choice.\n'
         'How are you? (a/b/c) [b] '
     )
Example #10
    def setUp(self):
        remember_cwd(self)

        app = make_dummy_dist(self, (
            ('requires.txt', '\n'.join([])),
            ('package.json', json.dumps({
                'dependencies': {'jquery': '~1.11.0'},
            })),
        ), 'foo', '1.9.0')

        working_set = WorkingSet()
        working_set.add(app, self._calmjs_testing_tmpdir)

        # Stub out the flatten_egginfo_json calls with one that uses our
        # custom working_set here.
        stub_item_attr_value(self, dist, 'default_working_set', working_set)
        # Quiet stdout from distutils logs
        stub_stdouts(self)
        # Force auto-detected interactive mode to True, because this is
        # typically executed within an interactive context.
        stub_check_interactive(self, True)
Example #11
    def test_pkg_manager_cmd_production_flag_unset(self):
        stub_check_interactive(self, False)
        stub_mod_call(self, cli)
        stub_base_which(self)
        self.setup_requirements_json()
        driver = cli.PackageManagerDriver(
            pkg_manager_bin='mgr', pkgdef_filename='requirements.json',
            dep_keys=('require',), devkey='require',
        )

        with pretty_logging(stream=mocks.StringIO()) as log:
            driver.pkg_manager_install(['calmpy.pip'])

        self.assertIn('WARNING', log.getvalue())
        self.assertIn(
            'undefined production flag may result in unexpected installation '
            'behavior', log.getvalue()
        )
        self.assertIn("non-interactive", log.getvalue())
        self.assertIn("'require' may be ignored", log.getvalue())
        self.assertEqual(self.call_args[0], (['mgr', 'install'],))
Example #12
 def setUp(self):
     remember_cwd(self)
     stub_os_environ(self)
     stub_check_interactive(self, True)
Example #13
 def test_prompt_basic(self):
     stub_check_interactive(self, True)
     result = self.do_prompt('How are you?', 'I am fine thank you.\n')
     self.assertEqual(result, 'I am fine thank you.')
Example #14
 def setUp(self):
     self.tmpdir = mkdtemp(self)
     self.tmpjson = join(self.tmpdir, 'test.json')
     stub_check_interactive(self, True)