class PhonebookTestCase(unittest.TestCase):
    """Integration tests for the phonebook CLI, run against a scratch copy
    of the phonebook fixture so the real file is never modified."""

    def setUp(self):
        self.env = TestFileEnvironment('./scratch')
        self.prefix = os.getcwd()
        # Load phonebook fixture. Need to use a separate name to prevent
        # overwriting the actual file.
        # BUG FIX: the copy was opened in binary mode ('wb') while the source
        # was iterated in text mode; writing str lines to a binary handle
        # raises TypeError on Python 3. Both handles now use text mode.
        with open('phonebook_fixture.txt') as f:
            with open('phonebook_fixture.pb', 'w') as phonebook_fixture:
                for line in f:
                    phonebook_fixture.write(line)

    def tearDown(self):
        # Remove the scratch copy created in setUp.
        os.remove('phonebook_fixture.pb')

    # Helper methods for ensuring things were/weren't added to files.
    def assert_not_added(self, entry_fields):
        """Assert that none of `entry_fields` appear in the fixture copy."""
        result = self.env.run('cat %s/phonebook_fixture.pb' % self.prefix)
        for value in entry_fields:
            nose.tools.assert_not_in(value, result.stdout)

    def assert_added(self, entry_fields):
        """Assert that every value in `entry_fields` appears in the fixture copy."""
        result = self.env.run('cat %s/phonebook_fixture.pb' % self.prefix)
        for value in entry_fields:
            nose.tools.assert_in(value, result.stdout)
class CLITest(unittest.TestCase):
    """Exercise the githook command-line entry point."""

    def setUp(self):
        import githook
        self.githook = githook
        self.githook.app.config['TESTING'] = True
        self.tempdir = tempfile.mkdtemp()
        # Script runs happen inside an isolated scratch directory.
        output_dir = os.path.join(self.tempdir, 'test-output')
        self.env = TestFileEnvironment(output_dir, ignore_hidden=False)

    @unittest.skipIf(*is_travis)
    def test_no_config(self):
        """Without a config file the daemon must exit with status 1."""
        module_path = os.path.join(here, "..", "__init__.py")
        result = self.env.run(
            'bin/python %s' % module_path,
            expect_error=True,
            cwd=os.path.join(here, '../', '../'),
        )
        self.assertEqual(result.returncode, 1)
        self.assertEqual(
            result.stderr,
            u'CRITICAL:root:Configuration file not found. Please specify one.\n')

    # TODO This loops. :D Need another way of testing daemons.
    @unittest.skipIf(True, 'It loops')
    def test_ok_config(self):
        """A valid config starts the daemon (currently loops forever)."""
        self.env.run(
            'bin/python -m githook -c githook/tests/config/okconfig.ini',
            cwd=os.path.join(here, '../', '../'),
        )
def main():
    """
    Main method

    Runs every csv2ofx scripttest, diffing produced output against the
    checked-in expectation files.

    Returns 0 on success, 1 on failure (via sys.exit).
    """
    start = timer()
    script = 'bin/csv2ofx --help'
    env = TestFileEnvironment('.scripttest')
    tmpfile = NamedTemporaryFile(dir=parent_dir, delete=False)
    tmpname = tmpfile.name
    tmpfile.close()  # BUG FIX: only the path is needed; close the handle.

    # (test number, input example, expected output file, csv2ofx options)
    tests = [
        (2, 'default.csv', 'default.qif', 'oq'),
        (3, 'default.csv', 'default_w_splits.qif', 'oqS Category'),
        (4, 'xero.csv', 'xero.qif', 'oqc Description -m xero'),
        (5, 'mint.csv', 'mint.qif', 'oqS Category -m mint'),
        (
            6, 'mint.csv', 'mint_alt.qif',
            'oqs20150613 -e20150614 -S Category -m mint'
        ),
        (7, 'default.csv', 'default.ofx', 'oe 20150908'),
        (8, 'default.csv', 'default_w_splits.ofx', 'oS Category'),
        (9, 'mint.csv', 'mint.ofx', 'oS Category -m mint'),
    ]

    try:
        env.run(script, cwd=parent_dir)
        print('\nScripttest #1: %s ... ok' % script)

        for test_num, example_filename, check_filename, opts in tests:
            example = p.join(example_dir, example_filename)
            check = p.join(check_dir, check_filename)
            script = 'bin/csv2ofx -%s %s %s' % (opts, example, tmpname)
            env.run(script, cwd=parent_dir)

            # BUG FIX: the original leaked both file handles on every
            # iteration, and the `finally` clause referenced `checkfile.close`
            # without calling it (missing parentheses) — and `checkfile` was
            # unbound there if an exception occurred before the first open.
            # Context managers close both files deterministically.
            with open(check) as checkfile, open(tmpname) as outfile:
                args = [checkfile.readlines(), outfile.readlines()]

            kwargs = {'fromfile': 'expected', 'tofile': 'got'}
            diffs = list(unified_diff(*args, **kwargs))

            if diffs:
                loc = ' '.join(script.split(' ')[:-1])
                msg = "ERROR from test #%i! Output:\n\t%s\n" % (test_num, loc)
                msg += "doesn't match hash of\n\t%s\n" % check
                sys.stderr.write(msg)
                sys.exit(''.join(diffs))
            else:
                short_script = 'csv2ofx -%s %s %s' % (
                    opts, example_filename, check_filename)
                print('Scripttest #%i: %s ... ok' % (test_num, short_script))
    except Exception as e:
        sys.exit(e)
    else:
        time = timer() - start
        print('%s' % '-' * 70)
        print('Ran %i scripttests in %0.3fs\n\nOK' % (test_num, time))
        sys.exit(0)
    finally:
        os.unlink(tmpname)
def get_env(self, check_help_version=True):
    """Build a scripttest environment; optionally smoke-test --help/--version.

    When `check_help_version` is true, the wrapped script must exit 0 for
    both ``--help`` and ``--version`` before the environment is returned.
    """
    env = TestFileEnvironment()

    if check_help_version:
        # The script must at least respond to --help...
        result = env.run(self.script, "--help", expect_stderr=self.expect_stderr)
        assert result.returncode == 0

        # ...and must also provide a version option.
        result = env.run(self.script, "--version", expect_stderr=self.expect_stderr)
        assert result.returncode == 0
        print("stderr", result.stderr)
        print("stdout", result.stdout)
        # NOTE: deprecation warnings can pollute --version output, so the
        # string is captured but not compared against a known version.
        verstr = result.stdout.strip()

    return env
def test_stop_service_minimal_docker_container():
    """`stop minimal1` must stop only that service, leaving minimal2 running."""
    env = ScriptTestEnvironment()
    workdir = project_dir('minimal_sleep')
    env.run('ansible-container', 'run', '--detached',
            cwd=workdir, expect_stderr=True)
    result = env.run('ansible-container', 'stop', 'minimal1',
                     cwd=workdir, expect_stderr=True)
    assert "Stopping ansible_minimal1_1 ... done" in result.stderr
    assert "Stopping ansible_minimal2_1 ... done" not in result.stderr
def test_setting_ansible_container_envar():
    """Each build container must see ANSIBLE_CONTAINER=1 plus its own vars."""
    env = ScriptTestEnvironment()
    result = env.run('ansible-container', '--debug', 'build',
                     cwd=project_dir('environment'), expect_stderr=True)
    expected_lines = (
        "web MYVAR=foo ANSIBLE_CONTAINER=1",
        "db MYVAR=foo ANSIBLE_CONTAINER=1",
        "mw ANSIBLE_CONTAINER=1",
    )
    for line in expected_lines:
        assert line in result.stdout
class TestBasicCommands(TestCase):
    """Test basic command-line invoke of the program"""

    def setUp(self):
        self.env = TestFileEnvironment('./test-output')

    def _invoke(self, *extra_args):
        # Every test runs ../run.py with a 1-second timeout plus extras.
        return self.env.run('../run.py', '-s', '1', *extra_args)

    def test_default(self):
        res = self._invoke()
        assert 'Timeout' in res.stdout
        self.assertEqual(res.returncode, 0)

    def test_collect(self):
        res = self._invoke('-c')
        assert 'Collecting' in res.stdout
        self.assertEqual(res.returncode, 0)

    def test_log(self):
        res = self._invoke('--log', 'log.txt')
        assert 'Timeout' in res.stdout
        assert 'log.txt' in res.files_created
        self.assertEqual(res.returncode, 0)

    def test_segment(self):
        res = self._invoke('--segment', '0.2')
        assert 'Timeout' in res.stdout
        self.assertEqual(res.returncode, 0)

    def tearDown(self):
        pass
def test_no_command_shows_help():
    """Running without a subcommand prints usage on stderr and exits 2."""
    env = ScriptTestEnvironment()
    outcome = env.run('ansible-container', expect_error=True)
    assert outcome.returncode == 2
    assert len(outcome.stdout) == 0
    assert "usage: ansible-container" in outcome.stderr
    assert "ansible-container: error:" in outcome.stderr
class TestProgramsHelp(unittest.TestCase):
    """Smoke-test the ``-h`` (help) flag of every command-line program.

    NOTE: this is Python 2 code (print statements); keep it that way until
    the whole module is ported.
    """

    def spawn_tests(self):
        # Scan the programs directory and return the first eligible program
        # name; ineligible files accumulate in ``not_checked`` (discarded).
        # NOTE(review): the early ``return`` means only one program is ever
        # yielded — confirm whether this helper is still used anywhere.
        programs = os.listdir(programs_WD)
        not_checked = []
        for prog in programs:
            if prog in ['__init__.py', 'program_envs.py']:
                continue
            if not prog.endswith('.py') or '#' in prog:
                # Skip non-Python files and editor backup files.
                not_checked.append(prog)
                continue
            if prog.lower() != prog:
                # Skip names containing uppercase letters.
                continue
            #res = env.run(prog, '-h')
            return prog

    def setUp(self):
        # Start each test with a clean scripttest sandbox.
        if os.path.exists('./new-test-output'):
            shutil.rmtree('./new-test-output')
        self.env = TestFileEnvironment('./new-test-output')
        #if not os.path.exists('./new-test-output'):
        #    os.mkdir('./new-test-output')

    def tearDown(self):
        # Remove the sandbox created in setUp.
        if os.path.exists('./new-test-output'):
            shutil.rmtree('./new-test-output')

    def test_cmd_line(self):
        # Run every eligible (lowercase, non-GUI, .py) program with -h and
        # let scripttest raise if the help invocation fails.
        print 'programs_WD', programs_WD
        programs = os.listdir(programs_WD)
        not_checked = []
        for prog in programs:
            print "Testing help message for:", prog
            if prog in ['__init__.py', 'program_envs.py']:
                continue
            if 'gui' in prog:
                continue
            if not prog.endswith('.py') or '#' in prog:
                not_checked.append(prog)
                continue
            if prog.lower() != prog:
                continue
            if sys.platform in ['win32', 'win62']:
                # On Windows the console scripts are installed without .py.
                prog = prog[:-3]
            res = self.env.run(prog, '-h')
            #except AssertionError as ex:
            #    not_checked.append(prog)
            #    print 'ex', type(ex)
            #    print res
        print 'not_checked', not_checked

    def test_guis(self):
        # The GUI programs are exercised separately with an explicit list.
        tests = ['pmag_gui.py', 'magic_gui.py', 'demag_gui.py',
                 'thellier_gui.py']
        for prog in tests:
            if sys.platform in ['win32', 'win62']:
                prog = prog[:-3]
            print 'testing:', prog
            res = self.env.run(prog, '-h')
def main(script, tests, verbose=False, stop=True):
    """
    Main method

    Runs each scripttest in `tests` (an iterable of
    ``(opts, arguments, expected)`` tuples) and diffs the captured stdout
    against `expected`, which may be a bool, a path to a file of expected
    lines, or a literal string.

    Returns 0 on success, the failure count on failure (via sys.exit).
    """
    failures = 0
    # BUG FIX: `num` must exist before the loop, otherwise an empty `tests`
    # sequence raises NameError at the summary logging below.
    num = 0
    logger = gogo.Gogo(__name__, verbose=verbose).logger
    short_script = p.basename(script)
    env = TestFileEnvironment('.scripttest')
    start = timer()

    for pos, test in enumerate(tests):
        num = pos + 1
        opts, arguments, expected = test
        joined_opts = ' '.join(opts) if opts else ''
        joined_args = '"%s"' % '" "'.join(arguments) if arguments else ''
        command = "%s %s %s" % (script, joined_opts, joined_args)
        short_command = "%s %s %s" % (short_script, joined_opts, joined_args)
        result = env.run(command, cwd=p.abspath(p.dirname(p.dirname(__file__))))
        output = result.stdout

        if isinstance(expected, bool):
            # Boolean expectation: compare truthiness of the whole output.
            text = StringIO(output).read()
            outlines = [str(bool(text))]
            checklines = StringIO(str(expected)).readlines()
        elif p.isfile(expected):
            # File expectation: compare line-by-line against its contents.
            outlines = StringIO(output).readlines()
            with open(expected, encoding='utf-8') as f:
                checklines = f.readlines()
        else:
            # Literal string expectation.
            outlines = StringIO(output).readlines()
            checklines = StringIO(expected).readlines()

        args = [checklines, outlines]
        kwargs = {'fromfile': 'expected', 'tofile': 'got'}
        diffs = ''.join(unified_diff(*args, **kwargs))
        passed = not diffs

        if not passed:
            failures += 1
            msg = "ERROR! Output from test #%i:\n %s\n" % (num, short_command)
            msg += "doesn't match:\n %s\n" % expected
            msg += diffs if diffs else ''
        else:
            logger.debug(output)
            msg = 'Scripttest #%i: %s ... ok' % (num, short_command)

        logger.info(msg)

        if stop and failures:
            break

    time = timer() - start
    logger.info('%s' % '-' * 70)
    end = 'FAILED (failures=%i)' % failures if failures else 'OK'
    logger.info('Ran %i scripttests in %0.3fs\n\n%s' % (num, time, end))
    sys.exit(failures)
def test_script(self):
    """CSV piped to stdin must be echoed back by the dry-run upload."""
    with HTTMock(mock_TwitterK):
        env = TestFileEnvironment('myria_upload')
        command = ('myria_upload --relation TwitterK --program test '
                   '--overwrite --hostname localhost --port 12345 --dry')
        res = env.run(command, stdin='foo,bar\n1,b\n3,c', expect_stderr=True)
        eq_(res.stdout, '1,b\n3,c\n')
def env():
    """Yield a FileEnvironment wired to a temp konch auth file; remove it after."""
    environment = FileEnvironment(ignore_hidden=False)
    auth_file = Path(environment.base_path) / "konch_auth"
    environment.environ["KONCH_AUTH_FILE"] = str(auth_file)
    # `echo` is a no-op editor so tests never block on user input.
    environment.environ["KONCH_EDITOR"] = "echo"
    yield environment
    try:
        auth_file.unlink()
    except FileNotFoundError:
        pass
class TestIntegration(unittest.TestCase):
    """End-to-end tests for the ``gpc`` command-line tool."""

    def setUp(self):
        self.env = TestFileEnvironment(test_path, template_path=template_path)

    def test_init(self):
        """``gpc init`` creates the log and storage skeleton."""
        result = self.env.run("gpc", "init", expect_stderr=True)
        created = result.files_created.keys()
        for expected in ("log", "log/data", "log/schema.sql", "storage"):
            self.assertTrue(expected in created)

    def test_make_target(self):
        """``gpc make c`` produces the target plus storage and log entries."""
        self.env.run("gpc", "init", expect_stderr=True)
        self.env.writefile("gpc.yaml", frompath="simple.yaml")
        result = self.env.run("gpc", "make", "c", expect_stderr=True)
        created = list(result.files_created.keys())
        self.assertTrue("c" in created)
        created.remove("c")
        self.assertTrue(any(name.startswith("storage/") for name in created))
        self.assertTrue(any(name.startswith("log/data/") for name in created))

    def test_make_target_cached(self):
        """With warm caches, making the target creates only the target file."""
        for subdir in ("/.gpc", "/log", "/storage"):
            call(["cp", "-r", template_path + subdir, test_path])
        self.env.writefile("gpc.yaml", frompath="simple.yaml")
        result = self.env.run("gpc", "make", "c", expect_stderr=True)
        created = result.files_created.keys()
        self.assertTrue("c" in created)
        self.assertTrue(len(created) == 1)
def test_build_with_variables():
    """--with-variables must inject foo/bar into the build container env."""
    env = ScriptTestEnvironment()
    build_result = env.run('ansible-container', 'build',
                           '--save-build-container',
                           '--with-variables', 'foo=bar', 'bar=baz',
                           cwd=project_dir('minimal'), expect_stderr=True)
    assert "Aborting on container exit" in build_result.stdout
    assert "Exported minimal-minimal with image ID " in build_result.stderr
    inspect_result = env.run('docker', 'inspect',
                             '--format="{{ .Config.Env }}"',
                             'ansible_ansible-container_1',
                             expect_stderr=True)
    assert "foo=bar" in inspect_result.stdout
    assert "bar=baz" in inspect_result.stdout
def test_build_with_volumes():
    """--with-volumes must mount the cwd read-only into the build container."""
    env = ScriptTestEnvironment()
    volume_string = "{0}:{1}:{2}".format(os.getcwd(), '/projectdir', 'ro')
    build_result = env.run('ansible-container', 'build',
                           '--save-build-container',
                           '--with-volumes', volume_string,
                           cwd=project_dir('minimal'), expect_stderr=True)
    assert "Aborting on container exit" in build_result.stdout
    assert "Exported minimal-minimal with image ID " in build_result.stderr
    inspect_result = env.run(
        'docker', 'inspect',
        '--format="{{range .Mounts}}{{ .Source }}:{{ .Destination }}:{{ .Mode}} {{ end }}"',
        'ansible_ansible-container_1', expect_stderr=True)
    mounted = inspect_result.stdout.split(' ')
    assert volume_string in mounted
def test_shipit_kube():
    """shipit kube must exit cleanly and create the role plus playbook."""
    env = ScriptTestEnvironment()
    project = project_dir('postgres')
    # Should run shipit kube to success
    result = env.run('ansible-container', '--debug', 'shipit', 'kube',
                     '--pull-from', 'https://index.docker.io/v1/ansible',
                     cwd=project, expect_error=True)
    assert result.returncode == 0
    assert "Role postgres created" in result.stderr
    # Should create a role
    roles_listing = env.run('ls ansible/roles', cwd=project)
    assert "postgres-kubernetes" in roles_listing.stdout
    # Should create a playbook
    playbook_listing = env.run('ls ansible', cwd=project)
    assert "shipit-kubernetes.yml" in playbook_listing.stdout
def get_env(self):
    """Return a fresh scripttest environment after a ``--help`` smoke test."""
    env = TestFileEnvironment()
    # If --help itself fails there is no point running anything else.
    env.run(self.script, "--help")
    return env
def test_bad_symlink(tmpdir):
    """ symlinks only work in UNIX """
    if sys.platform == 'win32':
        return
    environment = TestFileEnvironment(str(tmpdir), start_clear=False)
    result = environment.run(sys.executable, '-c', '''\
import os
os.symlink(os.path.join('does', 'not', 'exist.txt'), "does-not-exist.txt")
''')
    # A dangling symlink is reported as created, but flagged invalid.
    assert 'does-not-exist.txt' in result.files_created, result.files_created
    assert result.files_created['does-not-exist.txt'].invalid
    # Rendering the result must not raise either.
    str(result)
class BaseTemplateTest(unittest.TestCase):
    """Shared scaffolding for mr.bob template tests."""

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tempdir)
        # docs: http://pythonpaste.org/scripttest/
        self.env = TestFileEnvironment(
            os.path.join(self.tempdir, 'test-output'),
            ignore_hidden=False,
        )

    def create_template(self):
        """Run mr.bob to create your template."""
        options = {
            'dir': os.path.join(os.path.dirname(__file__)),
            'template': self.template,
            'project': self.project,
        }
        command = (
            '%(dir)s/bin/mrbob -O %(project)s --config '
            '%(dir)s/test_answers.ini %(dir)s/bobtemplates/%(template)s'
            % options
        )
        return self.env.run(command)
class TestCommands(TestCase):
    """Exercise the ``send`` subcommand of run.py."""

    @classmethod
    def setUpClass(cls):
        create_run_script()

    def setUp(self):
        self.env = TestFileEnvironment('./test-output')

    def test_send_no_args(self):
        # Missing arguments must make the command fail.
        argv = shlex.split('python ../run.py send')
        self.env.run(*argv, expect_error=True)

    def test_send_alias_stdin(self):
        # Message body is supplied on stdin when an alias is given.
        argv = shlex.split('python ../run.py send -a "%s"' % (TEST_ALIAS))
        self.env.run(*argv, stdin=b'Hi')
class BaseTemplateTest(unittest.TestCase):
    # credit bobtemplates.plone
    """Shared scaffolding for bobtemplates.odoo template tests."""

    def setUp(self):
        self.maxDiff = None
        self.tempdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tempdir)
        # docs http://pythonpaste.org/scripttest/
        self.env = TestFileEnvironment(
            os.path.join(self.tempdir, 'test-output'),
            ignore_hidden=False,
        )

    def create_template(self):
        """Run mr.bob to create your template."""
        options = {
            'dir': os.path.join(os.path.dirname(__file__)),
            'template': self.template,
            'addon': self.addon,
            'answers_file': self.answers_file,
            'target_dir': self.target_dir,
        }
        command = (
            'mrbob -O %(target_dir)s --config '
            '%(dir)s/%(answers_file)s bobtemplates.odoo:%(template)s'
            % options
        )
        return self.env.run(command)
class TestSpec(object):
    """Compare generated spec files against the stored expectations."""

    td_dir = "{0}/test_data/".format(tests_dir)
    bin_dir = os.path.split(tests_dir)[0] + "/"
    exe = "python {0}mybin.py".format(bin_dir)

    def setup_method(self, method):
        self.env = TestFileEnvironment("{0}/test_output/".format(tests_dir))

    @pytest.mark.parametrize(
        ("package", "options", "expected"),
        [
            ("Jinja2", "", "python-Jinja2.spec"),
            ("Jinja2", "-b3", "python-Jinja2_base.spec"),
            ("Jinja2", "-t epel7", "python-Jinja2_epel7.spec"),
            ("Jinja2", "-t epel6", "python-Jinja2_epel6.spec"),
            ("buildkit", "-b2", "python-buildkit.spec"),
            ("StructArray", "-b2", "python-StructArray.spec"),
            ("Sphinx", "-r python-sphinx", "python-sphinx.spec"),
        ],
    )
    @pytest.mark.spectest
    def test_spec(self, package, options, expected):
        with open(self.td_dir + expected) as handle:
            self.spec_content = handle.read()
        result = self.env.run("{0} {1} {2}".format(self.exe, package, options))
        # changelog have to be cut from spec files
        produced = set(result.stdout.split("\n")[1:-4])
        assert produced == set(self.spec_content.split("\n")[1:-4])
class TestSpec(object):
    """Compare generated spec files (pinned versions) against expectations."""

    td_dir = '{0}/test_data/'.format(tests_dir)
    bin_dir = os.path.split(tests_dir)[0] + '/'
    exe = 'python {0}mybin.py'.format(bin_dir)

    def setup_method(self, method):
        self.env = TestFileEnvironment('{0}/test_output/'.format(tests_dir))

    @pytest.mark.parametrize(('package', 'options', 'expected'), [
        ('Jinja2', '-v2.8', 'python-Jinja2.spec'),
        ('Jinja2', '-v2.8 -b3', 'python-Jinja2_base.spec'),
        ('Jinja2', '-v2.8 -t epel7', 'python-Jinja2_epel7.spec'),
        ('Jinja2', '-v2.8 -t epel6', 'python-Jinja2_epel6.spec'),
        ('buildkit', '-v0.2.2 -b2', 'python-buildkit.spec'),
        ('StructArray', '-v0.1 -b2', 'python-StructArray.spec'),
        ('Sphinx', '-v1.5 -r python-sphinx', 'python-sphinx.spec'),
    ])
    @pytest.mark.webtest
    def test_spec(self, package, options, expected):
        with open(self.td_dir + expected) as handle:
            self.spec_content = handle.read()
        result = self.env.run('{0} {1} {2}'.format(self.exe, package, options),
                              expect_stderr=True)
        # changelog have to be cut from spec files
        produced = set(result.stdout.split('\n')[1:-4])
        assert produced == set(self.spec_content.split('\n')[1:-4])
class BaseTemplateTest(unittest.TestCase):
    """Base class for all spirit.bob test cases."""

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tempdir)
        # docs http://pythonpaste.org/scripttest/
        self.env = TestFileEnvironment(
            os.path.join(self.tempdir, 'test-output'),
            ignore_hidden=False,
        )

    def create_template(self):
        """Run mr.bob to create your template."""
        options = {
            'dir': os.path.join(os.path.dirname(__file__)),
            'template': self.template,
            'answers_file': self.answers_file,
        }
        command = (
            '{dir}/bin/mrbob --config '
            '{dir}/{answers_file} {dir}/src/spirit/bob/{template}'.format(
                **options
            )
        )
        return self.env.run(command)
def setUp(self):
    """Create a throwaway scripttest sandbox (docs: http://pythonpaste.org/scripttest/)."""
    self.tempdir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, self.tempdir)
    sandbox = os.path.join(self.tempdir, 'test-output')
    self.env = TestFileEnvironment(sandbox, ignore_hidden=False)
def setUp(self):
    """Prepare the scripttest env and stage the frequency fixture file."""
    self.env = TestFileEnvironment(
        base_path=TEST_DIR,
        template_path=TEMPLATE_DIR,
        cwd=os.getcwd(),
        ignore_hidden=False,
    )
    fixture_name = 'test_freq_2_then_1_then_8_then_4.fda'
    self.env.writefile(INPUT_FILE_NAME, frompath=fixture_name)
def setUp(self):
    """Build a scripttest env rooted inside the temp usable directory."""
    super(Shell, self).setUp()
    # PATH to migrations development script folder
    migrate_path = os.path.dirname(sys.executable)
    log.debug('PATH for ScriptTest: %s', migrate_path)
    env_root = os.path.join(self.temp_usable_dir, 'env')
    self.env = TestFileEnvironment(base_path=env_root)
def main(verbose=False):
    """Smoke-test the ckanny CLI: top-level usage, per-command help, version."""
    env = TestFileEnvironment('.scripttest')

    def maybe_show(res):
        # Echo captured stdout when running verbosely.
        if verbose:
            print(res.stdout)

    test_num = 1

    # Test main usage
    res = env.run('%s --help' % script)
    maybe_show(res)
    usage = 'usage: ckanny [<namespace>.]<command> [<args>]'
    assert res.stdout.split('\n')[0] == usage
    print('\nScripttest: #%i ... ok' % test_num)
    test_num += 1

    # Test command usage
    commands = [
        'ds.delete', 'ds.update', 'ds.upload', 'fs.fetch', 'fs.migrate',
        'fs.upload', 'pk.create', 'pk.update']
    for command in commands:
        res = env.run('%s %s --help' % (script, command))
        maybe_show(res)
        usage = 'usage: %s %s\n' % (script, command)
        assert ' '.join(res.stdout.split(' ')[:3]) == usage
        print('Scripttest: %s ... ok' % command)
        test_num += 1

    # Test version
    res = env.run('%s ver' % script)
    maybe_show(res)
    assert res.stdout.split('\n')[0] == 'v%s' % version
    print('Scripttest: #%i ... ok' % test_num)

    # End of testing
    print('-----------------------------')
    print('Ran %i tests\n\nOK' % test_num)
    exit(0)
def reset_env():
    """Rebuild the module-level scripttest env inside a fresh virtualenv."""
    global env
    env = TestFileEnvironment(base_path, ignore_hidden=False)
    env.run('virtualenv', '--no-site-packages', env.base_path)
    # To avoid the 0.9c8 svn 1.5 incompatibility:
    env.run(
        '%s/bin/easy_install' % env.base_path,
        'http://peak.telecommunity.com/snapshots/setuptools-0.7a1dev-r66388.tar.gz')
    env.run('mkdir', 'src')
class TestManagement(TestCase):
    """Integration tests for the example project's manage.py commands."""

    @staticmethod
    def _remove_db_file():
        # Drop the database file if a previous run left one behind.
        if os.path.exists(DB_PATH):
            os.remove(DB_PATH)

    def setUp(self):
        # Start from a clean database and a fresh scripttest environment.
        self._remove_db_file()
        self.env = TestFileEnvironment(
            base_path=TESTS_OUTPUT_PATH,
            start_clear=True,
        )
        os.chdir(TESTS_OUTPUT_PATH)

    def tearDown(self):
        # Restore the working directory, then wipe everything the test made.
        os.chdir(BASE_PATH)
        self.env.clear()
        shutil.rmtree(TESTS_OUTPUT_PATH)
        self._remove_db_file()

    def test_test_cmd(self):
        manage = os.path.join(EXAMPLE_PATH, 'manage.py')
        result = self.env.run('%s test' % manage)
        self.assertEqual(result.stdout, "Hello world!\n")

    def test_create_admin(self):
        manage = os.path.join(EXAMPLE_PATH, 'manage.py')
        result = self.env.run('%s create_admin' % manage)
        self.assertEqual(
            result.stdout,
            "Admin user %(user)s (password: %(pwd)s) created successfully.\n" % {
                'user': ADMIN_USER,
                'pwd': ADMIN_PWD,
            })
        # Running it a second time must report the existing user instead.
        result = self.env.run('%s create_admin' % manage)
        self.assertEqual(
            result.stdout,
            "Admin user %(user)s already exists!\n" % {
                'user': ADMIN_USER,
            })
def test_help_option_shows_help_for_shipit_engine_command():
    """`shipit kube --help` prints the engine-specific usage text."""
    env = ScriptTestEnvironment()
    outcome = env.run('ansible-container', 'shipit', 'kube', '--help')
    assert "usage: ansible-container shipit kube" in outcome.stdout
# TODO: optimize file pathing in TestFileEnvironment for different platforms. from scripttest import TestFileEnvironment import filecmp import os env = TestFileEnvironment('./scratch') def synthetic(): env.clear() print('Testing input 12 3 21') encode_result_0 = env.run('..\\ant', '12', '3', '21', expect_error=False) assert encode_result_0.returncode == 0 assert encode_result_0.stdout.startswith('72469') print('OK') print('Testing input 67 12 19') encode_result_0 = env.run('..\\ant', '67', '12', '19', expect_error=False) assert encode_result_0.returncode == 0 assert encode_result_0.stdout.startswith('25920') print('OK') print('Testing input 137 67 25') encode_result_0 = env.run('..\\ant', '137', '67', '25', expect_error=False) assert encode_result_0.returncode == 0 assert encode_result_0.stdout.startswith('259359') print('OK') print('Testing input 15 653 25') encode_result_0 = env.run('..\\ant', '15', '653', '25', expect_error=False)
#!/usr/bin/env python from __future__ import print_function from __future__ import unicode_literals from scripttest import TestFileEnvironment import subprocess env = TestFileEnvironment('./_test') debug = False # DEFINE ANSIBLE VERSION ANSIBLE_2_4 = "/home/kbyers/VENV/py27_venv/bin/ansible-playbook" #ANSIBLE_2_4 = "/home/kbyers/VENV/ans_2_4/bin/ansible-playbook" ACTIVE_VERSION = ANSIBLE_2_4 # DEFINE TEST CASES PROGRAMS = [ ('/home/kbyers/ansible_course/class3/exercises/exercise1.yml', { 'tests': { 'return_code': 0, 'return_strings': [ 'nxos1 : ok=2 changed=1 unreachable=0 failed=0', 'nxos2 : ok=2 changed=1 unreachable=0 failed=0', "changed: [nxos1] => (item={u'name': u'blue', u'vlan_id': 301})", "changed: [nxos2] => (item={u'name': u'blue', u'vlan_id': 301})", "changed: [nxos1] => (item={u'name': u'red', u'vlan_id': 302})", "changed: [nxos2] => (item={u'name': u'red', u'vlan_id': 302})", "changed: [nxos1] => (item={u'name': u'green', u'vlan_id': 303})", "changed: [nxos2] => (item={u'name': u'green', u'vlan_id': 303})", "changed: [nxos1] => (item={u'name': u'yellow', u'vlan_id': 304})",
from scripttest import TestFileEnvironment
from filecmp import cmp

# All GraphProt artifacts are written into this scripttest sandbox.
testdir = "tests/testenv_graphprot_classification_ls/"
env = TestFileEnvironment(testdir)


def test_classification_ls():
    "Run parameter linesearch."
    # Produces CL_ls.params in the sandbox; compared against the checked-in
    # expectation file.
    call = """../../GraphProt.pl -mode classification -action ls --onlyseq \
-fasta ../testclip.train.positives.fa \
-negfasta ../testclip.train.negatives.fa -prefix CL_ls --keep-tmp"""
    env.run(call)
    assert cmp("tests/CL_ls.params", testdir + "CL_ls.params")


def test_classification_train_from_lsparam():
    "Train a model using parameter file."
    # Trains from the linesearch parameters and compares the resulting model
    # against the checked-in expectation file.
    call = """../../GraphProt.pl -mode classification -action train --onlyseq\
-fasta ../testclip.train.positives.fa \
-negfasta ../testclip.train.negatives.fa -params ../CL_ls.params \
-prefix CL_train_from_ls --keep-tmp"""
    env.run(call)
    assert cmp("tests/CL_train_from_ls.model",
               testdir + "CL_train_from_ls.model")
def __init__(self, environ=None, sitecustomize=None):
    """Set up an isolated pip test environment.

    Creates (or restores from backup) a dedicated virtualenv under
    ``root_path``, installs the pip under test into it, and configures the
    scripttest base environment accordingly.

    environ:       optional environment dict; a scrubbed copy of os.environ
                   is used when omitted.
    sitecustomize: optional code snippet appended to sitecustomize.py.
    """
    import virtualenv
    self.root_path = fast_test_env_root
    self.backup_path = fast_test_env_backup
    self.scratch_path = self.root_path / self.scratch
    # We will set up a virtual environment at root_path.
    self.venv_path = self.root_path / self.venv
    if not environ:
        environ = os.environ.copy()
        environ = clear_environ(environ)
        environ['PIP_DOWNLOAD_CACHE'] = str(download_cache)
    # These apply regardless of whether a caller-supplied environ was given.
    environ['PIP_NO_INPUT'] = '1'
    environ['PIP_LOG_FILE'] = str(self.root_path / 'pip-log.txt')
    TestFileEnvironment.__init__(self, self.root_path, ignore_hidden=False,
                                 environ=environ, split_cmd=False,
                                 start_clear=False, cwd=self.scratch_path,
                                 capture_temp=True, assert_no_temp=True)
    # Record where the virtualenv puts things, as both absolute Paths
    # (``*_path`` attributes) and paths relative to root_path.
    virtualenv_paths = virtualenv.path_locations(self.venv_path)
    for id, path in zip(('venv', 'lib', 'include', 'bin'), virtualenv_paths):
        # fix for virtualenv issue #306
        if hasattr(sys, "pypy_version_info") and id == 'lib':
            path = os.path.join(self.venv_path, 'lib-python', pyversion)
        setattr(self, id + '_path', Path(path))
        setattr(self, id, relpath(self.root_path, path))
    assert self.venv == TestPipEnvironment.venv  # sanity check
    # site-packages lives in a different place on PyPy.
    if hasattr(sys, "pypy_version_info"):
        self.site_packages = self.venv / 'site-packages'
    else:
        self.site_packages = self.lib / 'site-packages'
    self.user_base_path = self.venv_path / 'user'
    self.user_site_path = self.venv_path / 'user' / 'lib' / self.lib.name / 'site-packages'
    self.user_site = relpath(self.root_path, self.user_site_path)
    self.environ["PYTHONUSERBASE"] = self.user_base_path
    # put the test-scratch virtualenv's bin dir first on the PATH
    self.environ['PATH'] = Path.pathsep.join(
        (self.bin_path, self.environ['PATH']))
    self.use_distribute = os.environ.get('PIP_TEST_USE_DISTRIBUTE', False)
    # Always start from scratch; restore from the backup tree when one
    # exists, otherwise build the virtualenv and then snapshot it.
    if self.root_path.exists:
        rmtree(self.root_path)
    if self.backup_path.exists:
        shutil.copytree(self.backup_path, self.root_path, True)
    else:
        demand_dirs(self.venv_path)
        demand_dirs(self.scratch_path)

        # Create a virtualenv and remember where it's putting things.
        create_virtualenv(self.venv_path, distribute=self.use_distribute)

        demand_dirs(self.user_site_path)

        # create easy-install.pth in user_site, so we always have it updated instead of created
        open(self.user_site_path / 'easy-install.pth', 'w').close()

        # test that test-scratch virtualenv creation produced sensible venv python
        result = self.run('python', '-c', 'import sys; print(sys.executable)')
        pythonbin = result.stdout.strip()

        if Path(pythonbin).noext != self.bin_path / 'python':
            raise RuntimeError(
                "Oops! 'python' in our test environment runs %r"
                " rather than expected %r" % (pythonbin, self.bin_path / 'python'))

        # make sure we have current setuptools to avoid svn incompatibilities
        if not self.use_distribute:
            install_setuptools(self)

        # Uninstall whatever version of pip came with the virtualenv.
        # Earlier versions of pip were incapable of
        # self-uninstallation on Windows, so we use the one we're testing.
        self.run(
            'python', '-c',
            '"import sys; sys.path.insert(0, %r); import pip; sys.exit(pip.main());"'
            % os.path.dirname(here),
            'uninstall', '-vvv', '-y', 'pip')

        # Install this version instead
        self.run('python', 'setup.py', 'install', cwd=src_folder,
                 expect_stderr=True)
        # Snapshot the freshly built environment for fast reuse next time.
        shutil.copytree(self.root_path, self.backup_path, True)

    # create sitecustomize.py and add patches
    self._create_empty_sitecustomize()
    self._use_cached_pypi_server()
    if sitecustomize:
        self._add_to_sitecustomize(sitecustomize)

    assert self.root_path.exists

    # Ensure that $TMPDIR exists (because we use start_clear=False, it's not created for us)
    if self.temp_path and not os.path.exists(self.temp_path):
        os.makedirs(self.temp_path)
def before_scenario(context, scenario):
    """behave hook: give every scenario a fresh scripttest env in ./tmp."""
    scratch_dir = './tmp'
    context.env = TestFileEnvironment(scratch_dir)
def setUp(self):
    """Create the scripttest sandbox used by each test."""
    output_dir = './test-output'
    self.env = TestFileEnvironment(output_dir)
def test_socialize():
    """Unauthenticated `socl me` must print the login prompt on stderr."""
    env = TestFileEnvironment('./test-output')
    outcome = env.run('socl', 'me', expect_error=True)
    expected = ("You aren't authenticated yet. Please use the login or "
                "register command to do so.\n")
    assert outcome.stderr == expected
def test_abidoc(self):
    """Testing abiconf.py script"""
    env = TestFileEnvironment()

    # Start with --help. If this does not work...
    print(self.script)
    env.run(self.script, "--help")

    # Every remaining subcommand is invoked with self.verbose appended,
    # in the same order as before: hostname, list, keys, doc, opts, workon.
    subcommands = [
        ("hostname",),
        ("hostname", "zenobe"),
        ("list",),
        ("keys", "intel"),
        ("keys", "intel", "mkl"),
        ("doc",),
        ("opts",),
        # Test load
        #("load", "acfile"),
        ("workon",),
        ("workon", "zenobe-intel-impi-mkl.ac"),
    ]
    for argv in subcommands:
        env.run(self.script, *(argv + (self.verbose,)))
def test_run_in_uninitialized_directory_fails():
    """`run` outside a project directory exits 1 with a clear error."""
    env = ScriptTestEnvironment()
    outcome = env.run('ansible-container', 'run', expect_error=True)
    assert outcome.returncode == 1
    assert outcome.stdout == ''
    assert "No Ansible Container project data found" in outcome.stderr
def test_help_option_shows_help_for_push_command():
    """`push --help` prints the push-specific usage text."""
    env = ScriptTestEnvironment()
    outcome = env.run('ansible-container', 'push', '--help')
    assert "usage: ansible-container push" in outcome.stdout
def test_invalid_command_fails():
    """An unknown subcommand must exit 2 with an argparse choice error."""
    env = ScriptTestEnvironment()
    outcome = env.run('ansible-container', 'invalid', expect_error=True)
    assert outcome.returncode == 2
    assert len(outcome.stdout) == 0
    assert ("ansible-container: error: argument subcommand: invalid choice: "
            "'invalid'") in outcome.stderr
def test_help_option_shows_help():
    """`--help` prints the top-level usage text."""
    env = ScriptTestEnvironment()
    outcome = env.run('ansible-container', '--help')
    assert "usage: ansible-container" in outcome.stdout
def test_no_command_shows_help():
    """With no subcommand, argparse reports "too few arguments" and exits 2."""
    env = ScriptTestEnvironment()
    outcome = env.run('ansible-container', expect_error=True)
    assert outcome.returncode == 2
    assert len(outcome.stdout) == 0
    assert "ansible-container: error: too few arguments" in outcome.stderr
def env():
    """Build a scripttest environment that also tracks hidden files."""
    file_env = TestFileEnvironment(ignore_hidden=False)
    return file_env
def setup_method(self, method):
    """Give each test its own temp dir wrapped in a scripttest env."""
    workdir = tempfile.mkdtemp()
    self.temp_dir = workdir
    self.env = TestFileEnvironment(workdir, start_clear=False)
def test_run_minimal_docker_container():
    """The minimal project's container must run to a clean exit."""
    env = ScriptTestEnvironment()
    outcome = env.run('ansible-container', 'run', cwd=project_dir('minimal'))
    assert "ansible_minimal_1 exited with code 0" in outcome.stdout
class TestCLI():
    """ Contains setups, teardowns, and tests for CLI """

    @classmethod
    def setup_class(cls):
        """ Create a config file to read from """
        with open(CONFIGFILE, 'w') as config:
            config.write(CONFIG_YAML)

    @classmethod
    def teardown_class(cls):
        """ Remove config file """
        os.remove(CONFIGFILE)

    def setup(self):
        """ Create a file to print to and set up env"""
        self.env = None
        self.default_args = None

        self.env = TestFileEnvironment(
            base_path=TEST_DIR,
            cwd=os.getcwd(),
        )

        # Arguments shared by every python-escpos invocation below.
        self.default_args = (
            'python-escpos',
            '-c',
            CONFIGFILE,
        )

        # Touch the fake printer device file so it exists with a fresh mtime.
        fhandle = open(DEVFILE, 'a')
        try:
            os.utime(DEVFILE, None)
        finally:
            fhandle.close()

    def teardown(self):
        """ Destroy printer file and env """
        os.remove(DEVFILE)
        self.env.clear()

    def test_cli_help(self):
        """ Test getting help from cli """
        result = self.env.run('python-escpos', '-h')
        assert not result.stderr
        assert 'usage' in result.stdout

    def test_cli_version(self):
        """ Test the version string """
        result = self.env.run('python-escpos', 'version')
        assert not result.stderr
        assert_equals(escpos.__version__, result.stdout.strip())

    @nottest  # disable this test as it is not that easy anymore to predict the outcome of this call
    def test_cli_text(self):
        """ Make sure text returns what we sent it """
        test_text = 'this is some text'
        result = self.env.run(*(self.default_args + (
            'text',
            '--txt',
            test_text,
        )))
        assert not result.stderr
        # The printer device file should have received exactly our text.
        assert DEVFILE_NAME in result.files_updated.keys()
        assert_equals(result.files_updated[DEVFILE_NAME].bytes,
                      test_text + '\n')

    def test_cli_text_inavlid_args(self):
        """ Test a failure to send valid arguments """
        # An unknown option must produce argparse's exit code 2 and leave
        # the printer device untouched.
        result = self.env.run(
            *(self.default_args + ('text', '--invalid-param', 'some data')),
            expect_error=True, expect_stderr=True)
        assert_equals(result.returncode, 2)
        assert 'error:' in result.stderr
        assert not result.files_updated
def test_shipit_in_uninitialized_directory_fails():
    """Running 'shipit' outside an initialized project must fail."""
    test_env = ScriptTestEnvironment()
    outcome = test_env.run('ansible-container', 'shipit', expect_error=True)
    assert outcome.returncode == 1
    assert outcome.stdout == ''
    assert "No such file or directory" in outcome.stderr
class PyCliTest(object):
    'Base class for all test cases.'

    def __init__(self, description, tmp_wd='tests-out', clear_base_dir=True):
        """Prepare a scripttest environment rooted in a per-test working dir.

        :param description: human-readable name used in all log output
        :param tmp_wd: working-directory name created next to this file
        :param clear_base_dir: passed to scripttest's ``start_clear``
        """
        self.description = description
        self.command = ''
        self.negative_case = False
        self.ignore_err = False
        self.tmp_wd = tmp_wd
        self._create_temp_working_dir()
        self.env = TestFileEnvironment(
            base_path=self.working_dir, start_clear=clear_base_dir)

    def before_all(self):
        raise NotImplementedError("before_all not implemented.")

    def _create_temp_working_dir(self):
        # Anchor the working dir next to this file so relative paths are stable.
        self.working_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), self.tmp_wd)

    def with_cli(self, command):
        """Record the command line to execute; returns self for chaining."""
        self.command = command
        return self

    def setup(self):
        # Do some initializations required to run the tests
        logit("Setting up the test [%s]" % (self.description))
        return self

    def run(self, stdin_values=None):
        """Execute the configured command and store the result in self.status."""
        logit("Running command: [%s]" % (self.command))
        stdin_param = None
        expect_error_value = False
        expect_stderr_value = False
        start_time = time.time()
        if Config.dummy_run:
            # Dummy mode just echoes the command instead of executing it.
            self.status = self.env.run("echo [%s]" % (self.command))
        else:
            if stdin_values is not None:
                stdin_param = bytearray(stdin_values, 'utf-8')
            if self.negative_case:
                expect_error_value = True
                expect_stderr_value = True
            elif self.ignore_err:
                expect_error_value = False
                expect_stderr_value = True
            self.status = self.env.run(
                self.command,
                expect_error=expect_error_value,
                stdin=stdin_param,
                expect_stderr=expect_stderr_value)
        end_time = time.time()
        self.print_time(round(end_time - start_time, 3))
        logit("returncode: [%d]" % (self.status.returncode))
        logit("stdout: [%s]" % (self.status.stdout))
        logit("stderr: [%s]" % (self.status.stderr))
        return self

    def print_time(self, time):
        """Print the elapsed time, human-readable or raw ms per Config."""
        time_str = "Execution time - "
        if (Config.time_readable_format):
            if (time < 1):
                time_str += str(round(time * 1000)) + " ms"
            elif (time > 60):
                m, s = divmod(time, 60)
                time_str += str(round(m)) + " min " + str(round(
                    s, 3)) + " seconds"
            else:
                time_str += str(round(time, 3)) + " seconds"
        else:
            time_str += str(round(time * 1000)) + " ms"
        print(time_str)

    def teardown(self):
        # Do some cleanup
        logit("Running teardown [%s]" % (self.description) + " ")
        shutil.rmtree(self.working_dir, ignore_errors=True)
        return self

    def execute_test(self, negative_case=False, ignore_err=False,
                     stdin_values=None):
        """Run the full setup/run/teardown cycle for this test case."""
        print("\nTest case [%s] - " % (self.description), end="")
        self.negative_case = negative_case
        self.ignore_err = ignore_err
        self.setup()
        if stdin_values is not None:
            self.run(stdin_values)
        else:
            self.run()
        self.teardown()
        return self

    def command_is_successful(self):
        if not Config.dummy_run:
            assert self.status.returncode == 0, 'Test Failed'
        print("Command was successful.")
        return self

    def command_should_fail(self):
        if not Config.dummy_run:
            assert self.status.returncode != 0, 'Test Failed'
        print("Command has failed as expected.")
        return self

    def command_response_should_have(self, msg):
        if not Config.dummy_run:
            assert msg in self.status.stdout
        print("Response has [%s]." % (msg))
        return self

    def command_response_should_be_empty(self):
        if not Config.dummy_run:
            assert self.status.stdout == ""
        print("Response is empty")
        return self

    def command_response_should_not_have(self, msg):
        if not Config.dummy_run:
            assert msg not in self.status.stdout
        print("Response does not have [%s]." % (msg))
        return self

    def command_error_should_have(self, msg):
        if not Config.dummy_run:
            assert msg in self.status.stderr
        print("Error message has [%s]." % (msg))
        return self

    def command_error_should_not_have(self, msg):
        if not Config.dummy_run:
            # BUG FIX: the original asserted `msg in stderr`, contradicting
            # the method name and the message printed below; assert absence.
            assert msg not in self.status.stderr
        print("Error message does not have [%s]." % (msg))
        return self

    def command_created_file(self, file_name):
        logit("Checking for file [%s] in files created [%s]" %
              (file_name, ', '.join(self.status.files_created)))
        if not Config.dummy_run:
            assert file_name in self.status.files_created, \
                file_name + ' NOT created'
        print("File [%s] was created." % (file_name))
        return self

    def command_deleted_file(self, file_name):
        logit("Checking for file [%s] in files deleted [%s]" %
              (file_name, ', '.join(self.status.files_deleted)))
        if not Config.dummy_run:
            assert file_name in self.status.files_deleted, \
                file_name + ' NOT deleted'
        print("File [%s] was deleted." % (file_name))
        return self

    def command_updated_file(self, file_name):
        logit("Checking for file [%s] in files updated [%s]" %
              (file_name, ', '.join(self.status.files_updated)))
        if not Config.dummy_run:
            assert file_name in self.status.files_updated, \
                file_name + ' NOT updated'
        print("File [%s] was updated." % (file_name))
        return self

    # Match the output with the given pattern.
    def command_should_match_pattern(self, pattern):
        regex = re.compile(pattern)
        if not Config.dummy_run:
            assert bool(regex.match(self.status.stdout)), (
                "Expected output %s.\nActual output %s" %
                (pattern, self.status.stdout))
        print("Command was successful.")
        return self

    # Match the std error with the given pattern.
    def command_error_should_match_pattern(self, pattern):
        regex = re.compile(pattern)
        if not Config.dummy_run:
            # BUG FIX: the failure message previously showed stdout even
            # though the match is performed against stderr.
            assert bool(regex.match(self.status.stderr)), (
                "Expected output %s.\nActual output %s" %
                (pattern, self.status.stderr))
        print("Command was successful.")
        return self

    # Get exit status for command
    def get_exitstatus(self):
        return self.status.returncode
def test_init_empty_directory():
    """'init' in an empty directory succeeds, reporting on stderr only."""
    test_env = ScriptTestEnvironment()
    outcome = test_env.run('ansible-container', 'init', expect_stderr=True)
    assert outcome.stdout == ''
    assert "Ansible Container initialized" in outcome.stderr
#!/usr/bin/env python # encoding: utf-8 from os.path import dirname, abspath, join as pjoin from functools import partial from scripttest import TestFileEnvironment here = dirname(abspath(__file__)) env = TestFileEnvironment(pjoin(here, './test-output')) feed1 = '%s/feeds/jenkins.rss' % here cmd = 'rsstail' run = partial(env.run, expect_stderr=True) def test_run_no_args_no_opts(): r = run(cmd) assert r.returncode == 0 assert 'General Options:' in r.stdout def test_run_initial(): r = run(cmd + ' -e 1 --initial 3 %s' % feed1) assert len(r.stdout.splitlines()) == 3 def test_run_order(): r = run(cmd + ' -e 1 --reverse --title %s' % feed1) exp = [
def env():
    """Yield a scripttest environment rooted at the current directory."""
    environment = TestFileEnvironment(cwd=".")
    yield environment
def before_scenario(context, _):
    """Attach a fresh scripttest environment to the behave context."""
    features_path = os.path.join(root_dir, 'tmp/features')
    context.env = TestFileEnvironment(base_path=features_path)