def test_compose_v2_build_and_run():
    """Build the postgres example, run it detached, then stop both services."""
    env = ScriptTestEnvironment()
    build = env.run('ansible-container', '--debug', 'build',
                    cwd=project_dir('postgres'), expect_stderr=True)
    assert "ansible_ansible-container_1 exited with code 0" in build.stderr
    volumes = env.run('docker', 'volume', 'ls')
    assert "ansible_logs" in volumes.stdout
    assert "ansible_postgres-postgresql_var_lib_postgresql_data" in volumes.stdout
    deploy = env.run('ansible-container', '--debug', 'run', '-d',
                     cwd=project_dir('postgres'), expect_stderr=True)
    assert "Deploying application in detached mode" in deploy.stderr
    # Give the containers a chance to start and reach a 'ready' state.
    time.sleep(10)
    stop = env.run('ansible-container', '--debug', 'stop',
                   cwd=project_dir('postgres'), expect_stderr=True)
    assert "Stopping ansible_postgresql_1 ... done" in stop.stderr
    assert "Stopping ansible_nginx_1 ... done" in stop.stderr
class PhonebookTestCase(unittest.TestCase):
    """Integration tests for the phonebook CLI, run against a fixture copy."""

    def setUp(self):
        self.env = TestFileEnvironment('./scratch')
        self.prefix = os.getcwd()
        # Copy the phonebook fixture under a separate name so test runs never
        # overwrite the pristine fixture file.  The copy is opened in text
        # mode ('w'): the source is read as text, and writing str lines to a
        # handle opened 'wb' raises TypeError on Python 3 (the original bug).
        with open('phonebook_fixture.txt') as src:
            with open('phonebook_fixture.pb', 'w') as dst:
                for line in src:
                    dst.write(line)

    def tearDown(self):
        os.remove('phonebook_fixture.pb')

    # Helper methods for ensuring things were/weren't added to files.
    def assert_not_added(self, entry_fields):
        result = self.env.run('cat %s/phonebook_fixture.pb' % self.prefix)
        for value in entry_fields:
            nose.tools.assert_not_in(value, result.stdout)

    def assert_added(self, entry_fields):
        result = self.env.run('cat %s/phonebook_fixture.pb' % self.prefix)
        for value in entry_fields:
            nose.tools.assert_in(value, result.stdout)
def get_env(self, check_help_version=True):
    """Build a fresh scripttest environment.

    When *check_help_version* is true, first smoke-test that the script
    responds to --help and --version with exit code 0.
    """
    env = TestFileEnvironment()
    if check_help_version:
        # The script must at least respond to --help...
        r = env.run(self.script, "--help", expect_stderr=self.expect_stderr)
        assert r.returncode == 0
        # ...and must provide a working --version option.
        r = env.run(self.script, "--version", expect_stderr=self.expect_stderr)
        assert r.returncode == 0
        print("stderr", r.stderr)
        print("stdout", r.stdout)
        verstr = r.stdout.strip()  # deprecation warnings break --version
    return env
def main():
    """Run the csv2ofx scripttests.

    Exits 0 on success; exits with a unified diff or error message on failure.
    """
    start = timer()
    script = 'bin/csv2ofx --help'
    env = TestFileEnvironment('.scripttest')
    tmpfile = NamedTemporaryFile(dir=parent_dir, delete=False)
    tmpname = tmpfile.name

    tests = [
        (2, 'default.csv', 'default.qif', 'oq'),
        (3, 'default.csv', 'default_w_splits.qif', 'oqS Category'),
        (4, 'xero.csv', 'xero.qif', 'oqc Description -m xero'),
        (5, 'mint.csv', 'mint.qif', 'oqS Category -m mint'),
        (
            6, 'mint.csv', 'mint_alt.qif',
            'oqs20150613 -e20150614 -S Category -m mint'
        ),
        (7, 'default.csv', 'default.ofx', 'oe 20150908'),
        (8, 'default.csv', 'default_w_splits.ofx', 'oS Category'),
        (9, 'mint.csv', 'mint.ofx', 'oS Category -m mint'),
    ]

    try:
        env.run(script, cwd=parent_dir)
        print('\nScripttest #1: %s ... ok' % script)

        for test_num, example_filename, check_filename, opts in tests:
            example = p.join(example_dir, example_filename)
            check = p.join(check_dir, check_filename)
            script = 'bin/csv2ofx -%s %s %s' % (opts, example, tmpname)
            env.run(script, cwd=parent_dir)

            # Context managers close both files every iteration.  The
            # original leaked both handles, and its `finally` referenced
            # `checkfile.close` without parentheses (a no-op) — raising
            # NameError if a failure occurred before the loop started.
            with open(check) as checkfile, open(tmpname) as outfile:
                args = [checkfile.readlines(), outfile.readlines()]

            kwargs = {'fromfile': 'expected', 'tofile': 'got'}
            diffs = list(unified_diff(*args, **kwargs))

            if diffs:
                loc = ' '.join(script.split(' ')[:-1])
                msg = "ERROR from test #%i! Output:\n\t%s\n" % (test_num, loc)
                msg += "doesn't match hash of\n\t%s\n" % check
                sys.stderr.write(msg)
                sys.exit(''.join(diffs))
            else:
                short_script = 'csv2ofx -%s %s %s' % (
                    opts, example_filename, check_filename)
                print('Scripttest #%i: %s ... ok' % (test_num, short_script))
    except Exception as e:
        sys.exit(e)
    else:
        time = timer() - start
        print('%s' % '-' * 70)
        print('Ran %i scripttests in %0.3fs\n\nOK' % (test_num, time))
        sys.exit(0)
    finally:
        os.unlink(tmpname)
class TestIntegration(unittest.TestCase):
    """End-to-end tests of the gpc command-line tool."""

    def setUp(self):
        self.env = TestFileEnvironment(test_path, template_path=template_path)

    def test_init(self):
        res = self.env.run("gpc", "init", expect_stderr=True)
        created = res.files_created.keys()
        self.assertTrue("log" in created)
        self.assertTrue("log/data" in created)
        self.assertTrue("log/schema.sql" in created)
        self.assertTrue("storage" in created)

    def test_make_target(self):
        self.env.run("gpc", "init", expect_stderr=True)
        self.env.writefile("gpc.yaml", frompath="simple.yaml")
        res = self.env.run("gpc", "make", "c", expect_stderr=True)
        created = list(res.files_created.keys())
        self.assertTrue("c" in created)
        created.remove("c")
        # Besides the target itself, making should populate storage and log.
        self.assertTrue(any(s.startswith("storage/") for s in created))
        self.assertTrue(any(s.startswith("log/data/") for s in created))

    def test_make_target_cached(self):
        # Pre-seed the environment with existing cache, log and storage dirs.
        call(["cp", "-r", template_path + "/.gpc", test_path])
        call(["cp", "-r", template_path + "/log", test_path])
        call(["cp", "-r", template_path + "/storage", test_path])
        self.env.writefile("gpc.yaml", frompath="simple.yaml")
        res = self.env.run("gpc", "make", "c", expect_stderr=True)
        created = res.files_created.keys()
        self.assertTrue("c" in created)
        self.assertTrue(len(created) == 1)
def test_stop_service_minimal_docker_container():
    """Stopping one named service must not stop its sibling."""
    env = ScriptTestEnvironment()
    env.run('ansible-container', 'run', '--detached',
            cwd=project_dir('minimal_sleep'), expect_stderr=True)
    res = env.run('ansible-container', 'stop', 'minimal1',
                  cwd=project_dir('minimal_sleep'), expect_stderr=True)
    assert "Stopping ansible_minimal1_1 ... done" in res.stderr
    assert "Stopping ansible_minimal2_1 ... done" not in res.stderr
def test_cli():
    """Exercise the bild uploader CLI across its flag combinations."""
    env = TestFileEnvironment('./test-output')
    img1 = os.path.join(current_dir, 'test1.png')
    img2 = os.path.join(current_dir, 'test2.png')
    imgx = os.path.join(current_dir, '*.png')
    url_pattern = r'http://s1.bild.me/bilder/\d+/\d+.*'

    res = env.run('bild', '-f', img1)
    assert re.findall(url_pattern, res.stdout)

    res = env.run('bild', '-qf', img1, img2)
    assert len(re.findall(url_pattern, res.stdout)) == 2

    res = env.run('bild', '-lf', img1)
    assert re.findall(url_pattern, res.stdout)
    assert '[URL=http://www.bild.me][IMG]' in res.stdout

    res = env.run('bild', '-qlf', img1, img2)
    assert re.findall(url_pattern, res.stdout)
    assert len(re.findall(r'\[URL=http://www.bild.me\]\[IMG\]', res.stdout)) == 2

    # A glob should expand to both test images.
    res = env.run('bild', '-qf', imgx)
    assert len(re.findall(url_pattern, res.stdout)) == 2
class CLITest(unittest.TestCase):
    """Command-line tests for the githook daemon entry point."""

    def setUp(self):
        import githook
        self.githook = githook
        self.githook.app.config['TESTING'] = True
        self.tempdir = tempfile.mkdtemp()
        self.env = TestFileEnvironment(
            os.path.join(self.tempdir, 'test-output'),
            ignore_hidden=False)

    @unittest.skipIf(*is_travis)
    def test_no_config(self):
        result = self.env.run(
            'bin/python %s' % os.path.join(here, "..", "__init__.py"),
            expect_error=True,
            cwd=os.path.join(here, '../', '../'))
        self.assertEqual(result.returncode, 1)
        self.assertEqual(
            result.stderr,
            u'CRITICAL:root:Configuration file not found. Please specify one.\n')

    # TODO This loops. :D Need another way of testing daemons.
    @unittest.skipIf(True, 'It loops')
    def test_ok_config(self):
        self.env.run(
            'bin/python -m githook -c githook/tests/config/okconfig.ini',
            cwd=os.path.join(here, '../', '../'))
class TestBasicCommands(TestCase):
    """Test basic command-line invoke of the program."""

    def setUp(self):
        self.env = TestFileEnvironment('./test-output')

    def tearDown(self):
        pass

    def test_default(self):
        res = self.env.run('../run.py', '-s', '1')
        assert 'Timeout' in res.stdout
        self.assertEqual(res.returncode, 0)

    def test_collect(self):
        res = self.env.run('../run.py', '-s', '1', '-c')
        assert 'Collecting' in res.stdout
        self.assertEqual(res.returncode, 0)

    def test_log(self):
        res = self.env.run('../run.py', '-s', '1', '--log', 'log.txt')
        assert 'Timeout' in res.stdout
        assert 'log.txt' in res.files_created
        self.assertEqual(res.returncode, 0)

    def test_segment(self):
        res = self.env.run('../run.py', '-s', '1', '--segment', '0.2')
        assert 'Timeout' in res.stdout
        self.assertEqual(res.returncode, 0)
class TestProgramsHelp(unittest.TestCase):
    """Check that every command-line program responds to -h.

    The original used Python 2 print statements, which are syntax errors on
    Python 3 and inconsistent with the rest of the file; they are converted
    to print() calls here.
    """

    def spawn_tests(self):
        # Returns the first lower-cased .py program found (behavior kept).
        programs = os.listdir(programs_WD)
        not_checked = []
        for prog in programs:
            if prog in ['__init__.py', 'program_envs.py']:
                continue
            if not prog.endswith('.py') or '#' in prog:
                not_checked.append(prog)
                continue
            if prog.lower() != prog:
                continue
            return prog

    def setUp(self):
        if os.path.exists('./new-test-output'):
            shutil.rmtree('./new-test-output')
        self.env = TestFileEnvironment('./new-test-output')

    def tearDown(self):
        if os.path.exists('./new-test-output'):
            shutil.rmtree('./new-test-output')

    def test_cmd_line(self):
        print('programs_WD', programs_WD)
        programs = os.listdir(programs_WD)
        not_checked = []
        for prog in programs:
            print("Testing help message for:", prog)
            if prog in ['__init__.py', 'program_envs.py']:
                continue
            if 'gui' in prog:
                continue
            if not prog.endswith('.py') or '#' in prog:
                not_checked.append(prog)
                continue
            if prog.lower() != prog:
                continue
            # NOTE(review): sys.platform is 'win32' even on 64-bit Windows;
            # 'win62' looks like a typo that never matches — confirm intent.
            if sys.platform in ['win32', 'win62']:
                prog = prog[:-3]
            res = self.env.run(prog, '-h')
        print('not_checked', not_checked)

    def test_guis(self):
        tests = ['pmag_gui.py', 'magic_gui.py', 'demag_gui.py',
                 'thellier_gui.py']
        for prog in tests:
            if sys.platform in ['win32', 'win62']:
                prog = prog[:-3]
            print('testing:', prog)
            res = self.env.run(prog, '-h')
def main():
    """Run the csv2ofx scripttests.

    Exits 0 on success; exits with a unified diff or error message on failure.
    """
    start = timer()
    script = 'bin/csv2ofx --help'
    env = TestFileEnvironment('.scripttest')
    tmpfile = NamedTemporaryFile(dir=parent_dir, delete=False)
    tmpname = tmpfile.name

    tests = [
        (2, 'default.csv', 'default.qif', 'oq'),
        (3, 'default.csv', 'default_w_splits.qif', 'oqS Category'),
        (4, 'xero.csv', 'xero.qif', 'oqc Description -m xero'),
        (5, 'mint.csv', 'mint.qif', 'oqS Category -m mint'),
        (6, 'mint.csv', 'mint_alt.qif',
         'oqs20150613 -e20150614 -S Category -m mint'),
        (7, 'default.csv', 'default.ofx', 'oe 20150908'),
        (8, 'default.csv', 'default_w_splits.ofx', 'oS Category'),
        (9, 'mint.csv', 'mint.ofx', 'oS Category -m mint'),
    ]

    try:
        env.run(script, cwd=parent_dir)
        print('\nScripttest #1: %s ... ok' % script)

        for test_num, example_filename, check_filename, opts in tests:
            example = p.join(example_dir, example_filename)
            check = p.join(check_dir, check_filename)
            script = 'bin/csv2ofx -%s %s %s' % (opts, example, tmpname)
            env.run(script, cwd=parent_dir)

            # Context managers close both files each iteration.  The original
            # leaked both handles, and its `finally` referenced
            # `checkfile.close` without parentheses (a no-op) — raising
            # NameError if a failure occurred before the loop started.
            with open(check) as checkfile, open(tmpname) as outfile:
                args = [checkfile.readlines(), outfile.readlines()]

            kwargs = {'fromfile': 'expected', 'tofile': 'got'}
            diffs = list(unified_diff(*args, **kwargs))

            if diffs:
                loc = ' '.join(script.split(' ')[:-1])
                msg = "ERROR from test #%i! Output:\n\t%s\n" % (test_num, loc)
                msg += "doesn't match hash of\n\t%s\n" % check
                sys.stderr.write(msg)
                sys.exit(''.join(diffs))
            else:
                short_script = 'csv2ofx -%s %s %s' % (
                    opts, example_filename, check_filename)
                print('Scripttest #%i: %s ... ok' % (test_num, short_script))
    except Exception as e:
        sys.exit(e)
    else:
        time = timer() - start
        print('%s' % '-' * 70)
        print('Ran %i scripttests in %0.3fs\n\nOK' % (test_num, time))
        sys.exit(0)
    finally:
        os.unlink(tmpname)
def test_dojorun_dryrun(self):
    """Testing dojorun.py script in dry-run mode."""
    # get_env() performs its own checks; its environment is discarded here.
    env = self.get_env()
    # Fresh environment seeded from the data directory for the dry run.
    env = TestFileEnvironment(template_path=pdj_data.dirpath)
    env.writefile("Si.psp8", frompath="Si.psp8")
    env.writefile("Si.djrepo", frompath="Si.djrepo_empty")
    env.run(self.script, "Si.psp8", self.loglevel, self.verbose, "--dry-run")
def test_build_with_variables():
    """--with-variables should inject env vars into the build container."""
    env = ScriptTestEnvironment()
    build = env.run('ansible-container', 'build', '--save-build-container',
                    '--with-variables', 'foo=bar', 'bar=baz',
                    cwd=project_dir('minimal'), expect_stderr=True)
    assert "Aborting on container exit" in build.stdout
    assert "Exported minimal-minimal with image ID " in build.stderr
    inspect = env.run('docker', 'inspect', '--format="{{ .Config.Env }}"',
                      'ansible_ansible-container_1', expect_stderr=True)
    assert "foo=bar" in inspect.stdout
    assert "bar=baz" in inspect.stdout
def test_build_with_volumes():
    """--with-volumes should mount the given host path into the container."""
    env = ScriptTestEnvironment()
    volume_string = "{0}:{1}:{2}".format(os.getcwd(), '/projectdir', 'ro')
    build = env.run('ansible-container', 'build', '--save-build-container',
                    '--with-volumes', volume_string,
                    cwd=project_dir('minimal'), expect_stderr=True)
    assert "Aborting on container exit" in build.stdout
    assert "Exported minimal-minimal with image ID " in build.stderr
    inspect = env.run(
        'docker', 'inspect',
        '--format="{{range .Mounts}}{{ .Source }}:{{ .Destination }}:{{ .Mode}} {{ end }}"',
        'ansible_ansible-container_1', expect_stderr=True)
    mounts = inspect.stdout.split(' ')
    assert volume_string in mounts
def test_shipit_kube():
    """shipit kube should generate a role and a playbook for the project."""
    env = ScriptTestEnvironment()
    # Should run shipit kube to success.
    res = env.run('ansible-container', '--debug', 'shipit', 'kube',
                  '--pull-from', 'https://index.docker.io/v1/ansible',
                  cwd=project_dir('postgres'), expect_error=True)
    assert res.returncode == 0
    assert "Role postgres created" in res.stderr
    # Should create a role.
    res = env.run('ls ansible/roles', cwd=project_dir('postgres'))
    assert "postgres-kubernetes" in res.stdout
    # Should create a playbook.
    res = env.run('ls ansible', cwd=project_dir('postgres'))
    assert "shipit-kubernetes.yml" in res.stdout
def test_restart_service_minimal_docker_container():
    """Restarting one named service must not restart its sibling."""
    env = ScriptTestEnvironment()
    env.run('ansible-container', 'run', '--detached',
            cwd=project_dir('minimal_sleep'), expect_stderr=True)
    res = env.run('ansible-container', 'restart', 'minimal1',
                  cwd=project_dir('minimal_sleep'), expect_stderr=True)
    assert "Restarting ansible_minimal1_1 ... done" in res.stderr
    assert "Restarting ansible_minimal2_1 ... done" not in res.stderr
def test_force_stop_minimal_docker_container():
    """stop --force should kill (not gracefully stop) every service."""
    env = ScriptTestEnvironment()
    env.run('ansible-container', 'run', '--detached',
            cwd=project_dir('minimal_sleep'), expect_stderr=True)
    res = env.run('ansible-container', 'stop', '--force',
                  cwd=project_dir('minimal_sleep'), expect_stderr=True)
    assert "Killing ansible_minimal1_1 ... done" in res.stderr
    assert "Killing ansible_minimal2_1 ... done" in res.stderr
def get_env(self):
    """Return a fresh scripttest environment after a --help smoke test."""
    env = TestFileEnvironment()
    # The script must at least respond to --help before the env is handed out.
    env.run(self.script, "--help")
    return env
def test_all_missing_args_yield_errcode(self):
    """Running ssm-starter with no arguments must exit non-zero."""
    parts = ['ssm-starter']
    env = TestFileEnvironment('./test-output')
    result = env.run(' '.join(parts), expect_error=True)
    assert result.returncode != 0
class TestCommands(TestCase):
    """Smoke tests for the run.py send subcommand."""

    @classmethod
    def setUpClass(cls):
        create_run_script()

    def setUp(self):
        self.env = TestFileEnvironment('./test-output')

    def test_send_no_args(self):
        command = 'python ../run.py send'
        self.env.run(*shlex.split(command), expect_error=True)

    def test_send_alias_stdin(self):
        command = 'python ../run.py send -a "%s"' % (TEST_ALIAS)
        self.env.run(*shlex.split(command), stdin=b'Hi')
def test_no_command_shows_help():
    """Invoking with no subcommand prints usage to stderr and exits 2."""
    env = ScriptTestEnvironment()
    res = env.run('ansible-container', expect_error=True)
    assert res.returncode == 2
    assert len(res.stdout) == 0
    assert "usage: ansible-container" in res.stderr
    assert "ansible-container: error:" in res.stderr
class BaseTemplateTest(unittest.TestCase):
    """Shared scaffolding for mr.bob template tests (credit bobtemplates.plone)."""

    def setUp(self):
        self.maxDiff = None
        self.tempdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tempdir)
        # docs: http://pythonpaste.org/scripttest/
        self.env = TestFileEnvironment(
            os.path.join(self.tempdir, 'test-output'),
            ignore_hidden=False,
        )

    def create_template(self):
        """Run mr.bob to create your template."""
        options = {
            'dir': os.path.join(os.path.dirname(__file__)),
            'template': self.template,
            'addon': self.addon,
            'answers_file': self.answers_file,
            'target_dir': self.target_dir,
        }
        return self.env.run(
            'mrbob -O %(target_dir)s --config '
            '%(dir)s/%(answers_file)s bobtemplates.odoo:%(template)s'
            % options)
def run(*dtox_args, **kwargs):
    """Runs docker run with optional dtox_args. Returns ProcResult.

    kwargs:
    - tox_ini=<string>: contents of the tox.ini file to be used
    - setup=<func>: run setup(env) before actual docker run command
    """
    env = STE(TESTS_WORKDIR)
    cmd = ["docker", "run", "--rm", "-i"]
    # Map the tests work dir into /src (read-only).
    cmd.extend(["-v", '{}:/src:ro'.format(TESTS_WORKDIR)])
    # Write tox.ini if the caller supplied one.
    tox_ini = kwargs.pop("tox_ini", None)
    if tox_ini is not None:
        env.writefile("tox.ini", content=tox_ini)
    # Run the caller's setup hook if specified.
    setup = kwargs.pop("setup", None)
    if setup is not None:
        setup(env)
    cmd.append(DOCKER_IMAGE)
    cmd.extend(dtox_args)
    result = env.run(*cmd, **kwargs)
    # Surface docker image pull failures as errors instead of test noise.
    if "Unable to find image" in result.stderr:
        raise ValueError(result.stderr)
    return result
class BaseTemplateTest(unittest.TestCase):
    """Base scaffolding for mr.bob template tests."""

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tempdir)
        # docs: http://pythonpaste.org/scripttest/
        self.env = TestFileEnvironment(
            os.path.join(self.tempdir, 'test-output'),
            ignore_hidden=False,
        )

    def create_template(self):
        """Run mr.bob to create your template."""
        options = {
            'dir': os.path.join(os.path.dirname(__file__)),
            'template': self.template,
            'project': self.project,
        }
        return self.env.run(
            '%(dir)s/bin/mrbob -O %(project)s --config '
            '%(dir)s/test_answers.ini %(dir)s/bobtemplates/%(template)s'
            % options)
class TestSpec(object):
    """Compare generated spec files against stored expected outputs."""

    td_dir = '{0}/test_data/'.format(tests_dir)
    bin_dir = os.path.split(tests_dir)[0] + '/'
    exe = 'python {0}mybin.py'.format(bin_dir)

    def setup_method(self, method):
        self.env = TestFileEnvironment('{0}/test_output/'.format(tests_dir))

    @pytest.mark.parametrize(('package', 'options', 'expected'), [
        ('Jinja2', '-v2.8', 'python-Jinja2.spec'),
        ('Jinja2', '-v2.8 -b3', 'python-Jinja2_base.spec'),
        ('Jinja2', '-v2.8 -t epel7', 'python-Jinja2_epel7.spec'),
        ('Jinja2', '-v2.8 -t epel6', 'python-Jinja2_epel6.spec'),
        ('buildkit', '-v0.2.2 -b2', 'python-buildkit.spec'),
        ('StructArray', '-v0.1 -b2', 'python-StructArray.spec'),
        ('Sphinx', '-v1.5 -r python-sphinx', 'python-sphinx.spec'),
    ])
    @pytest.mark.webtest
    def test_spec(self, package, options, expected):
        with open(self.td_dir + expected) as fi:
            self.spec_content = fi.read()
        res = self.env.run('{0} {1} {2}'.format(self.exe, package, options),
                           expect_stderr=True)
        # The changelog has to be cut from both spec files before comparing.
        assert (set(res.stdout.split('\n')[1:-4])
                == set(self.spec_content.split('\n')[1:-4]))
def main(script, tests, verbose=False, stop=True):
    """
    Main method

    Returns 0 on success, 1 on failure
    """
    failures = 0
    logger = gogo.Gogo(__name__, verbose=verbose).logger
    short_script = p.basename(script)
    env = TestFileEnvironment('.scripttest')
    start = timer()

    for pos, test in enumerate(tests):
        num = pos + 1
        opts, arguments, expected = test
        joined_opts = ' '.join(opts) if opts else ''
        joined_args = '"%s"' % '" "'.join(arguments) if arguments else ''
        command = "%s %s %s" % (script, joined_opts, joined_args)
        short_command = "%s %s %s" % (short_script, joined_opts, joined_args)
        result = env.run(
            command, cwd=p.abspath(p.dirname(p.dirname(__file__))))
        output = result.stdout

        # Normalise expected/actual into comparable line lists.
        if isinstance(expected, bool):
            text = StringIO(output).read()
            outlines = [str(bool(text))]
            checklines = StringIO(str(expected)).readlines()
        elif p.isfile(expected):
            outlines = StringIO(output).readlines()
            with open(expected, encoding='utf-8') as f:
                checklines = f.readlines()
        else:
            outlines = StringIO(output).readlines()
            checklines = StringIO(expected).readlines()

        args = [checklines, outlines]
        kwargs = {'fromfile': 'expected', 'tofile': 'got'}
        diffs = ''.join(unified_diff(*args, **kwargs))

        if diffs:
            failures += 1
            msg = "ERROR! Output from test #%i:\n %s\n" % (num, short_command)
            msg += "doesn't match:\n %s\n" % expected
            msg += diffs if diffs else ''
        else:
            logger.debug(output)
            msg = 'Scripttest #%i: %s ... ok' % (num, short_command)

        logger.info(msg)

        if stop and failures:
            break

    time = timer() - start
    logger.info('%s' % '-' * 70)
    end = 'FAILED (failures=%i)' % failures if failures else 'OK'
    logger.info('Ran %i scripttests in %0.3fs\n\n%s' % (num, time, end))
    sys.exit(failures)
class BaseTemplateTest(unittest.TestCase):
    """Base class for all spirit.bob test cases."""

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tempdir)
        # docs: http://pythonpaste.org/scripttest/
        self.env = TestFileEnvironment(
            os.path.join(self.tempdir, 'test-output'),
            ignore_hidden=False,
        )

    def create_template(self):
        """Run mr.bob to create your template."""
        options = {
            'dir': os.path.join(os.path.dirname(__file__)),
            'template': self.template,
            'answers_file': self.answers_file,
        }
        return self.env.run(
            '{dir}/bin/mrbob --config '
            '{dir}/{answers_file} {dir}/src/spirit/bob/{template}'.format(
                **options))
def test_setting_ansible_container_envar():
    """Each service's build output should show ANSIBLE_CONTAINER=1 set."""
    env = ScriptTestEnvironment()
    res = env.run('ansible-container', '--debug', 'build',
                  cwd=project_dir('environment'), expect_stderr=True)
    assert "web MYVAR=foo ANSIBLE_CONTAINER=1" in res.stdout
    assert "db MYVAR=foo ANSIBLE_CONTAINER=1" in res.stdout
    assert "mw ANSIBLE_CONTAINER=1" in res.stdout
def test_run_minimal_docker_container():
    """The minimal project should run to completion with exit code 0."""
    env = ScriptTestEnvironment()
    res = env.run('ansible-container', 'run', cwd=project_dir('minimal'),
                  expect_stderr=True)
    assert "ansible_minimal_1 exited with code 0" in res.stdout
def test_script__start(self):
    """Write a config file, run 'cli.py get', and check for the success message."""
    env = TestFileEnvironment('./.tmp')
    ini = configparser.ConfigParser()
    ini[LogParam.DEFAULT] = {
        'host_name': '[email protected]',
        'shell': 'ssh',
        'log_cmd': 'log_to_rom',
        'log_clear_cmd': 'log_clear',
        'log_extension': 'tar.gz',
        'remote_log_dir': '/root',
        'remote_dist_dir': '/mnt/log',
        'local_src_dir': '../',
        'convert_rule': '../tests/rule.csv',
        'merge_dir': 'logs',
        'usb_dir': '/mnt/USB0',
    }
    config_path = os.path.join(os.getcwd(), '.tmp', LogParam.FILE_NAME)
    with open(config_path, 'w') as fh:
        ini.write(fh)
    result = env.run('../logger/cli.py get -c --debug', cwd='.tmp',
                     expect_stderr=True)
    print(result)
    # The expected stdout message means "finished normally" in Japanese.
    self.assertRegex(result.stdout, '正常に終了しました。')
    self.assertTrue(len(result.files_created) > 0)
class TestSpec(object):
    """Compare generated spec files against stored expected outputs."""

    td_dir = "{0}/test_data/".format(tests_dir)
    bin_dir = os.path.split(tests_dir)[0] + "/"
    exe = "python {0}mybin.py".format(bin_dir)

    def setup_method(self, method):
        self.env = TestFileEnvironment("{0}/test_output/".format(tests_dir))

    @pytest.mark.parametrize(
        ("package", "options", "expected"),
        [
            ("Jinja2", "", "python-Jinja2.spec"),
            ("Jinja2", "-b3", "python-Jinja2_base.spec"),
            ("Jinja2", "-t epel7", "python-Jinja2_epel7.spec"),
            ("Jinja2", "-t epel6", "python-Jinja2_epel6.spec"),
            ("buildkit", "-b2", "python-buildkit.spec"),
            ("StructArray", "-b2", "python-StructArray.spec"),
            ("Sphinx", "-r python-sphinx", "python-sphinx.spec"),
        ],
    )
    @pytest.mark.spectest
    def test_spec(self, package, options, expected):
        with open(self.td_dir + expected) as fi:
            self.spec_content = fi.read()
        res = self.env.run("{0} {1} {2}".format(self.exe, package, options))
        # The changelog has to be cut from both spec files before comparing.
        assert (set(res.stdout.split("\n")[1:-4])
                == set(self.spec_content.split("\n")[1:-4]))
def test_run_minimal_docker_container_in_detached_mode():
    """run --detached should announce detached deployment on stderr."""
    env = ScriptTestEnvironment()
    res = env.run('ansible-container', 'run', '--detached',
                  cwd=project_dir('minimal'), expect_stderr=True)
    assert "Deploying application in detached mode" in res.stderr
def test_install_role_requirements():
    """Building the requirements project should install the listed role."""
    env = ScriptTestEnvironment()
    res = env.run('ansible-container', '--debug', 'build',
                  cwd=project_dir('requirements'), expect_stderr=True)
    assert "ansible-role-apache was installed successfully" in res.stdout
def test_build_minimal_docker_container():
    """Building the minimal project should finish and export an image."""
    env = ScriptTestEnvironment()
    res = env.run('ansible-container', 'build', cwd=project_dir('minimal'),
                  expect_stderr=True)
    assert "Aborting on container exit" in res.stdout
    assert "Exported minimal-minimal with image ID " in res.stderr
def test_script(self):
    """--dry mode should echo piped CSV rows straight back to stdout."""
    with HTTMock(mock_TwitterK):
        env = TestFileEnvironment('myria_upload')
        res = env.run(
            '''myria_upload --relation TwitterK --program test --overwrite --hostname localhost --port 12345 --dry''',
            stdin='foo,bar\n1,b\n3,c', expect_stderr=True)
        eq_(res.stdout, '''1,b\n3,c\n''')
def test_missing_command_arg_yield_errcode(self):
    """Omitting --command must make ssm-starter exit non-zero."""
    parts = [
        'ssm-starter',
        '--ssm-name /dev/ssm_starter_test_app',
    ]
    env = TestFileEnvironment('./test-output')
    result = env.run(' '.join(parts), expect_error=True)
    assert result.returncode != 0
def main(verbose=False):
    """Run ckanny's scripttests: main usage, per-command help, and version."""
    env = TestFileEnvironment('.scripttest')
    test_num = 1

    # Test main usage.
    result = env.run('%s --help' % script)
    if verbose:
        print(result.stdout)
    usage = 'usage: ckanny [<namespace>.]<command> [<args>]'
    assert result.stdout.split('\n')[0] == usage
    print('\nScripttest: #%i ... ok' % test_num)
    test_num += 1

    # Test command usage.
    commands = [
        'ds.delete', 'ds.update', 'ds.upload', 'fs.fetch', 'fs.migrate',
        'fs.upload', 'pk.create', 'pk.update']

    for command in commands:
        result = env.run('%s %s --help' % (script, command))
        if verbose:
            print(result.stdout)
        usage = 'usage: %s %s\n' % (script, command)
        assert ' '.join(result.stdout.split(' ')[:3]) == usage
        print('Scripttest: %s ... ok' % command)
        test_num += 1

    # Test version.
    result = env.run('%s ver' % script)
    if verbose:
        print(result.stdout)
    assert result.stdout.split('\n')[0] == 'v%s' % version
    print('Scripttest: #%i ... ok' % test_num)

    # End of testing.
    print('-----------------------------')
    print('Ran %i tests\n\nOK' % test_num)
    exit(0)
class TestManagement(TestCase):
    """Integration tests for the example project's manage.py commands."""

    @staticmethod
    def _remove_db_file():
        # Fixed typo: was `_remvoe_db_file`.
        if os.path.exists(DB_PATH):
            os.remove(DB_PATH)

    def setUp(self):
        # Removes any stale database file.
        self._remove_db_file()
        # Sets up the ScriptTest testing environment.
        self.env = TestFileEnvironment(
            base_path=TESTS_OUTPUT_PATH,
            start_clear=True,
        )
        os.chdir(TESTS_OUTPUT_PATH)

    def tearDown(self):
        # Restores the current directory.
        os.chdir(BASE_PATH)
        # Removes files created during the tests.
        self.env.clear()
        # Removes the test output folder.
        shutil.rmtree(TESTS_OUTPUT_PATH)
        # Removes the database file.
        self._remove_db_file()

    def test_test_cmd(self):
        # assertEquals is a deprecated alias; assertEqual is used throughout.
        r = self.env.run('%s test' % os.path.join(EXAMPLE_PATH, 'manage.py'))
        self.assertEqual(r.stdout, "Hello world!\n")

    def test_create_admin(self):
        r = self.env.run(
            '%s create_admin' % os.path.join(EXAMPLE_PATH, 'manage.py'))
        self.assertEqual(
            r.stdout,
            "Admin user %(user)s (password: %(pwd)s) created successfully.\n"
            % {'user': ADMIN_USER, 'pwd': ADMIN_PWD})
        # Running it a second time must report the user already exists.
        r = self.env.run(
            '%s create_admin' % os.path.join(EXAMPLE_PATH, 'manage.py'))
        self.assertEqual(
            r.stdout,
            "Admin user %(user)s already exists!\n" % {'user': ADMIN_USER})
def test_envvar_arguments_without_role(self):
    """-e env vars must reach the container even when no role is given."""
    parts = [
        'iam-docker-run',
        '-e TESTENVARG=MyTestEnvArg',
        '--image mesosphere/aws-cli:latest',
        "--full-entrypoint \"printenv TESTENVARG\"",
    ]
    env = TestFileEnvironment('./test-output')
    result = env.run(' '.join(parts))
    assert 'MyTestEnvArg' in result.stdout
def test_shipit_minimal_docker_container():
    """Bare shipit on the minimal project exits 1 but reports the created role."""
    env = ScriptTestEnvironment()
    res = env.run('ansible-container', 'shipit', cwd=project_dir('minimal'),
                  expect_error=True)
    assert res.returncode == 1
    assert "minimal was created successfully" in res.stdout
    assert "Role minimal created" in res.stderr
def test_shm_size_argument(self):
    """--shm-size must set /dev/shm to the requested size inside the container."""
    parts = [
        'iam-docker-run',
        '-e TESTENVARG=MyTestEnvArg',
        '--image debian:latest',
        '--shm-size 128m',
        "--full-entrypoint \"df /dev/shm -m --output=size | sed 1d | sed 's/ //g'\"",
    ]
    env = TestFileEnvironment('./test-output')
    result = env.run(' '.join(parts))
    assert '128' in result.stdout
class CLITest(unittest.TestCase):
    """Exit-code checks for the script's basic argument handling."""

    def setUp(self):
        self.env = TestFileEnvironment('./.scratch')

    def tearDown(self):
        pass

    def test_can_run_script_without_arguments(self):
        res = self.env.run('%s' % (SCRIPT, ), expect_error=True)
        assert res.returncode == 2

    def test_can_run_script_with_help(self):
        res = self.env.run('%s -h' % (SCRIPT, ))
        assert res.returncode == 0

    def test_run_script_with_invalid_arg(self):
        res = self.env.run('%s --frob' % (SCRIPT, ), expect_error=True)
        assert res.returncode == 2
def main(script, tests, verbose=False, stop=True):
    """
    Returns 0 on success, 1 on failure
    """
    failures = 0
    logger = gogo.Gogo(__name__, verbose=verbose).logger
    short_script = p.basename(script)
    env = TestFileEnvironment(".scripttest")
    start = timer()

    for idx, test in enumerate(tests):
        num = idx + 1
        opts, arguments, expected = test
        joined_opts = " ".join(opts) if opts else ""
        joined_args = '"%s"' % '" "'.join(arguments) if arguments else ""
        command = "%s %s %s" % (script, joined_opts, joined_args)
        short_command = "%s %s %s" % (short_script, joined_opts, joined_args)
        proc = env.run(command, cwd=PARENT_DIR, expect_stderr=True)
        output = proc.stdout

        # Normalise expected/actual into comparable line lists.
        if isinstance(expected, bool):
            text = StringIO(output).read()
            outlines = [str(bool(text))]
            checklines = StringIO(str(expected)).readlines()
        elif p.isfile(expected):
            outlines = StringIO(output).readlines()
            with open(expected, encoding="utf-8") as f:
                checklines = f.readlines()
        else:
            outlines = StringIO(output).readlines()
            checklines = StringIO(expected).readlines()

        args = [checklines, list(filter_output(outlines))]
        kwargs = {"fromfile": "expected", "tofile": "got"}
        diffs = "".join(unified_diff(*args, **kwargs))

        if diffs:
            failures += 1
            msg = "ERROR! Output from test #%i:\n %s\n" % (num, short_command)
            msg += "doesn't match:\n %s\n" % expected
            msg += diffs if diffs else ""
        else:
            logger.debug(output)
            msg = "Scripttest #%i: %s ... ok" % (num, short_command)

        logger.info(msg)

        if stop and failures:
            break

    time = timer() - start
    logger.info("%s" % "-" * 70)
    end = "FAILED (failures=%i)" % failures if failures else "OK"
    logger.info("Ran %i scripttests in %0.3fs\n\n%s", num, time, end)
    sys.exit(failures)
def test_invalid_conflicting_ssm_name(self):
    """ssm-starter must exit non-zero for this conflicting configuration."""
    parts = [
        'ssm-starter',
        '--ssm-name /dev/ssm_starter_test_app',
        '--command "echo"',
    ]
    os.environ['AWS_ENV'] = 'dev'
    env = TestFileEnvironment('./test-output')
    result = env.run(' '.join(parts), expect_error=True)
    assert result.returncode != 0
class TestSekrets:
    """CLI smoke tests for the sekrets tool."""

    def setup(self):
        self.env = TestFileEnvironment('./test-output')

    def test_sekrets_help(self):
        'Test: sekrets help text'
        res = self.env.run('sekrets --help')
        assert res.returncode == 0
        assert res.stderr == ''
class CLITest(unittest.TestCase):
    """Basic exit-code tests: no args, -h, and an unknown flag."""

    def setUp(self):
        self.env = TestFileEnvironment('./.scratch')

    def tearDown(self):
        pass

    def test_can_run_script_without_arguments(self):
        outcome = self.env.run('%s' % (SCRIPT,), expect_error=True)
        assert outcome.returncode == 2

    def test_can_run_script_with_help(self):
        outcome = self.env.run('%s -h' % (SCRIPT,))
        assert outcome.returncode == 0

    def test_run_script_with_invalid_arg(self):
        outcome = self.env.run('%s --frob' % (SCRIPT,), expect_error=True)
        assert outcome.returncode == 2
def test_build_with_var_file():
    """Building with --var-file should succeed and export images."""
    env = ScriptTestEnvironment()
    res = env.run('ansible-container', '--var-file=devel.yaml', '--debug',
                  'build', cwd=project_dir('vartest'), expect_stderr=True)
    assert "ansible_ansible-container_1 exited with code 0" in res.stderr
    assert "Exporting built containers as images..." in res.stderr
def test_run_with_var_file():
    """Running with --var-file should let both services exit cleanly."""
    env = ScriptTestEnvironment()
    res = env.run('ansible-container', '--var-file=devel.yaml', '--debug',
                  'run', cwd=project_dir('vartest'), expect_stderr=True)
    assert "ansible_db_1 exited with code 0" in res.stdout
    assert "ansible_web_1 exited with code 0" in res.stdout
class Shell(Pathed):
    """Base class for command line tests"""

    def setUp(self):
        super(Shell, self).setUp()
        # PATH to the migrations development script folder.
        migrate_path = os.path.dirname(sys.executable)
        log.debug('PATH for ScriptTest: %s', migrate_path)
        self.env = TestFileEnvironment(
            base_path=os.path.join(self.temp_usable_dir, 'env'),
        )

    def run_version(self, repos_path):
        """Return the repository version reported by `migrations version`."""
        result = self.env.run('migrations version %s' % repos_path)
        return int(result.stdout.strip())

    def run_db_version(self, url, repos_path):
        """Return the DB version reported by `migrations db_version`."""
        result = self.env.run('migrations db_version %s %s' % (url, repos_path))
        return int(result.stdout.strip())
class Shell(Pathed):
    """Base class for command line tests"""

    def setUp(self):
        super(Shell, self).setUp()
        # PATH to the migrate development script folder.
        migrate_path = os.path.dirname(sys.executable)
        log.debug('PATH for ScriptTest: %s', migrate_path)
        self.env = TestFileEnvironment(
            base_path=os.path.join(self.temp_usable_dir, 'env'),
            script_path=[migrate_path],
        )

    def run_version(self, repos_path):
        """Return the repository version reported by `migrate version`."""
        result = self.env.run('migrate version %s' % repos_path)
        return int(result.stdout.strip())

    def run_db_version(self, url, repos_path):
        """Return the DB version reported by `migrate db_version`."""
        result = self.env.run('migrate db_version %s %s' % (url, repos_path))
        return int(result.stdout.strip())
def test_load_single_path(self):
    """With AWS_ENV empty, values are loaded from the single ssm path."""
    parts = [
        'ssm-starter',
        '--ssm-name /dev/ssm_starter_test_app',
        '--command "env | grep TEST"',
    ]
    os.environ['AWS_ENV'] = ''
    env = TestFileEnvironment('./test-output')
    result = env.run(' '.join(parts))
    assert 'TEST_STRING1 - setting value from ssm' in result.stdout
class TestSrpm(object):
    """Smoke test that --srpm generation exits successfully."""

    td_dir = "{0}/test_data/".format(tests_dir)
    bin_dir = os.path.split(tests_dir)[0] + "/"
    exe = "python {0}mybin.py".format(bin_dir)

    def setup_method(self, method):
        self.env = TestFileEnvironment("{0}/test_output/".format(tests_dir))

    def test_srpm(self):
        res = self.env.run("{0} Jinja2 --srpm".format(self.exe),
                           expect_stderr=True)
        assert res.returncode == 0
class TestLogPurge(TestCase):
    """File-removal behaviour of the logpurge script."""

    def setUp(self):
        self.tempdir = os.path.join(tempfile.gettempdir(), 'logpurge_tests')
        self.env = TestFileEnvironment(base_path=self.tempdir)

    def test_file_removal(self):
        # Create 100 files stamped at weekly intervals into the past.
        for i in range(100):
            stamp = date.today() - timedelta(days=i * 7)
            filename = 'somefile{0}-{1}'.format(i, stamp)
            open(os.path.join(self.tempdir, filename), 'a').close()
        args = ['-d', self.tempdir, '-f']
        args_str = ' '.join(args)
        self.env.run(
            'python ./logpurge/__init__.py ' + args_str, cwd=os.getcwd())
        # 40 files should survive the purge.
        remaining = self.env.run(
            'bash -c "ls {0} | wc -l"'.format(self.tempdir)).stdout
        self.assertEqual(remaining, '40\n')