def configure(self, options, conf):
    """Enable TeamCity reporting and hook nose's capture plugin.

    The capture plugin's buffer is replaced with a FlushingStringIO that
    forwards captured stdout to TeamCity per test, so output appears under
    the right test instead of being held until the end of the run.
    """
    # Only active when the process actually runs inside a TeamCity build.
    self.enabled = is_running_under_teamcity()
    self.config = conf
    if self._capture_plugin_enabled():
        capture_plugin = self._get_capture_plugin()
        # Keep the original hooks so the wrappers can delegate to them.
        old_before_test = capture_plugin.beforeTest
        old_after_test = capture_plugin.afterTest
        old_format_error = capture_plugin.formatError

        def newCaptureBeforeTest(test):
            rv = old_before_test(test)
            test_id = self.get_test_id(test)
            # Swap in a buffer that streams captured data to TeamCity as it
            # is written (flow id and test name are both the test id here).
            capture_plugin._buf = FlushingStringIO(lambda data: dump_test_stdout(self.messages, test_id, test_id, data))
            sys.stdout = capture_plugin._buf
            return rv

        def newCaptureAfterTest(test):
            # Flush any pending captured output before the plugin tears down.
            if isinstance(capture_plugin._buf, FlushingStringIO):
                capture_plugin._buf.flush()
            return old_after_test(test)

        def newCaptureFormatError(test, err):
            # Flush before error formatting so output precedes the failure.
            if isinstance(capture_plugin._buf, FlushingStringIO):
                capture_plugin._buf.flush()
            return old_format_error(test, err)

        capture_plugin.beforeTest = newCaptureBeforeTest
        capture_plugin.afterTest = newCaptureAfterTest
        capture_plugin.formatError = newCaptureFormatError
def add_options(cls, parser):
    """Register the --teamcity / --no-teamcity command-line flags on *parser*."""
    register = cls._add_option
    register(parser, '--teamcity',
             default=is_running_under_teamcity(),
             help="Force output of JetBrains TeamCity service messages")
    register(parser, '--no-teamcity',
             default=False,
             help="Disable output of JetBrains TeamCity service messages (even under TeamCity build)")
def pytest_runtest_teardown(item: pytest.Item):
    '''Hook to run after every test.'''
    # Inject footer at end of test, may be followed by additional teardown.
    # Don't do this when running in teamcity, where it's redundant.
    if teamcity.is_running_under_teamcity():
        return
    suite_name = sdk_diag.get_test_suite_name(item)
    print(''' ========== ======= END: {}::{} =========='''.format(suite_name, item.name))
def pytest_runtest_setup(item: pytest.Item):
    '''Hook to run before every test.'''
    # Inject header at start of test, following automatic "path/to/test_file.py::test_name":
    # Don't do this when running in teamcity, where it's redundant.
    if not teamcity.is_running_under_teamcity():
        suite_name = sdk_diag.get_test_suite_name(item)
        print(''' ========== ======= START: {}::{} =========='''.format(suite_name, item.name))
    if INTEGRATION_TEST_LOG_COLLECTION:
        sdk_diag.handle_test_setup(item)
    sdk_utils.check_dcos_min_version_mark(item)
def pytest_configure(config):
    """Register the TeamCity reporter plugin when TeamCity output is wanted.

    Explicit --teamcity / --no-teamcity flags override environment detection.
    """
    opts = config.option
    if opts.no_teamcity >= 1:
        active = False
    elif opts.teamcity >= 1:
        active = True
    else:
        active = is_running_under_teamcity()
    if not active:
        return
    capture_on = getattr(opts, 'capture', 'fd') != 'no'
    reporter = EchoTeamCityMessages(capture_on, _get_coverage_controller(config))
    config._teamcityReporting = reporter
    config.pluginmanager.register(reporter)
def main():
    """Build the cppcheck command line from CLI options and run it.

    Returns the cppcheck exit code (non-zero on findings, because the
    command line includes --error-exitcode=-1).
    """
    parser = argparse.ArgumentParser(description="Run cppcheck")
    parser.add_argument("--ac", type=str, help="Additional checks (default all)", default="all")
    parser.add_argument("--idir", type=str, help="Path of file with all include directories", default="include_directories.txt")
    parser.add_argument("--ip", type=argparse.FileType("r"), help="Path of file with directories to analyze", default="include_paths.txt")
    parser.add_argument("--idef", type=argparse.FileType("r"), help="Path of file with included definitions", default="include_defines.txt")
    parser.add_argument("--xp", type=argparse.FileType("r"), help="Path of file with directories or files to exclude from analysis", default="exclude_paths.txt")
    parser.add_argument("--xdef", type=argparse.FileType("r"), help="Path of file with definitions to exclude", default="exclude_defines.txt")
    parser.add_argument("--s", type=str, help="Path of file with warnings to suppress", default="suppressions.txt")
    parser.add_argument("--ot", type=str, help="The output template", default=None)
    parser.add_argument("--ext", type=str, help="Direct cppcheck arguments", default=None)
    # get all data from command line
    args = parser.parse_args()
    # if the output format is None identify whether under TC or not and set message format accordingly
    # if format set to TC will also need to escape messages
    if args.ot is None:
        args.ot = "tc" if teamcity.is_running_under_teamcity() else "vs"
    arguments = " --inline-suppr --error-exitcode=-1 --inconclusive --force" + \
                " --enable=" + args.ac + \
                ("" if args.ext is None else " " + args.ext) + \
                create_exclude_defines_argument(args.xdef) + \
                create_include_defines_argument(args.idef) + \
                create_include_paths_argument(args.ip) + \
                " --includes-file=" + args.idir + \
                create_exclude_paths_argument(args.xp) + \
                " --template=" + ('"##teamcity[buildProblem description=\'{file}:{line}: {severity} ({id}): {message}\']"' if args.ot == "tc" else args.ot) + \
                " --suppressions-list=" + args.s
    # run the process and redirect both stdout and stderr for further processing if needed
    if args.ot == "tc":
        process = subprocess.Popen(get_cppcheck_path() + arguments, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        while True:
            more = handle_output_line(process.stdout.readline().decode())
            if not more:
                break
        # BUG FIX: Popen.returncode stays None until the child is reaped;
        # wait() blocks for exit and returns the real exit status.
        return process.wait()
    else:
        return subprocess.call(get_cppcheck_path() + arguments)
def pytest_configure(config):
    """Register the TeamCity reporter plugin when TeamCity output is wanted.

    Explicit --teamcity / --no-teamcity flags override environment detection;
    ini options tune skipping of passed-test output and diff ordering.
    """
    opts = config.option
    if opts.no_teamcity >= 1:
        active = False
    elif opts.teamcity >= 1:
        active = True
    else:
        active = is_running_under_teamcity()
    if not active:
        return
    reporter = EchoTeamCityMessages(
        getattr(opts, 'capture', 'fd') != 'no',
        _get_coverage_controller(config),
        bool(config.getini('skippassedoutput')),
        bool(config.getini('swapdiff')),
    )
    config._teamcityReporting = reporter
    config.pluginmanager.register(reporter)
def __init__(self):
    """Pick a single logger backend that matches the build environment."""
    backend = TeamcityServiceMessages() if teamcity.is_running_under_teamcity() else PrintLogger()
    self.loggers = [backend]
def parse_options(cls, options):
    """Switch the report format to TeamCity messages when appropriate.

    --no-teamcity always wins; otherwise --teamcity or a detected TeamCity
    environment selects the 'teamcity-messages' format.
    """
    if options.no_teamcity:
        return
    if options.teamcity or is_running_under_teamcity():
        options.format = 'teamcity-messages'
def configure(self, options, conf):
    """Remember the nose config and enable the plugin only under TeamCity."""
    self.config = conf
    self.enabled = is_running_under_teamcity()
def configure(self, options, conf):
    """Enable the plugin exactly when running inside a TeamCity build."""
    self.enabled = is_running_under_teamcity()
# flake8/pep8 extension: report statistics as TeamCity service messages.
import pep8
from teamcity.messages import TeamcityServiceMessages
from teamcity import __version__, is_running_under_teamcity

# Plugin identification used by the flake8 extension machinery.
name = 'teamcity_stats'
version = __version__
# On by default only when the process runs inside a TeamCity build.
enable_teamcity = is_running_under_teamcity()


def add_options(parser):
    # Optparse-style callback flag: presence alone enables the plugin.
    parser.add_option('--teamcity_stats', default=False, action='callback', callback=set_option_callback, help="Enable teamcity stats messages")


def set_option_callback(option, opt, value, parser):
    # Callback for --teamcity_stats: force-enable regardless of environment.
    global enable_teamcity
    enable_teamcity = True


def parse_options(options):
    # Install the TeamCity statistics reporter unless the plugin is disabled.
    if not enable_teamcity:
        return
    options.reporter = TeamcityStatisticsReport
    options.report = TeamcityStatisticsReport(options)
    options.jobs = None  # needs to be disabled, flake8 overrides the report if enabled
if sys.version_info[:2] >= (3, 3, ): from unittest import mock else: try: import mock except ImportError: raise ImportError("The BatchApps Python Client test suite requires " "the mock package to run on Python 3.2 and below.\n" "Please install this package to continue.") try: from teamcity import is_running_under_teamcity from teamcity.unittestpy import TeamcityTestRunner TC_BUILD = is_running_under_teamcity() except ImportError: TC_BUILD = False if __name__ == '__main__': if TC_BUILD: runner = TeamcityTestRunner() else: runner = TextTestRunner(verbosity=2) test_dir = os.path.dirname(__file__) top_dir = os.path.dirname(os.path.dirname(test_dir)) test_loader = TestLoader() suite = test_loader.discover(test_dir,
import os
import platform
import sys
import time
import unittest
from datetime import datetime
from subprocess import check_output, CalledProcessError, STDOUT
from typing import Iterable, Optional, List
from unittest import TestCase

from teamcity import is_running_under_teamcity
from teamcity.unittestpy import TeamcityTestRunner

# Java home: the first CLI argument names the environment variable to read;
# otherwise fall back to the conventional JAVA_HOME variable.
JAVA_HOME = os.getenv(sys.argv[1]) if len(sys.argv) > 1 else os.getenv("JAVA_HOME")
AGENT_NAME = "memory_agent"
IS_UNDER_TEAMCITY = is_running_under_teamcity()
# Prebuilt proxy classes are only expected to exist on the TeamCity agents.
PROXY_COMPILED_PATH = os.path.join('test_data', 'proxy', 'build') if IS_UNDER_TEAMCITY else None

if JAVA_HOME is None:
    print("Java not found. Please specify JAVA_HOME and try again.")
    exit(1)


def get_java_executable() -> str:
    """Return the path of the `java` launcher under JAVA_HOME."""
    return os.path.join(JAVA_HOME, 'bin', 'java')


def get_java_compiler() -> str:
    """Return the path of the `javac` compiler under JAVA_HOME."""
    return os.path.join(JAVA_HOME, 'bin', 'javac')
def run(suite):
    """Execute *suite*, picking the runner that matches the environment."""
    runner = TeamcityTestRunner() if is_running_under_teamcity() else unittest.TextTestRunner()
    runner.run(suite)
# Command-line tool: extract size statistics from a linker map file and
# report them, optionally as TeamCity build-statistic messages.
parser = argparse.ArgumentParser(description="Extracts information from Map files.")
parser.add_argument("mapfile", help="Path to map file to parse.")
parser.add_argument("--tc", help="Use TeamCity output.", action="store_true")
parser.add_argument("--devname", help="Label for the chip executable")
args = parser.parse_args()
basename = os.path.basename(args.mapfile)
if not args.devname:
    # Default device label: map file name without its extension.
    args.devname = basename.split(".")[0]
logging.basicConfig(level=logging.DEBUG)
with open(args.mapfile, 'r') as fobj:
    mapFile = MapFileHelper(fobj.read(), deviceName=args.devname)
blockTable = (mapFile.placement.blockTable)
objectTable = (mapFile.placement.objectTable)
# Aggregate object sizes per (kindMod, module) pair.
modTable = objectTable.pivot_table(values="size", index=['kindMod', 'module'], aggfunc=np.sum)
if args.tc or is_running_under_teamcity():
    # print(blockTable)
    # print(modTable)
    # NOTE(review): P1/P2 appear to be the read-only / read-write placement
    # blocks of the map file — confirm against MapFileHelper.
    print(tc_buildStatistic(args.devname, "ro", "total", blockTable["P1"]["size"]))
    print(tc_buildStatistic(args.devname, "rw", "total", blockTable["P2"]["size"]))
    to_teamcity(modTable, args.devname)
else:
    print(blockTable)
    print(modTable)
def run_suite(self, suite, **kwargs):
    """Run *suite*; prefer the TeamCity runner inside a TeamCity build."""
    if not is_running_under_teamcity():
        return unittest.TextTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)
    return TeamcityTestRunner().run(suite)
'django.request': { 'handlers': ['console'], 'level': 'ERROR', 'propagate': False, }, 'rest_models': { 'handlers': ['console'], 'level': 'ERROR', 'propagate': False, } } } if os.environ.get('QUIET', False): LOGGING['handlers']['console']['level'] = 70 try: # pragma: nocover import teamcity if teamcity.is_running_under_teamcity(): # pragma: nocover TEST_RUNNER = "test_runner.NoCheckTeamcityDjangoRunner" else: TEST_RUNNER = "test_runner.NoCheckDiscoverRunner" except ImportError: # pragma: nocover temacity = None if teamcity and teamcity.is_running_under_teamcity(): # pragma: nocover TEST_RUNNER = "test_runner.NoCheckTeamcityDjangoRunner" else: TEST_RUNNER = "test_runner.NoCheckDiscoverRunner"
def main(self):
    """Run all cleanup policies against the Artifactory server and print a summary table."""
    # remove trailing slash
    self._artifactory_server = self._artifactory_server.rstrip("/")
    if self._remove_empty_folder:
        # Built-in single-rule mode: only prune empty folders.
        rules = [
            CleanupPolicy(
                "Cleaning up empty folders in local repositories",
                delete_empty_folder(),
            )
        ]
    else:
        # Load RULES from the user-supplied python config module.
        try:
            self._config = self._config.replace(".py", "")
            sys.path.append(".")
            rules = getattr(importlib.import_module(self._config), "RULES")
        except ImportError as error:
            print("Error: {}".format(error))
            exit(1)
    self._destroy_or_verbose()
    artifactory_session = requests.Session()
    artifactory_session.auth = HTTPBasicAuth(self._user, self._password)
    # Validate that all rules is CleanupPolicy
    for cleanup_rule in rules:
        if not isinstance(cleanup_rule, CleanupPolicy):
            sys.exit(
                "Rule '{}' is not CleanupPolicy, check this please".format(
                    cleanup_rule))
    if self._policy_name:
        # Optional filter: keep only rules whose name contains the requested policy.
        rules = [rule for rule in rules if self._policy_name in rule.name]
        if not rules:
            sys.exit("Rule with name '{}' does not found".format(
                self._policy_name))
    table = PrettyTable()
    table.field_names = ["Cleanup Policy", "Files count", "Size"]
    table.align["Cleanup Policy"] = "l"
    total_size = 0
    for cleanup_rule in rules:  # type: CleanupPolicy
        with TC.block(cleanup_rule.name):
            cleanup_rule.init(artifactory_session, self._artifactory_server)
            # prepare
            with TC.block("AQL filter"):
                cleanup_rule.aql_filter()
            # Get artifacts
            with TC.block("Get artifacts"):
                print("*" * 80)
                print("AQL Query:")
                print(cleanup_rule.aql_text)
                print("*" * 80)
                artifacts = cleanup_rule.get_artifacts()
                print("Found {} artifacts".format(len(artifacts)))
            # Filter
            with TC.block("Filter results"):
                artifacts_to_remove = cleanup_rule.filter(artifacts)
                print("Found {} artifacts AFTER filtering".format(
                    len(artifacts_to_remove)))
            # Delete or debug
            for artifact in artifacts_to_remove:
                # test name for teamcity
                repo_underscore = (artifact["repo"].replace(".", "_").replace(
                    "/", "_"))
                path_underscore = (artifact["path"].replace(".", "_").replace(
                    "/", "_"))
                name_underscore = (artifact["name"].replace(".", "_").replace(
                    "/", "_"))
                test_name = "cleanup.{}.{}_{}".format(
                    repo_underscore, path_underscore, name_underscore)
                # Use teamcity test for output all removed artifact. If local - use suppress output
                ctx_mgr = (TC.test(test_name) if is_running_under_teamcity()
                           else contextlib.suppress())
                with ctx_mgr:
                    cleanup_rule.delete(artifact, destroy=self._destroy)
            # Info
            count_artifacts = len(artifacts_to_remove)
            print("Deleted artifacts count: {}".format(count_artifacts))
            try:
                artifacts_size = sum([x["size"] for x in artifacts_to_remove])
                total_size += artifacts_size
                artifacts_size = size(artifacts_size)
                print("Summary size: {}".format(artifacts_size))
                table.add_row(
                    [cleanup_rule.name, count_artifacts, artifacts_size])
            except KeyError:
                # Some artifact listings carry no "size" field; skip totals then.
                print("Summary size not defined")
            print()
    table.add_row(["", "", ""])
    table.add_row(["Total size: {}".format(size(total_size)), "", ""])
    print(table)
3, ): from unittest import mock else: try: import mock except ImportError: raise ImportError("The BatchApps Python Client test suite requires " "the mock package to run on Python 3.2 and below.\n" "Please install this package to continue.") try: from teamcity import is_running_under_teamcity from teamcity.unittestpy import TeamcityTestRunner TC_BUILD = is_running_under_teamcity() except ImportError: TC_BUILD = False if __name__ == '__main__': if TC_BUILD: runner = TeamcityTestRunner() else: runner = TextTestRunner(verbosity=2) test_dir = os.path.dirname(__file__) top_dir = os.path.dirname(os.path.dirname(test_dir)) test_loader = TestLoader() suite = test_loader.discover(test_dir,
"""Test entry point: run the example tests, reporting to TeamCity when available."""
import sys
import unittest

# Prefer the TeamCity runner when the package is installed and we are
# actually running inside a TeamCity build; otherwise plain text output.
try:
    from teamcity import is_running_under_teamcity
    from teamcity.unittestpy import TeamcityTestRunner
    runner = TeamcityTestRunner() if is_running_under_teamcity() else unittest.TextTestRunner()
except ImportError:
    runner = unittest.TextTestRunner()

from tests import ExampleTest

# Registry of runnable suites, addressable by name from the command line.
tests = {
    "ExampleTest": ExampleTest
}

if __name__ == "__main__":
    from application import application
    application.start_testing()
    # Optional argv[1] selects a single suite; otherwise run everything.
    test_name = sys.argv[1] if len(sys.argv) > 1 else None
    tests = tuple(
        [
            unittest.loader.findTestCases(tests[test_suit_name])
            for test_suit_name in tests
            if test_suit_name == test_name or not test_name
        ]
    )
    # Exit status 0 on success, 1 on any failure.
    sys.exit(not runner.run(unittest.TestSuite(tests=tests)).wasSuccessful())
nightSuite = suites['nightly'] allSuite = suites['all'] smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases( small_test_cases)) nightSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases( night_test_cases)) allSuite.addTests( KratosUnittest.TestLoader().loadTestsFromTestCases(all_test_cases)) return suites if __name__ == '__main__': is_team_city = False try: from teamcity import is_running_under_teamcity from teamcity.unittestpy import TeamcityTestRunner is_team_city = is_running_under_teamcity() except ImportError: pass if is_team_city: import unittest runner = TeamcityTestRunner() runner.run(AssambleTestSuites(is_team_city)) else: KratosUnittest.runTests(AssambleTestSuites(is_team_city))
# flake8/pep8 extension: emit check results as TeamCity service messages.
import pep8
import re
from teamcity.messages import TeamcityServiceMessages
from teamcity import __version__, is_running_under_teamcity

# Plugin identification used by the flake8 extension machinery.
name = 'teamcity'
version = __version__
# On by default only when the process runs inside a TeamCity build.
enable_teamcity = is_running_under_teamcity()


def add_options(parser):
    # Optparse-style callback flag: presence alone enables the plugin.
    parser.add_option('--teamcity', default=False, action='callback', callback=set_option_callback, help="Enable teamcity messages")


def set_option_callback(option, opt, value, parser):
    # Callback for --teamcity: force-enable regardless of environment.
    global enable_teamcity
    enable_teamcity = True


def parse_options(options):
    # Install the TeamCity reporter unless the plugin is disabled.
    if not enable_teamcity:
        return
    options.reporter = TeamcityReport
    options.report = TeamcityReport(options)
    options.jobs = None  # needs to be disabled, flake8 overrides the report if enabled
def run_suite(self, suite, **kwargs):
    """Run *suite* with the environment-appropriate unittest runner."""
    if is_running_under_teamcity():
        runner = TeamcityTestRunner()
    else:
        runner = unittest.TextTestRunner(verbosity=self.verbosity, failfast=self.failfast)
    return runner.run(suite)
def main(args=None):
    """ Most options are the same as Modernize for pass-through """
    parser = optparse.OptionParser(usage=usage, version="%s %s" % (SCRIPT_NAME, __version__))
    parser.formatter.format_usage = format_usage
    parser.add_option("-v", "--verbose", action="store_true",
                      help="Show more verbose logging.")
    parser.add_option("-f", "--fix", action="append", default=[],
                      help="Each FIX specifies a transformation; '-f default' includes default fixers.")
    parser.add_option("-x", "--nofix", action="append", default=[],
                      help="Prevent a fixer from being run.")
    parser.add_option("-p", "--print-function", action="store_true",
                      help="Modify the grammar so that print() is a function.")
    parser.add_option("-w", "--write", action="store_true",
                      help="Write back modified files.")
    parser.add_option("-n", "--nobackups", action="store_true", default=False,
                      help="Don't write backups for modified files.")
    parser.add_option("--six-unicode", action="store_true", default=False,
                      help="Wrap unicode literals in six.u().")
    parser.add_option("--future-unicode", action="store_true", default=False,
                      help="Use 'from __future__ import unicode_literals'"
                           "(only useful for Python 2.6+).")
    parser.add_option("--no-six", action="store_true", default=False,
                      help="Exclude fixes that depend on the six package.")
    parser.add_option("--enforce", action="store_true", default=False,
                      help="Returns non-zero exit code of any fixers had to be applied. "
                           "Useful for enforcing Python 3 compatibility.")
    parser.add_option("-e", "--exclude", action="append", default=[],
                      help="Exclude a file or directory")
    parser.add_option("--teamcity", action="store", type="choice", default=None, choices=['true', 'false'],
                      help="Force TeamCity state [true/false]")
    (options, args) = parser.parse_args(args)
    if options.verbose:
        global VERBOSE
        VERBOSE = True
    global TC, USE_TEAMCITY
    # Split parsed options into those handled locally (teamcity, exclude)
    # and those passed through to the underlying modernize tool.
    elems_included = args[:]
    elems_excluded = []
    args_passed = []
    args_local = []
    for option, value in options.__dict__.items():
        option = option.replace('_', '-')  # restore dashes changed by optparse
        target = args_passed
        if option in ['teamcity']:
            target = args_local
            if value == 'false':
                USE_TEAMCITY = False
            elif value == 'true':
                USE_TEAMCITY = True
            else:
                # Not forced on the command line: auto-detect the environment.
                USE_TEAMCITY = is_running_under_teamcity()
        elif isinstance(value, list):
            if option in ['exclude']:
                elems_excluded.extend(options.exclude)
            else:
                new_opts = ["--{}={}".format(option, v) for v in value]
                target.extend(new_opts)
        elif isinstance(value, bool):
            if value is True:
                new_opts = ["--{}".format(option)]
                target.extend(new_opts)
        elif value is None:
            pass
        else:
            print("Argument '{}' not handled here: {}".format(option, value))
            parser.print_help()
            return -1
    # Capture RefactoringTool log output so it can be inspected later.
    logger = logging.getLogger('RefactoringTool')
    logger.setLevel(LOG_LEVEL)
    ch = logging.StreamHandler(LOG_CAPTURE_STRING)
    formatter = logging.Formatter('%(name)s: %(message)s')
    ch.setFormatter(formatter)
    ch.setLevel(LOG_LEVEL)
    logger.addHandler(ch)
    if not elems_included:
        elems_included = ['.']
    print('{} {} (using libmodernize {})'.format(SCRIPT_NAME, __version__, __version_modernize__))
    print()
    if USE_TEAMCITY:
        print('Note: Running with TeamCity hooks')
        TC = TeamcityServiceMessages()
    else:
        print('Note: NOT running with TeamCity hooks')
    print()
    if VERBOSE:
        print("Original options:", options)
        print("Original args: ", args)
        print("Local options: ", args_local)
        print("Passing options: ", args_passed)
        print("Included elems: ", elems_included)
        print("Excluded elems: ", elems_excluded)
        print()
    # Partition exclusions into files vs directories for the tree walk.
    excluded_files = set()
    excluded_dirs = set()
    for exclusion in elems_excluded:
        exclusion = exclusion.rstrip('/')
        if os.path.isfile(exclusion):
            excluded_files.add(exclusion)
        elif os.path.isdir(exclusion):
            excluded_dirs.add(exclusion)
        else:
            print("UNKNOWN:", exclusion)
    for root in elems_included:
        walk_tree(args=args_passed, root=root, excluded_files=excluded_files, excluded_dirs=excluded_dirs)
    if VERBOSE:
        print('=' * 78)
    return 0
def run_command(args):
    # Run the app's test suites, optionally restricted to tests named in
    # args.tests (comma separated, case-insensitive substring match).
    # Python 2 code (print statements, dict.iteritems).
    pick_tests = []
    if args.tests:
        pick_tests = [t.lower() for t in args.tests.split(",")]
        print "Picking tests {}".format(pick_tests)
    # Set up a mock tenant so we can bootstrap the app and inspect
    # the modules within.
    setup_tenant()
    conf = get_config()
    test_modules = []
    for app in conf.drift_app['apps']:
        m = importlib.import_module(app)
        path = dirname(m.__file__)
        tests_path = os.path.join(path, "tests")
        if not os.path.exists(tests_path):
            print "No tests found for app '{}'".format(app)
            continue
        if not os.path.exists(os.path.join(tests_path, "__init__.py")):
            print "No tests found for app '{}' (missing __init__.py)".format(app)
            continue
        n = 0
        for filename in os.listdir(tests_path):
            if filename.endswith(".py") and not filename.startswith("__"):
                test_module_name = app + ".tests." + filename[:-3]
                test_modules.append(test_module_name)
                n += 1
        print "app '{}' has {} test modules".format(app, n)
    suites = {}
    for module_name in test_modules:
        # first import it to see if we get any errors
        m = importlib.import_module(module_name)
        suites[module_name] = unittest.defaultTestLoader.loadTestsFromName(module_name)
    tests_to_run = []
    tests_to_skip = []
    for module_name, suite in suites.iteritems():
        for test_cases in suite:
            for t in test_cases:
                if pick_tests:
                    # NOTE(review): a test is appended once per matching pick
                    # and once per non-matching pick, so it can land in both
                    # lists (and more than once) — confirm intended behavior.
                    for p in pick_tests:
                        if p in str(t).lower():
                            tests_to_run.append(t)
                        else:
                            tests_to_skip.append(t)
                else:
                    tests_to_run.append(t)
    print "Running {} test(s) from {} module(s)".format(len(tests_to_run), len(suites))
    print "Skipping {} test(s)".format(len(tests_to_skip))
    if pick_tests:
        print "Just running the following tests:"
        if not tests_to_run:
            print " No tests found!"
        for t in tests_to_run:
            print " {}".format(t)
    if args.preview:
        return
    test_suite = unittest.TestSuite(tests_to_run)
    verbosity = 1
    if args.verbose:
        verbosity = 2
    if not args.logging:
        logging.disable(logging.WARNING)
    # Pick the TeamCity runner only when the package imported successfully
    # AND we actually run under TeamCity.
    cls = unittest.TextTestRunner
    if is_running_under_teamcity and TeamcityTestRunner:
        if is_running_under_teamcity():
            cls = TeamcityTestRunner
    results = cls(verbosity=verbosity, failfast=args.failfast).run(test_suite)
    # if a tenant was not specified on the commandline we destroy it
    if not results.wasSuccessful():
        sys.exit(1)
self.driver.find_element_by_xpath( "//a[@class='locales__item']").click() #presentation_english self.driver.find_element_by_xpath( "//a[@class ='page-header__button page-header__button--presentation button']" ).click() time.sleep(2) self.driver.find_element_by_xpath("//div[text()='BeEasy']") time.sleep(2) self.driver.switch_to.window(main_window) #whitepaper_english self.driver.find_element_by_xpath( "//a[@class ='page-header__button button mod']").click() time.sleep(4) self.driver.find_elements_by_xpath( "//section[@class='linkAnnotation']") self.driver.switch_to.window(main_window) def tearDown(self): self.driver.close() if __name__ == '__main__': if is_running_under_teamcity(): runner = TeamcityTestRunner() else: runner = unittest.TextTestRunner() unittest.main(testRunner=runner)
def main_func():
    """Entry point: either run the self tests or start the game server.

    Python 2 code (print statements); the server side uses Twisted.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Bandwidth Wars")
    parser.add_argument('--port', default=7171, type=int)
    parser.add_argument('--turn_max', default=5, help="The maximum time allocated per turn", type=int)
    parser.add_argument('--tokens', default=2, help="How many tokens should be generated for this game.", type=int)
    parser.add_argument('--open-play', dest='open_play', action='store_true', help="Whether the server allows just anyone to play.")
    parser.add_argument('--selftest', action='store_true', help="Run the tests.")
    parser.add_argument('--teamcity', action='store_true', help="Using teamcity. Must be used with --selftest. teamcity-messages must be installed.")
    parser.set_defaults(open_play=False)
    args = parser.parse_args()
    if args.selftest:
        # Discover and run the unit tests, then exit without starting a server.
        import teamcity
        import teamcity.unittestpy
        import unittest
        if teamcity.is_running_under_teamcity() or args.teamcity:
            runner = teamcity.unittestpy.TeamcityTestRunner()
        else:
            runner = unittest.TextTestRunner(verbosity=2)
        import os.path
        directory = os.path.dirname(__file__)
        print "test directory", directory
        testsuite = unittest.TestLoader().discover(directory)
        runner.run(testsuite)
        return
    import models.game
    game = models.game.Game(max_interval=args.turn_max, tokens=args.tokens, open_play=args.open_play)
    from twisted.internet import protocol, reactor
    import twisted.protocols.basic

    class BW(twisted.protocols.basic.LineReceiver):
        # One connection per player; newline-delimited JSON protocol.
        def __init__(self):
            self.delimiter = '\n'

        def connectionMade(self):
            self.transport.write('{"msg":"Welcome to Bandwidth Wars","ver":0.1}\n')

        def lineReceived(self, data):
            # Tag log lines with the player's game token when known.
            if hasattr(self, "player"):
                player = self.player.gameToken
            else:
                player = "???"
            logging.debug("raw socket from %s> %s" % (player, data))
            result = game.process_raw_command(data, self)
            self.send_raw(result)

        def send_raw(self, data):
            if hasattr(self, "player") and self.player != None:
                player = self.player.gameToken
            else:
                player = "???"
            logging.debug("raw socket to %s> %s" % (player, data))
            self.transport.write(data + "\n")

    class BWFactory(protocol.Factory):
        def buildProtocol(self, addr):
            return BW()

    logging.info("Listening on port %d", args.port)
    reactor.listenTCP(args.port, BWFactory())
    reactor.run()
f.write(inflated) # Turn the inflated xml (which is just a string) into a in memory XML document doc = fromstring(inflated) # Verification of enveloped signature node = doc.find(".//{%s}Signature" % xmlsec.DSigNs) key_file = join(dirname(__file__), '..', '..', '..', 'certs/example.com', 'example.pubkey') dsigCtx = xmlsec.DSigCtx() signKey = xmlsec.Key.load(key_file, xmlsec.KeyDataFormatPem, None) signKey.name = 'example.pubkey' # Note: the assignment below effectively copies the key dsigCtx.signKey = signKey # Add ID attributes different from xml:id # See the Notes on https://pypi.python.org/pypi/dm.xmlsec.binding/1.3.2 xmlsec.addIDs(doc, ["ID"]) # This raises an exception if the document does not verify dsigCtx.verify(node) if __name__ == '__main__': if is_running_under_teamcity(): runner = TeamcityTestRunner() else: runner = unittest.TextTestRunner() unittest.main(testRunner=runner)
def run_command(args):
    # Provision a test database (unless a test target or db is given on the
    # command line), run the app test suites, and drop the database after.
    # Python 2 code (print statements, dict.iteritems).
    from drift.utils import uuid_string
    from drift.appmodule import app as _app
    from drift.tenant import create_db, drop_db
    from drift.utils import get_tier_name
    tier_name = get_tier_name()
    tenant = None
    if args.target:
        print "Using test target: {}".format(args.target)
        os.environ["drift_test_target"] = args.target
    else:
        # only provision the DB if the test target is not specified
        db_host = _app.config["systest_db"]["server"]
        if args.db:
            tenant = args.db
            print "Using database {} from commandline on host {}".format(tenant, db_host)
            create_db(tenant, db_host, tier_name)
        else:
            # Fresh throwaway database with a unique name.
            tenant = "test{}".format(uuid_string())
            print "Creating database {} on host {}".format(tenant, db_host)
            create_db(tenant, db_host, tier_name)
        os.environ["drift_test_database"] = tenant
    pick_tests = []
    if args.tests:
        pick_tests = [t.lower() for t in args.tests.split(",")]
        print "Picking tests {}".format(pick_tests)
    test_modules = []
    for app in _app.config["apps"]:
        m = importlib.import_module(app)
        path = dirname(m.__file__)
        tests_path = os.path.join(path, "tests")
        if not os.path.exists(tests_path):
            print "No tests found for app '{}'".format(app)
            continue
        if not os.path.exists(os.path.join(tests_path, "__init__.py")):
            print "No tests found for app '{}' (missing __init__.py)".format(
                app)
            continue
        n = 0
        for filename in os.listdir(tests_path):
            if filename.endswith(".py") and not filename.startswith("__"):
                test_module_name = app + ".tests." + filename[:-3]
                test_modules.append(test_module_name)
                n += 1
        print "app '{}' has {} test modules".format(app, n)
    suites = {}
    for module_name in test_modules:
        # first import it to see if we get any errors
        m = importlib.import_module(module_name)
        suites[module_name] = unittest.defaultTestLoader.loadTestsFromName(
            module_name)
    tests_to_run = []
    tests_to_skip = []
    for module_name, suite in suites.iteritems():
        for test_cases in suite:
            for t in test_cases:
                if pick_tests:
                    # NOTE(review): a test is appended once per matching pick
                    # and once per non-matching pick, so it can land in both
                    # lists (and more than once) — confirm intended behavior.
                    for p in pick_tests:
                        if p in str(t).lower():
                            tests_to_run.append(t)
                        else:
                            tests_to_skip.append(t)
                else:
                    tests_to_run.append(t)
    print "Running {} test(s) from {} module(s)".format(
        len(tests_to_run), len(suites))
    if tests_to_skip:
        print "Skipping {} test(s)".format(len(tests_to_skip))
    if pick_tests:
        print "Just running the following tests:"
        if not tests_to_run:
            print " No tests found!"
        for t in tests_to_run:
            print " {}".format(t)
    test_suite = unittest.TestSuite(tests_to_run)
    verbosity = 1
    if args.verbose:
        verbosity = 2
    if not args.logging:
        logging.disable(logging.WARNING)
    # Pick the TeamCity runner only when the package imported successfully
    # AND we actually run under TeamCity.
    cls = unittest.TextTestRunner
    if is_running_under_teamcity and TeamcityTestRunner:
        if is_running_under_teamcity():
            cls = TeamcityTestRunner
    results = cls(verbosity=verbosity, failfast=args.failfast).run(test_suite)
    # if a tenant was not specified on the commandline we destroy it
    if not args.db and tenant:
        drop_db(tenant, db_host, tier_name)
        pass
    if not results.wasSuccessful():
        sys.exit(1)