def main():
    multi_node = False
    args = _parse_args()
    config = commons.read_user_config()
    run_dir = config['qserv']['qserv_run_dir']
    config_file = os.path.join(run_dir, "qserv-meta.conf")
    parser = ConfigParser.SafeConfigParser()
    parser.read(config_file)
    if parser.get('qserv', 'node_type') in ['master']:
        _LOG.info("Running Integration test in multi-node setup")
        multi_node = True
    ret_code = 1
    if args.do_custom:
        customizer = dataCustomizer.DataCustomizer(args.case_id,
                                                   args.testdata_dir,
                                                   args.work_dir,
                                                   args.do_download,
                                                   args.custom_case_id,
                                                   args.username)
        customizer.run()
    else:
        ret_code = _run_integration_test(args.case_id,
                                         args.testdata_dir,
                                         args.out_dir,
                                         args.mode_list,
                                         multi_node,
                                         args.load_data,
                                         args.stop_at_query)
    sys.exit(ret_code)
def init(args, logfile):
    config = commons.read_user_config()
    logger.init_default_logger(logfile,
                               args.verbose_level,
                               log_path=config['qserv']['log_dir'])
    log = logging.getLogger()
    if args.testdata_dir is not None:
        log.debug("Overriding ~/.lsst/qserv.conf testdata_dir value with {0}"
                  .format(args.testdata_dir))
        config['qserv']['testdata_dir'] = args.testdata_dir
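# --- Illustrative sketch, not from the original sources ---
# Across these modules, the object returned by commons.read_user_config()
# is used as a two-level mapping keyed by section and option, mirroring
# ~/.lsst/qserv.conf. The paths below are placeholders, not real defaults;
# only the keys actually referenced in the surrounding snippets are shown.
config = {
    'qserv': {
        'qserv_run_dir': '/path/to/qserv_run_dir',
        'log_dir': '/path/to/qserv/log',
        'tmp_dir': '/path/to/qserv/tmp',
        'testdata_dir': '/path/to/qserv_testdata/datasets',
    },
}
# Same kind of override as performed in init() above:
config['qserv']['testdata_dir'] = '/custom/datasets'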
@classmethod
def setUpClass(cls):
    super(TestDataCustomizer, cls).setUpClass()
    TestDataCustomizer._config = commons.read_user_config()
    TestDataCustomizer._logger = logging.getLogger(__name__)
    TestDataCustomizer._url = ("lsst-dev.ncsa.illinois.edu:"
                               "/lsst/home/fjammes/public/qserv_testdata/unit_test_file.jpg")
    TestDataCustomizer._dest_file = os.path.join(
        "/", "tmp", os.path.basename(TestDataCustomizer._url))
    # Remove any leftover file from a previous run
    try:
        os.remove(TestDataCustomizer._dest_file)
    except OSError:
        pass
def __init__(self, testdata_dir, out_dirname_prefix):
    self.config = commons.read_user_config()
    self._multi_node = True
    if not out_dirname_prefix:
        out_dirname_prefix = self.config['qserv']['tmp_dir']
    self._out_dirname = out_dirname_prefix
    self._in_dirname = testdata_dir
    self.dataReader = dataConfig.DataConfig(self._in_dirname)
def __init__(self, case_id, multi_node, testdata_dir, out_dirname_prefix=None):
    self.config = commons.read_user_config()
    self._case_id = case_id
    self._multi_node = multi_node
    if not out_dirname_prefix:
        out_dirname_prefix = self.config['qserv']['tmp_dir']
    self._out_dirname = os.path.join(out_dirname_prefix,
                                     "qservTest_case%s" % case_id)
    dataset_dir = Benchmark.getDatasetDir(testdata_dir, case_id)
    self._in_dirname = os.path.join(dataset_dir, 'data')
    self.dataReader = dataConfig.DataConfig(self._in_dirname)
    self._queries_dirname = os.path.join(dataset_dir, "queries")
    self.dataDuplicator = dataDuplicator.DataDuplicator(self.dataReader,
                                                        self._in_dirname,
                                                        self._out_dirname)
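# --- Illustrative sketch, not from the original sources ---
# Assuming config['qserv']['tmp_dir'] is "/tmp/qserv", case_id is "01",
# and Benchmark.getDatasetDir() resolves to a hypothetical
# "<testdata_dir>/case01", the constructor above derives its directories
# roughly as follows:
import os

case_id = "01"
tmp_dir = "/tmp/qserv"                                              # assumed tmp_dir value
dataset_dir = os.path.join("/path/to/datasets", "case" + case_id)   # hypothetical layout

out_dirname = os.path.join(tmp_dir, "qservTest_case%s" % case_id)
in_dirname = os.path.join(dataset_dir, "data")          # input dataset
queries_dirname = os.path.join(dataset_dir, "queries")  # SQL queries to run

print(out_dirname)       # /tmp/qserv/qservTest_case01
print(in_dirname)        # /path/to/datasets/case01/data
print(queries_dirname)   # /path/to/datasets/case01/queries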
def is_multi_node():
    """Check whether the Qserv install is multi-node.

    Assumes integration tests are launched on the master for a
    mono-node instance.

    Returns
    -------
    True if the Qserv install is multi-node
    """
    multi_node = True
    # FIXME: the code below is specific to the mono-node setup
    # and might be removed
    config = commons.read_user_config()
    run_dir = config['qserv']['qserv_run_dir']
    config_file = os.path.join(run_dir, "qserv-meta.conf")
    if os.path.isfile(config_file):
        parser = configparser.SafeConfigParser()
        parser.read(config_file)
        if parser.get('qserv', 'node_type') in ['mono']:
            _LOG.info("Running Integration test in mono-node setup")
            multi_node = False
    return multi_node
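# --- Illustrative sketch, not from the original sources ---
# is_multi_node() parses <qserv_run_dir>/qserv-meta.conf; a mono-node
# install is expected to contain something like:
#
#   [qserv]
#   node_type = mono
#
# A caller could branch on the result to pick a loading strategy
# (load_multi_node/load_mono_node are hypothetical names):
def load_dataset():
    if is_multi_node():
        _LOG.info("Loading dataset on a multi-node Qserv instance")
        # load_multi_node()
    else:
        _LOG.info("Loading dataset on a mono-node Qserv instance")
        # load_mono_node()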
import argparse
import sys
import unittest

from lsst.qserv.admin import commons
from lsst.qserv.admin import logger
from lsst.qserv.tests import benchmark
from lsst.qserv.tests.unittest.testIntegration import suite


def parseArgs():
    parser = argparse.ArgumentParser(
        description='''Qserv integration test suite. Relies on the Python unit
        testing framework and provides test metadata which can be used, for
        example, in a continuous integration framework or by a cluster
        management tool. Configuration values are read from
        ~/.lsst/qserv.conf.''',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser = logger.add_logfile_opt(parser)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parseArgs()
    logger.setup_logging(args.log_conf)
    commons.read_user_config()
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    retcode = int(not result.wasSuccessful())
    sys.exit(retcode)
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function

# ----------------------------
# Imports for other modules --
# ----------------------------
from lsst.qserv.admin import commons

import benchmark

if __name__ == '__main__':
    testdata_dir = '/datapool/tmp/loader_test/test'
    out_dir = '/datapool/tmp/loader_tmp/test'
    config = commons.read_user_config()
    bench = benchmark.Benchmark(testdata_dir, out_dir)
    bench.run()
def _parse_args():
    # used to get default values
    config = commons.read_user_config()

    parser = argparse.ArgumentParser(
        description="Launch one Qserv integration test with fine-grained "
                    "parameters, useful for developers in order to debug/test "
                    "manually a specific part of Qserv. Configuration values "
                    "are read from ~/.lsst/qserv.conf.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser = logger.add_logfile_opt(parser)

    group = parser.add_argument_group(
        'General options',
        'Options related to data loading and querying')
    group.add_argument("-i", "--case-id", dest="case_id",
                       default="01",
                       help="Test case number")
    mode_option_values = ['mysql', 'qserv', 'all']
    group.add_argument("-m", "--mode", dest="mode",
                       choices=mode_option_values,
                       default='all',
                       help="Qserv test modes (direct mysql connection, or via qserv)")

    group = parser.add_argument_group('Load options',
                                      'Options related to data loading')
    group.add_argument("-l", "--load", action="store_true", dest="load_data",
                       default=False,
                       help="Load test dataset prior to query execution")
    default_testdata_dir = None
    if os.environ.get('QSERV_TESTDATA_DIR') is not None:
        default_testdata_dir = os.path.join(
            os.environ.get('QSERV_TESTDATA_DIR'), "datasets")
    group.add_argument("-t", "--testdata-dir", dest="testdata_dir",
                       default=default_testdata_dir,
                       help="Absolute path to directory containing test "
                            "datasets. This value is set, in order of "
                            "precedence, by this option, then by "
                            "QSERV_TESTDATA_DIR/datasets/ if the "
                            "QSERV_TESTDATA_DIR environment variable is set")

    group = parser.add_argument_group('Query options',
                                      'Options related to query execution')
    group.add_argument("-o", "--out-dir", dest="out_dir",
                       default=config['qserv']['tmp_dir'],
                       help="Absolute path to directory for storing query "
                            "results. The results will be stored in "
                            "<OUT_DIR>/qservTest_case<CASE_ID>/")
    group.add_argument("-s", "--stop-at-query", type=int,
                       dest="stop_at_query",
                       default=benchmark.MAX_QUERY,
                       help="Stop at query with given number")

    group = parser.add_argument_group(
        'Input dataset customization options',
        'Options related to input data set customization')
    group.add_argument("-T", "--work-dir", dest="work_dir",
                       default=config['qserv']['tmp_dir'],
                       help="Absolute path to parent directory where source "
                            "test datasets will be copied, and big datasets "
                            "will eventually be downloaded")
    group.add_argument("-C", "--custom", action="store_true", dest="do_custom",
                       default=False,
                       help="If <WORK_DIR>/case<CASE_ID> does not exist, "
                            "copy it from <TESTDATA_DIR>. This disables load "
                            "and query operations and must be performed "
                            "before them")
    group.add_argument("-D", "--download", action="store_true",
                       dest="do_download",
                       default=False,
                       help="Download big datasets using rsync over ssh; "
                            "implies --custom; enable batch mode with "
                            "~/.ssh/config and ssh-agent")
    group.add_argument("-I", "--custom-case-id", dest="custom_case_id",
                       default=None,
                       help="Rename custom test to case/CUSTOM_CASE_ID")
    group.add_argument("-U", "--username", dest="username",
                       default=None,
                       help="rsync username")

    args = parser.parse_args()

    # Configure logger
    logger.setup_logging(args.log_conf)

    # Configure log4cxx logging based on the logging level of the Python logger
    levels = {logging.ERROR: lsst.log.ERROR,
              logging.WARNING: lsst.log.WARN,
              logging.INFO: lsst.log.INFO,
              logging.DEBUG: lsst.log.DEBUG}
    lsst.log.setLevel('', levels.get(_LOG.level, lsst.log.DEBUG))

    if args.do_download:
        args.do_custom = True

    if args.mode == 'all':
        args.mode_list = ['mysql', 'qserv']
    else:
        args.mode_list = [args.mode]

    return args
def setUp(self):
    self.config = commons.read_user_config()
    self.logger = commons.init_default_logger(
        "TestQservDataLoader",
        log_path=self.config['qserv']['log_dir'])