Example 1
 def __init__(self):
     self.__argumentNamespace = self._parseArguments(
         argparse.ArgumentParser(
             description='Run benchmarking scalability suites.'))
     self.logger = createLogger(self.__argumentNamespace.log_path)
     self.log = self.logger.info
     self.users_file_original_content = []
     # Proxy to the master's test_result
     portal_url = self.__argumentNamespace.test_suite_master_url
     distributor = taskdistribution.TaskDistributor(portal_url,
                                                    logger=DummyLogger(
                                                        self.log))
     self.log(self.__argumentNamespace.test_suite_master_url)
     self.test_result = taskdistribution.TestResultProxy(
         distributor, 1.0, DummyLogger(self.log),
         self.__argumentNamespace.test_result_path,
         self.__argumentNamespace.node_title,
         self.__argumentNamespace.revision)
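
Examples 1 and 4 hand taskdistribution a DummyLogger built from a plain
logging callable. The class itself is not shown here; a minimal sketch of
such an adapter, assuming the convention visible above (every standard
logger method routed to one callable), could look like:

class DummyLogger(object):
    """Adapter exposing the stdlib logger interface on top of a single
    callable (sketch; the real class ships with the testnode code)."""
    def __init__(self, func):
        for name in ('log', 'debug', 'info', 'warn', 'warning',
                     'error', 'critical', 'fatal'):
            setattr(self, name, func)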
Example 2
def main():
    parser = argparse.ArgumentParser(
        description='Run a test suite.',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent('''
      Tips:

        Running a full test suite on a development machine can be achieved with:

          %(prog)s --node_quantity=3 --test_suite=ERP5 --xvfb_bin=/path/to/Xvfb --firefox_bin=/path/to/firefox
      '''))

    # Parameters included in wrappers generated by SlapOS ERP5 software release.
    # To handle backward compatibility, we prefer that the generated wrapper pass
    # these parameters as environment variables. This way, if SlapOS SR is more
    # recent, the parameter will be ignored by ERP5.
    slapos_wrapper_group = parser.add_argument_group(
        'SlapOS wrapper arguments',
        description='Arguments passed automatically by SlapOS generated wrapper'
    )
    slapos_wrapper_group.add_argument(
        '--db_list', help='A list of comma-separated SQL connection strings')
    slapos_wrapper_group.add_argument('--conversion_server_url', default=None)
    slapos_wrapper_group.add_argument('--conversion_server_retry_count',
                                      default=None)
    slapos_wrapper_group.add_argument('--conversion_server_hostname',
                                      default=None)
    slapos_wrapper_group.add_argument('--conversion_server_port', default=None)
    slapos_wrapper_group.add_argument('--volatile_memcached_server_hostname',
                                      default=None)
    slapos_wrapper_group.add_argument('--volatile_memcached_server_port',
                                      default=None)
    slapos_wrapper_group.add_argument('--persistent_memcached_server_hostname',
                                      default=None)
    slapos_wrapper_group.add_argument('--persistent_memcached_server_port',
                                      default=None)
    slapos_wrapper_group.add_argument('--bt5_path', default=None)
    slapos_wrapper_group.add_argument(
        '--zserver_address_list',
        help='A list of comma-separated host:port for ZServer.\n'
        'Also taken from zserver_address_list environment variable.',
        default=os.getenv('zserver_address_list', ''))
    slapos_wrapper_group.add_argument(
        '--zserver_frontend_url_list',
        help='A list of comma-separated frontend URLs, one for each of '
        'zserver_address_list, in the same order.\n'
        'Also taken from zserver_frontend_url_list environment variable.',
        default=os.getenv('zserver_frontend_url_list', ''))

    # Parameters passed by test node
    testnode_group = parser.add_argument_group(
        'test node arguments', description='Arguments passed by testnode')
    testnode_group.add_argument('--test_suite', help='The test suite name')
    testnode_group.add_argument('--test_suite_title',
                                help='The test suite title',
                                default=None)
    testnode_group.add_argument('--test_node_title',
                                help='The test node title',
                                default=None)
    testnode_group.add_argument('--project_title',
                                help='The project title',
                                default=None)
    testnode_group.add_argument('--revision',
                                help='The revision to test',
                                default='dummy_revision')
    testnode_group.add_argument('--node_quantity',
                                help='Number of parallel tests to run',
                                default=1,
                                type=int)
    testnode_group.add_argument(
        '--master_url',
        help='The URL of the master controlling many suites',
        default=None)
    testnode_group.add_argument("--xvfb_bin", default=None)
    testnode_group.add_argument("--firefox_bin", default=None)
    testnode_group.add_argument("--log_directory", default=None)

    args = parser.parse_args()
    if args.bt5_path is not None:
        sys.path[0:0] = args.bt5_path.split(",")
    master = taskdistribution.TaskDistributor(args.master_url)
    test_suite_title = args.test_suite_title or args.test_suite
    revision = args.revision

    args.zserver_address_list = (args.zserver_address_list.split(',')
                                 if args.zserver_address_list else ())
    args.zserver_frontend_url_list = (args.zserver_frontend_url_list.split(',')
                                      if args.zserver_frontend_url_list else
                                      ())

    if args.zserver_address_list and len(
            args.zserver_address_list) < args.node_quantity:
        print('Not enough zserver address/frontends for node quantity %s (%r)'
              % (args.node_quantity, args.zserver_address_list),
              file=sys.stderr)
        sys.exit(1)

    # sanity check
    assert len(args.zserver_address_list) == len(
        args.zserver_frontend_url_list)

    suite = makeSuite(test_suite=args.test_suite,
                      node_quantity=args.node_quantity,
                      revision=revision,
                      db_list=args.db_list,
                      zserver_address_list=args.zserver_address_list,
                      zserver_frontend_url_list=args.zserver_frontend_url_list,
                      bt5_path=args.bt5_path,
                      firefox_bin=args.firefox_bin,
                      xvfb_bin=args.xvfb_bin,
                      log_directory=args.log_directory)
    test_result = master.createTestResult(revision, suite.getTestList(),
                                          args.test_node_title,
                                          suite.allow_restart,
                                          test_suite_title, args.project_title)
    if test_result is not None:
        assert revision == test_result.revision, (revision,
                                                  test_result.revision)
        while suite.acquire():
            test = test_result.start(suite.running.keys())
            if test is not None:
                suite.start(test.name,
                            lambda status_dict, __test=test: __test.stop(
                                **status_dict))
            elif not suite.running:
                break
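
Note the __test=test default argument in the callback above: Python closures
bind names late, so a bare lambda status_dict: test.stop(**status_dict) would
make every callback stop whichever test the loop saw last. A standalone
illustration of the difference:

# Late binding: every closure reads the loop variable's final value.
callbacks = [lambda: i for i in range(3)]
print([f() for f in callbacks])   # [2, 2, 2]

# Default argument: the current value is captured at definition time.
callbacks = [lambda i=i: i for i in range(3)]
print([f() for f in callbacks])   # [0, 1, 2]

The same pattern appears in Examples 3, 5 and 6.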
Example 3
def main():
    logger = logging.getLogger()
    logger.addHandler(logging.StreamHandler(sys.stdout))
    logger.setLevel(logging.DEBUG)
    parser = argparse.ArgumentParser(description='Run a test suite.')
    parser.add_argument('--test_suite', help='The test suite name')
    parser.add_argument('--test_suite_title', help='The test suite title')
    parser.add_argument('--test_node_title', help='The test node title')
    parser.add_argument('--project_title', help='The project title')
    parser.add_argument('--revision',
                        help='The revision to test',
                        default='dummy_revision')
    parser.add_argument('--node_quantity',
                        type=int,
                        help='Number of CPUs to use for the VM')
    parser.add_argument('--master_url',
                        help='The URL of the master controlling test suites')
    # SlapOS and deploy test specific
    parser.add_argument('--partition_path',
                        help="Path of a partition",
                        default=os.path.abspath(os.getcwd()))
    parser.add_argument('--test_reference',
                        help="Reference of the test",
                        default="missing")
    parser.add_argument('--partition_ipv4', help="IPv4 of a partition")
    parser.add_argument('--test_location', help="Location of the tests")
    parser.add_argument(
        '--python_interpreter',
        help="Path to python interpreter used to run the test suite")

    args = parser.parse_args()

    revision = args.revision
    test_suite_title = args.test_suite_title or args.test_suite
    suite = testsuite.EggTestSuite(
        1,
        test_suite=args.test_suite,
        node_quantity=args.node_quantity,
        python_interpreter=args.python_interpreter,
        egg_test_path_dict={
            os.path.basename(os.path.normpath(path)): path
            for path in args.test_location.split(',')
        },
        revision=revision)
    access_url_http = None
    access_url_https = None
    if args.partition_ipv4:
        access_url_http = 'http://%s:10080' % (args.partition_ipv4, )
        access_url_https = 'https://%s:10443' % (args.partition_ipv4, )
        os.environ['TEST_ACCESS_URL_HTTP'] = access_url_http
        os.environ['TEST_ACCESS_URL_HTTPS'] = access_url_https
    distributor = taskdistribution.TaskDistributor(args.master_url,
                                                   logger=logger)
    test_result = distributor.createTestResult(revision, suite.getTestList(),
                                               args.test_node_title,
                                               suite.allow_restart,
                                               test_suite_title,
                                               args.project_title)
    if test_result is None:
        return

    # Create the site
    status_dict = waitForSite(args.partition_path)

    # Open in text mode so it accepts the str produced by json.dumps
    # (NamedTemporaryFile defaults to binary 'w+b' on Python 3).
    status_file = tempfile.NamedTemporaryFile(mode='w')
    status_file.write(json.dumps(status_dict))
    status_file.flush()
    os.fsync(status_file.fileno())
    os.environ['TEST_SITE_STATUS_JSON'] = status_file.name

    assert revision == test_result.revision, (revision, test_result.revision)
    while suite.acquire():
        test = test_result.start(suite.running.keys())
        if test is not None:
            suite.start(
                test.name,
                lambda status_dict, __test=test: __test.stop(**status_dict))
        elif not suite.running:
            break
    return
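
This runner publishes the site status to the tests through the
TEST_SITE_STATUS_JSON environment variable, which holds the path of a JSON
file. A hypothetical consumer on the test side (the helper name
load_site_status is ours, not part of the suite):

import json
import os

def load_site_status():
    # Path of the JSON status file written by the runner above.
    path = os.environ.get('TEST_SITE_STATUS_JSON')
    if path is None:
        return None
    with open(path) as f:
        return json.load(f)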
Example 4
    def run(self):
        log = self.log
        config = self.config
        slapgrid = None
        previous_revision_dict = {}
        revision_dict = {}
        test_result = None
        test_node_slapos = SlapOSInstance()
        test_node_slapos.edit(
            working_directory=self.config['slapos_directory'])
        try:
            while True:
                try:
                    node_test_suite = None
                    self.log = self.process_manager.log = self.testnode_log
                    self.cleanUp(None)
                    remote_test_result_needs_cleanup = False
                    begin = time.time()
                    portal_url = config['test_suite_master_url']
                    portal = taskdistribution.TaskDistributionTool(
                        portal_url, logger=DummyLogger(log))
                    self.portal = portal
                    self.test_suite_portal = taskdistribution.TaskDistributor(
                        portal_url, logger=DummyLogger(log))
                    self.test_suite_portal.subscribeNode(
                        node_title=config['test_node_title'],
                        computer_guid=config['computer_id'])
                    test_suite_data = self.test_suite_portal.startTestSuite(
                        node_title=config['test_node_title'],
                        computer_guid=config['computer_id'])
                    if isinstance(test_suite_data, str):
                        # Backward compatibility
                        test_suite_data = json.loads(test_suite_data)
                    test_suite_data = Utils.deunicodeData(test_suite_data)
                    log("Got following test suite data from master : %r" % \
                        (test_suite_data,))
                    try:
                        my_test_type = self.test_suite_portal.getTestType()
                    except:
                        log("testnode, error during requesting getTestType() method \
from the distributor.")
                        raise
                    # Select runner according to the test type
                    if my_test_type == 'UnitTest':
                        runner = UnitTestRunner(self)
                    elif my_test_type == 'ScalabilityTest':
                        runner = ScalabilityTestRunner(self)
                    else:
                        log("testnode, Runner type %s not implemented.",
                            my_test_type)
                        raise NotImplementedError
                    log("Type of current test is %s" % (my_test_type, ))
                    # master testnode gets test_suites, slaves get nothing
                    runner.prepareSlapOSForTestNode(test_node_slapos)
                    # Clean-up test suites
                    self.checkOldTestSuite(test_suite_data)
                    for test_suite in test_suite_data:
                        remote_test_result_needs_cleanup = False
                        node_test_suite = self.getNodeTestSuite(
                            test_suite["test_suite_reference"])

                        node_test_suite.edit(
                            working_directory=self.config['working_directory'],
                            log_directory=self.config['log_directory'])

                        node_test_suite.edit(**test_suite)
                        if my_test_type == 'UnitTest':
                            runner = UnitTestRunner(node_test_suite)
                        elif my_test_type == 'ScalabilityTest':
                            runner = ScalabilityTestRunner(node_test_suite)
                        else:
                            log("testnode, Runner type %s not implemented.",
                                my_test_type)
                            raise NotImplementedError

                        # XXX: temporary hack to prevent empty test_suite
                        if not hasattr(node_test_suite, 'test_suite'):
                            node_test_suite.edit(test_suite='')
                        run_software = True
                        # kill processes from previous loop if any
                        self.process_manager.killPreviousRun()
                        self.getAndUpdateFullRevisionList(node_test_suite)
                        # Write our own software.cfg to use the local repository
                        self.constructProfile(node_test_suite, my_test_type,
                                              runner.getRelativePathUsage())
                        # Make sure we have a local repository
                        test_result = portal.createTestResult(
                            node_test_suite.revision, [],
                            config['test_node_title'], False,
                            node_test_suite.test_suite_title,
                            node_test_suite.project_title)
                        remote_test_result_needs_cleanup = True
                        log("testnode, test_result : %r" % (test_result, ))
                        if test_result is not None:
                            self.registerSuiteLog(test_result, node_test_suite)
                            self.checkRevision(test_result, node_test_suite)
                            node_test_suite.edit(test_result=test_result)
                            # Now prepare the SlapOS installation and create the instance
                            status_dict = runner.prepareSlapOSForTestSuite(
                                node_test_suite)
                            # Give the computer partitions some time to start:
                            # partitions can be of any kind, and we likely will
                            # never have a reliable way to check whether they
                            # are up.
                            time.sleep(20)
                            if my_test_type == 'UnitTest':
                                runner.runTestSuite(node_test_suite,
                                                    portal_url)
                            elif my_test_type == 'ScalabilityTest':
                                error_message = None
                                # A problem appeared during runTestSuite
                                if status_dict['status_code'] == 1:
                                    error_message = "Software installation took too long or errors occurred during SR install."
                                else:
                                    status_dict = runner.runTestSuite(
                                        node_test_suite, portal_url)
                                    # A problem appeared during runTestSuite
                                    if status_dict['status_code'] == 1:
                                        error_message = status_dict[
                                            'error_message']

                                # If an error appeared
                                if error_message:
                                    test_result.reportFailure(
                                        stdout=error_message)
                                    self.log(error_message)
                                    raise ValueError(error_message)
                            else:
                                raise NotImplementedError

                            # break the loop to get latest priorities from master
                            break
                        self.cleanUp(test_result)
                except (SubprocessError, CalledProcessError) as e:
                    log("SubprocessError", exc_info=sys.exc_info())
                    if remote_test_result_needs_cleanup:
                        status_dict = e.status_dict or {}
                        test_result.reportFailure(
                            command=status_dict.get('command'),
                            stdout=status_dict.get('stdout'),
                            stderr=status_dict.get('stderr'),
                        )
                    continue
                except ValueError as e:
                    # This can happen, for example, if runTestSuite is not found
                    log("ValueError", exc_info=sys.exc_info())
                    if node_test_suite is not None:
                        node_test_suite.retry_software_count += 1
                except CancellationError as e:
                    log("CancellationError", exc_info=sys.exc_info())
                    self.process_manager.under_cancellation = False
                    node_test_suite.retry = True
                    continue
                except:
                    ex_type, ex, tb = sys.exc_info()
                    traceback.print_tb(tb)
                    log("erp5testnode exception", exc_info=sys.exc_info())
                    raise
                now = time.time()
                self.cleanUp(test_result)
                if (now - begin) < 120:
                    sleep_time = 120 - (now - begin)
                    log("End of processing, going to sleep %s" % sleep_time)
                    time.sleep(sleep_time)
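
The runner selection appears twice above as an if/elif chain keyed on the
test type reported by the distributor. A table-driven sketch of the same
dispatch, reusing the UnitTestRunner and ScalabilityTestRunner classes the
original module already imports:

RUNNER_BY_TEST_TYPE = {
    'UnitTest': UnitTestRunner,
    'ScalabilityTest': ScalabilityTestRunner,
}

def makeRunner(test_type, argument):
    try:
        runner_class = RUNNER_BY_TEST_TYPE[test_type]
    except KeyError:
        raise NotImplementedError(
            "Runner type %s not implemented." % test_type)
    return runner_class(argument)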
Example 5
def runTestSuite():
    parser = argparse.ArgumentParser(description='Run a test suite.')
    parser.add_argument('--test_suite', help='The test suite name')
    parser.add_argument('--test_suite_title',
                        help='The test suite title',
                        default=None)
    parser.add_argument('--test_node_title',
                        help='The test node title',
                        default=None)
    parser.add_argument('--project_title',
                        help='The project title',
                        default=None)
    parser.add_argument('--revision',
                        help='The revision to test',
                        default='dummy_revision')
    parser.add_argument('--node_quantity',
                        help='Number of parallel tests to run',
                        default=1,
                        type=int)
    parser.add_argument('--master_url',
                        help='The URL of the master controlling many suites',
                        default=None)
    parser.add_argument('--frontend_url',
                        help='The URL of the frontend of this test node',
                        default=None)
    parser.add_argument(
        '--python_interpreter',
        help='Path to python interpreter used to run the test suite',
        default='python')
    parser.add_argument('--source_code_path_list',
                        help='Comma-separated list of egg folders to test',
                        default='.')

    args = parser.parse_args()
    master = taskdistribution.TaskDistributor(args.master_url)
    test_suite_title = args.test_suite_title or args.test_suite
    revision = args.revision

    # Guess test name from path, we support mainly two cases:
    #  /path/to/erp5.util/                  -> we want erp5.util
    #  /path/to/slapos/software/erp5/test/  -> we want erp5
    egg_test_path_dict = {}
    for test_path in args.source_code_path_list.split(','):
        path, test_name = os.path.split(test_path)
        while test_name in ('', 'test'):
            path, test_name = os.path.split(path)
            assert path != os.path.sep
        egg_test_path_dict[test_name] = test_path

    suite = EggTestSuite(
        1,
        test_suite=args.test_suite,
        node_quantity=args.node_quantity,
        revision=revision,
        python_interpreter=args.python_interpreter,
        egg_test_path_dict=egg_test_path_dict,
    )

    test_result = master.createTestResult(revision, suite.getTestList(),
                                          args.test_node_title,
                                          suite.allow_restart,
                                          test_suite_title, args.project_title)
    if test_result is not None:
        assert revision == test_result.revision, (revision,
                                                  test_result.revision)
        while suite.acquire():
            test = test_result.start(suite.running.keys())
            if test is not None:
                suite.start(test.name,
                            lambda status_dict, __test=test: __test.stop(
                                **status_dict))
            elif not suite.running:
                break
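
The name-guessing loop above strips trailing empty components (from a
trailing slash) and 'test' directories. Factored into a standalone helper
(the function name is ours), with the two documented cases as a check:

import os

def guess_test_name(test_path):
    # Walk up past trailing '' and 'test' path components.
    path, test_name = os.path.split(test_path)
    while test_name in ('', 'test'):
        path, test_name = os.path.split(path)
        assert path != os.path.sep
    return test_name

assert guess_test_name('/path/to/erp5.util/') == 'erp5.util'
assert guess_test_name('/path/to/slapos/software/erp5/test/') == 'erp5'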
Example 6
def main():
    parser = argparse.ArgumentParser(description='Run a test suite.')
    parser.add_argument('--test_suite', help='The test suite name')
    parser.add_argument('--test_suite_title',
                        help='The test suite title',
                        default=None)
    parser.add_argument('--test_node_title',
                        help='The test node title',
                        default=None)
    parser.add_argument('--project_title',
                        help='The project title',
                        default=None)
    parser.add_argument('--revision',
                        help='The revision to test',
                        default='dummy_revision')
    parser.add_argument('--node_quantity',
                        help='Number of parallel tests to run',
                        default=1,
                        type=int)
    parser.add_argument('--master_url',
                        help='The URL of the master controlling many suites',
                        default=None)
    parser.add_argument('--db_list', help='A list of SQL connection strings')
    # parameters that need to be passed to runUnitTest
    parser.add_argument('--conversion_server_url', default=None)
    parser.add_argument('--conversion_server_hostname', default=None)
    parser.add_argument('--conversion_server_port', default=None)
    parser.add_argument('--volatile_memcached_server_hostname', default=None)
    parser.add_argument('--volatile_memcached_server_port', default=None)
    parser.add_argument('--persistent_memcached_server_hostname', default=None)
    parser.add_argument('--persistent_memcached_server_port', default=None)
    parser.add_argument('--bt5_path', default=None)
    parser.add_argument("--xvfb_bin", default=None)
    parser.add_argument("--firefox_bin", default=None)

    args = parser.parse_args()
    if args.bt5_path is not None:
        sys.path[0:0] = args.bt5_path.split(",")
    master = taskdistribution.TaskDistributor(args.master_url)
    test_suite_title = args.test_suite_title or args.test_suite
    revision = args.revision
    suite = makeSuite(test_suite=args.test_suite,
                      node_quantity=args.node_quantity,
                      revision=revision,
                      db_list=args.db_list,
                      bt5_path=args.bt5_path,
                      firefox_bin=args.firefox_bin,
                      xvfb_bin=args.xvfb_bin)
    test_result = master.createTestResult(revision, suite.getTestList(),
                                          args.test_node_title,
                                          suite.allow_restart,
                                          test_suite_title, args.project_title)
    if test_result is not None:
        assert revision == test_result.revision, (revision,
                                                  test_result.revision)
        while suite.acquire():
            test = test_result.start(suite.running.keys())
            if test is not None:
                suite.start(test.name,
                            lambda status_dict, __test=test: __test.stop(
                                **status_dict))
            elif not suite.running:
                break
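
As in Example 2, sys.path[0:0] = args.bt5_path.split(",") prepends the bt5
paths so they take import precedence over everything already on sys.path. A
minimal illustration of the slice-assignment prepend:

# Assigning to the empty slice [0:0] inserts the items at the front.
path = ['a', 'b']
path[0:0] = 'x,y'.split(',')
print(path)  # ['x', 'y', 'a', 'b']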