Example #1
0
import argparse
import sys

# Assumption: taskdistribution comes from erp5.util, and makeSuite is defined
# elsewhere in the surrounding module, as in the original project.
from erp5.util import taskdistribution

def main():
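  """Parse command-line options and run one test suite against the task
  distribution master."""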
  parser = argparse.ArgumentParser(description='Run a test suite.')
  parser.add_argument('--test_suite', help='The test suite name')
  parser.add_argument('--test_suite_title', help='The test suite title',
                      default=None)
  parser.add_argument('--test_node_title', help='The test node title',
                      default=None)
  parser.add_argument('--project_title', help='The project title',
                      default=None)
  parser.add_argument('--revision', help='The revision to test',
                      default='dummy_revision')
  parser.add_argument('--node_quantity', help='Number of parallel tests to run',
                      default=1, type=int)
  parser.add_argument('--master_url',
                      help='The URL of the master controlling many suites',
                      default=None)
  parser.add_argument('--db_list', help='A list of sql connection strings')
  # parameters that need to be passed to runUnitTest
  parser.add_argument('--conversion_server_url', default=None)
  parser.add_argument('--conversion_server_hostname', default=None)
  parser.add_argument('--conversion_server_port', default=None)
  parser.add_argument('--volatile_memcached_server_hostname', default=None)
  parser.add_argument('--volatile_memcached_server_port', default=None)
  parser.add_argument('--persistent_memcached_server_hostname', default=None)
  parser.add_argument('--persistent_memcached_server_port', default=None)
  parser.add_argument('--bt5_path', default=None)
  parser.add_argument("--xvfb_bin", default=None)
  parser.add_argument("--firefox_bin", default=None)

  args = parser.parse_args()
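  # Prepend any given bt5 repositories to sys.path so their code can be imported.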
  if args.bt5_path is not None:
    sys.path[0:0] = args.bt5_path.split(",")
  master = taskdistribution.TaskDistributionTool(args.master_url)
  test_suite_title = args.test_suite_title or args.test_suite
  revision = args.revision
  suite = makeSuite(test_suite=args.test_suite,
                    node_quantity=args.node_quantity,
                    revision=revision,
                    db_list=args.db_list,
                    bt5_path=args.bt5_path,
                    firefox_bin=args.firefox_bin,
                    xvfb_bin=args.xvfb_bin)
  test_result = master.createTestResult(revision, suite.getTestList(),
    args.test_node_title, suite.allow_restart, test_suite_title,
    args.project_title)
  if test_result is not None:
    assert revision == test_result.revision, (revision, test_result.revision)
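    # Dispatch loop: keep requesting tests from the master for as long as the
    # suite accepts more work; each test reports its status through test.stop().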
    while suite.acquire():
      test = test_result.start(suite.running.keys())
      if test is not None:
        suite.start(test.name, lambda status_dict, __test=test:
          __test.stop(**status_dict))
      elif not suite.running:
        break
Example #2
0
import argparse
import os

# Assumption: taskdistribution comes from erp5.util, and EggTestSuite is defined
# elsewhere in the surrounding module, as in the original project.
from erp5.util import taskdistribution

def runTestSuite():
    parser = argparse.ArgumentParser(description='Run a test suite.')
    parser.add_argument('--test_suite', help='The test suite name')
    parser.add_argument('--test_suite_title',
                        help='The test suite title',
                        default=None)
    parser.add_argument('--test_node_title',
                        help='The test node title',
                        default=None)
    parser.add_argument('--project_title',
                        help='The project title',
                        default=None)
    parser.add_argument('--revision',
                        help='The revision to test',
                        default='dummy_revision')
    parser.add_argument('--node_quantity',
                        help='Number of parallel tests to run',
                        default=1,
                        type=int)
    parser.add_argument('--master_url',
                        help='The URL of the master controlling many suites',
                        default=None)
    parser.add_argument('--source_code_path_list',
                        help='Comma-separated list of egg folders to test',
                        default='.')

    args = parser.parse_args()
    master = taskdistribution.TaskDistributionTool(args.master_url)
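    # Expose the egg folders to test through the SOURCE_CODE_TO_TEST environment variable.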
    os.environ.setdefault("SOURCE_CODE_TO_TEST", args.source_code_path_list)
    test_suite_title = args.test_suite_title or args.test_suite
    revision = args.revision
    suite = EggTestSuite(1,
                         test_suite=args.test_suite,
                         node_quantity=args.node_quantity,
                         revision=revision)

    test_result = master.createTestResult(revision, suite.getTestList(),
                                          args.test_node_title,
                                          suite.allow_restart,
                                          test_suite_title, args.project_title)
    if test_result is not None:
        assert revision == test_result.revision, (revision,
                                                  test_result.revision)
        while suite.acquire():
            test = test_result.start(suite.running.keys())
            if test is not None:
                suite.start(test.name,
                            lambda status_dict, __test=test: __test.stop(
                                **status_dict))
            elif not suite.running:
                break
Example #3
0
    def run(self):
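        """Main loop of the test node: poll the master for test suites,
        build and run them, then clean up and start over."""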
        log = self.log
        config = self.config
        slapgrid = None
        previous_revision_dict = {}
        revision_dict = {}
        test_result = None
        test_node_slapos = SlapOSInstance()
        test_node_slapos.edit(
            working_directory=self.config['slapos_directory'])
        try:
            while True:
                try:
                    node_test_suite = None
                    self.log = self.process_manager.log = self.testnode_log
                    self.cleanUp(None)
                    remote_test_result_needs_cleanup = False
                    begin = time.time()
                    portal_url = config['test_suite_master_url']
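                    # Two clients to the master: TaskDistributionTool is used
                    # below to create test results, while TaskDistributor is
                    # used to subscribe this node and fetch its test suites.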
                    portal = taskdistribution.TaskDistributionTool(
                        portal_url, logger=DummyLogger(log))
                    self.portal = portal
                    self.test_suite_portal = taskdistribution.TaskDistributor(
                        portal_url, logger=DummyLogger(log))
                    self.test_suite_portal.subscribeNode(
                        node_title=config['test_node_title'],
                        computer_guid=config['computer_id'])
                    test_suite_data = self.test_suite_portal.startTestSuite(
                        node_title=config['test_node_title'],
                        computer_guid=config['computer_id'])
                    if isinstance(test_suite_data, str):
                        # Backward compatibility: older masters return a JSON string
                        test_suite_data = json.loads(test_suite_data)
                    test_suite_data = Utils.deunicodeData(test_suite_data)
                    log("Got following test suite data from master : %r" % \
                        (test_suite_data,))
                    try:
                        my_test_type = self.test_suite_portal.getTestType()
                    except Exception:
                        log("testnode, error while requesting getTestType() "
                            "from the distributor.")
                        raise
                    # Select runner according to the test type
                    if my_test_type == 'UnitTest':
                        runner = UnitTestRunner(self)
                    elif my_test_type == 'ScalabilityTest':
                        runner = ScalabilityTestRunner(self)
                    else:
                        log("testnode, Runner type %s not implemented.",
                            my_test_type)
                        raise NotImplementedError
                    log("Type of current test is %s" % (my_test_type, ))
                    # master testnode gets test_suites, slaves get nothing
                    runner.prepareSlapOSForTestNode(test_node_slapos)
                    # Clean-up test suites
                    self.checkOldTestSuite(test_suite_data)
                    for test_suite in test_suite_data:
                        remote_test_result_needs_cleanup = False
                        node_test_suite = self.getNodeTestSuite(
                            test_suite["test_suite_reference"])

                        node_test_suite.edit(
                            working_directory=self.config['working_directory'],
                            log_directory=self.config['log_directory'])

                        node_test_suite.edit(**test_suite)
                        if my_test_type == 'UnitTest':
                            runner = UnitTestRunner(node_test_suite)
                        elif my_test_type == 'ScalabilityTest':
                            runner = ScalabilityTestRunner(node_test_suite)
                        else:
                            log("testnode, Runner type %s not implemented.",
                                my_test_type)
                            raise NotImplementedError

                        # XXX: temporary hack to prevent empty test_suite
                        if not hasattr(node_test_suite, 'test_suite'):
                            node_test_suite.edit(test_suite='')
                        run_software = True
                        # kill processes from previous loop if any
                        self.process_manager.killPreviousRun()
                        self.getAndUpdateFullRevisionList(node_test_suite)
                        # Write our own software.cfg to use the local repository
                        self.constructProfile(node_test_suite, my_test_type,
                                              runner.getRelativePathUsage())
                        # Make sure we have a local repository
                        test_result = portal.createTestResult(
                            node_test_suite.revision, [],
                            config['test_node_title'], False,
                            node_test_suite.test_suite_title,
                            node_test_suite.project_title)
                        remote_test_result_needs_cleanup = True
                        log("testnode, test_result : %r" % (test_result, ))
                        if test_result is not None:
                            self.registerSuiteLog(test_result, node_test_suite)
                            self.checkRevision(test_result, node_test_suite)
                            node_test_suite.edit(test_result=test_result)
                            # Now prepare the installation of SlapOS and create the instance
                            status_dict = runner.prepareSlapOSForTestSuite(
                                node_test_suite)
                            # Give the computer partitions some time to start:
                            # partitions can be of any kind, so we have no
                            # reliable way (and likely never will) to check
                            # whether they are up or not.
                            time.sleep(20)
                            if my_test_type == 'UnitTest':
                                runner.runTestSuite(node_test_suite,
                                                    portal_url)
                            elif my_test_type == 'ScalabilityTest':
                                error_message = None
                                # A problem appeared while installing the software release
                                if status_dict['status_code'] == 1:
                                    error_message = "Software installation took too long or errors occurred during SR install."
                                else:
                                    status_dict = runner.runTestSuite(
                                        node_test_suite, portal_url)
                                    # A problem appeared during runTestSuite
                                    if status_dict['status_code'] == 1:
                                        error_message = status_dict[
                                            'error_message']

                                # If an error appeared
                                if error_message:
                                    test_result.reportFailure(
                                        stdout=error_message)
                                    self.log(error_message)
                                    raise ValueError(error_message)
                            else:
                                raise NotImplementedError

                            # break the loop to get latest priorities from master
                            break
                        self.cleanUp(test_result)
                except (SubprocessError, CalledProcessError) as e:
                    log("SubprocessError", exc_info=sys.exc_info())
                    if remote_test_result_needs_cleanup:
                        status_dict = e.status_dict or {}
                        test_result.reportFailure(
                            command=status_dict.get('command'),
                            stdout=status_dict.get('stdout'),
                            stderr=status_dict.get('stderr'),
                        )
                    continue
                except ValueError as e:
                    # This can happen, for instance, if runTestSuite is not found
                    log("ValueError", exc_info=sys.exc_info())
                    if node_test_suite is not None:
                        node_test_suite.retry_software_count += 1
                except CancellationError as e:
                    log("CancellationError", exc_info=sys.exc_info())
                    self.process_manager.under_cancellation = False
                    node_test_suite.retry = True
                    continue
                except:
                    ex_type, ex, tb = sys.exc_info()
                    traceback.print_tb(tb)
                    log("erp5testnode exception", exc_info=sys.exc_info())
                    raise
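                # Throttle the loop: make sure each iteration takes at least
                # two minutes before asking the master again.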
                now = time.time()
                self.cleanUp(test_result)
                if (now - begin) < 120:
                    sleep_time = 120 - (now - begin)
                    log("End of processing, going to sleep %s" % sleep_time)
                    time.sleep(sleep_time)