Example #1
  def findAndRunTests(self):
    self.preRun()
    self.start_time = clock()

    # PBS STUFF
    if self.options.pbs and os.path.exists(self.options.pbs):
      self.options.processingPBS = True
      self.processPBSResults()
    else:
      self.options.processingPBS = False
      for dirpath, dirnames, filenames in os.walk(os.getcwd(), followlinks=True):
        if (self.test_match.search(dirpath) and "contrib" not in os.path.relpath(dirpath, os.getcwd())):
          for file in filenames:
            # set cluster_handle to be None initially (happens for each test)
            self.options.cluster_handle = None
            # See if there were other arguments (test names) passed on the command line
            if file == self.options.input_file_name: #and self.test_match.search(file):
              saved_cwd = os.getcwd()
              sys.path.append(os.path.abspath(dirpath))
              os.chdir(dirpath)

              if self.prunePath(file):
                continue

              # Build a Warehouse to hold the MooseObjects
              warehouse = Warehouse()

              # Build a Parser to parse the objects
              parser = Parser(self.factory, warehouse)

              # Parse it
              parser.parse(file)

              # Retrieve the tests from the warehouse
              testers = warehouse.getAllObjects()

              # Augment the Testers with additional information directly from the TestHarness
              for tester in testers:
                self.augmentParameters(file, tester)

              if self.options.enable_recover:
                testers = self.appendRecoverableTests(testers)

              # Go through the Testers and run them
              for tester in testers:
                # Double the allotted time for tests when running with the valgrind option
                tester.setValgrindMode(self.options.valgrind_mode)

                # When running in valgrind mode, we end up with a ton of output for each failed
                # test.  Therefore, we limit the number of fails...
                if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
                  (should_run, reason) = (False, 'Max Fails Exceeded')
                else:
                  (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)

                if should_run:
                  # Create the cluster launcher input file
                  if self.options.pbs and self.options.cluster_handle is None:
                    self.options.cluster_handle = open(dirpath + '/tests.cluster', 'a')
                    self.options.cluster_handle.write('[Jobs]\n')

                  command = tester.getCommand(self.options)
                  # This method spawns another process and allows this loop to continue looking for tests
                  # RunParallel will call self.testOutputAndFinish when the test has completed running
                  # This method will block when the maximum allowed parallel processes are running
                  self.runner.run(tester, command)
                else: # This job is skipped - notify the runner
                  if reason != '':
                    self.handleTestResult(tester.parameters(), '', reason)
                  self.runner.jobSkipped(tester.parameters()['test_name'])

                if self.options.cluster_handle is not None:
                  self.options.cluster_handle.write('[]\n')
                  self.options.cluster_handle.close()
                  self.options.cluster_handle = None

              os.chdir(saved_cwd)
              sys.path.pop()

    # Wait for all tests to finish
    self.runner.join()
    if self.options.pbs and not self.options.processingPBS:
      print '\n< checking batch status >\n'
      self.options.processingPBS = True
      self.processPBSResults()
    self.cleanupAndExit()
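
The directory walk in Example #1 is easy to exercise on its own. Below is a minimal sketch of the same discovery pattern that collects spec files instead of running them; the helper name find_spec_files, the default spec name 'tests', and the skip_dirs parameter are hypothetical stand-ins, not part of the TestHarness API.

import os

def find_spec_files(root, input_file_name='tests', skip_dirs=('contrib',)):
    # Mirror the os.walk loop from findAndRunTests, but collect the spec
    # files it would have parsed instead of running them.
    found = []
    for dirpath, dirnames, filenames in os.walk(root, followlinks=True):
        rel = os.path.relpath(dirpath, root)
        # Same idea as the "contrib" check in the example above
        if any(part in skip_dirs for part in rel.split(os.sep)):
            continue
        if input_file_name in filenames:
            found.append(os.path.join(dirpath, input_file_name))
    return found

if __name__ == '__main__':
    for spec in find_spec_files(os.getcwd()):
        print(spec)
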
Example #2
  def processPBSResults(self):
    # If batch file exists, check the contents for pending tests.
    if os.path.exists(self.options.pbs):
      # Build a list of launched jobs
      batch_file = open(self.options.pbs)
      batch_list = [line.split(':') for line in batch_file.read().split('\n')]
      batch_file.close()
      del batch_list[-1:] # drop the empty entry left by the trailing newline

      # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
      for job in batch_list:
        file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]

        # Build a Warehouse to hold the MooseObjects
        warehouse = Warehouse()

        # Build a Parser to parse the objects
        parser = Parser(self.factory, warehouse)

        # Parse it
        parser.parse(file)

        # Retrieve the tests from the warehouse
        testers = warehouse.getAllObjects()
        for tester in testers:
          self.augmentParameters(file, tester)

        for tester in testers:
          # Build the requested Tester object
          if job[1] == tester.parameters()['test_name']:
            # Create Test Type
            # test = self.factory.create(tester.parameters()['type'], tester)

            # Get job status via qstat
            qstat = ['qstat', '-f', '-x', str(job[0])]
            qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            qstat_stdout = qstat_command.communicate()[0]
            if qstat_stdout is not None:
              output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
            else:
              return ('QSTAT NOT FOUND', '')

            # Report the current status of JOB_ID
            if output_value == 'F':
              # F = Finished. Get the exit code reported by qstat
              exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))

              # Read the stdout file
              if os.path.exists(job[2]):
                output_file = open(job[2], 'r')
                # Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
                tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
                outfile = output_file.read()
                output_file.close()
                self.testOutputAndFinish(tester, exit_code, outfile)
              else:
                # I ran into this scenario when the cluster went down but my job still launched and completed :)
                self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)

            elif output_value == 'R':
              # Job is currently running
              self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
            elif output_value == 'E':
              # Job is exiting
              self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
            elif output_value == 'Q':
              # Job is currently queued
              self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
    else:
      return ('BATCH FILE NOT FOUND', '')
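
The qstat handling in Example #2 boils down to two regular expressions over `qstat -f -x` output. A self-contained sketch under that assumption follows; SAMPLE is a fabricated excerpt, and only the job_state and Exit_status fields mirror what processPBSResults actually searches for.

import re

# Fabricated excerpt of `qstat -f -x <job_id>` output; job_state and
# Exit_status are the only fields the method greps for.
SAMPLE = """Job Id: 12345.headnode
    job_state = F
    Exit_status = 0
"""

def parse_qstat(stdout):
    # Return (job_state, exit_code); exit_code is only known for finished jobs.
    state = re.search(r'job_state = (\w+)', stdout)
    if state is None:
        return (None, None)
    if state.group(1) == 'F':
        exit_status = re.search(r'Exit_status = (-?\d+)', stdout)
        if exit_status is not None:
            return ('F', int(exit_status.group(1)))
    return (state.group(1), None)

print(parse_qstat(SAMPLE))  # ('F', 0)
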
Example #3
  def findAndRunTests(self):
    self.error_code = 0x0
    self.preRun()
    self.start_time = clock()

    try:
      # PBS STUFF
      if self.options.pbs:
        # Check to see if we are using the PBS Emulator.
        # It's expensive, so it must remain outside of the os.walk loop.
        self.options.PBSEmulator = self.checkPBSEmulator()
      if self.options.pbs and os.path.exists(self.options.pbs):
        self.options.processingPBS = True
        self.processPBSResults()
      else:
        self.options.processingPBS = False
        self.base_dir = os.getcwd()
        for dirpath, dirnames, filenames in os.walk(self.base_dir, followlinks=True):
          # Prune submodule paths when searching for tests
          if self.base_dir != dirpath and os.path.exists(os.path.join(dirpath, '.git')):
            dirnames[:] = []

          # Only process directories that are not contrib directories
          if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
            for file in filenames:
              # set cluster_handle to be None initially (happens for each test)
              self.options.cluster_handle = None
              # See if there were other arguments (test names) passed on the command line
              if file == self.options.input_file_name: #and self.test_match.search(file):
                saved_cwd = os.getcwd()
                sys.path.append(os.path.abspath(dirpath))
                os.chdir(dirpath)

                if self.prunePath(file):
                  continue

                # Build a Warehouse to hold the MooseObjects
                warehouse = Warehouse()

                # Build a Parser to parse the objects
                parser = Parser(self.factory, warehouse)

                # Parse it
                self.error_code |= parser.parse(file)

                # Retrieve the tests from the warehouse
                testers = warehouse.getAllObjects()

                # Augment the Testers with additional information directly from the TestHarness
                for tester in testers:
                  self.augmentParameters(file, tester)

                if self.options.enable_recover:
                  testers = self.appendRecoverableTests(testers)


                # Handle PBS tests.cluster file
                if self.options.pbs:
                  (tester, command) = self.createClusterLauncher(dirpath, testers)
                  if command is not None:
                    self.runner.run(tester, command)
                else:
                  # Go through the Testers and run them
                  for tester in testers:
                    # Double the allotted time for tests when running with the valgrind option
                    tester.setValgrindMode(self.options.valgrind_mode)

                    # When running in valgrind mode, we end up with a ton of output for each failed
                    # test.  Therefore, we limit the number of fails...
                    if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
                      (should_run, reason) = (False, 'Max Fails Exceeded')
                    elif self.num_failed > self.options.max_fails:
                      (should_run, reason) = (False, 'Max Fails Exceeded')
                    else:
                      (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)

                    if should_run:
                      command = tester.getCommand(self.options)
                      # This method spawns another process and allows this loop to continue looking for tests
                      # RunParallel will call self.testOutputAndFinish when the test has completed running
                      # This method will block when the maximum allowed parallel processes are running
                      self.runner.run(tester, command)
                    else: # This job is skipped - notify the runner
                      if reason != '':
                        # Report skipped tests only when report_skipped is set; always report other reasons
                        if (self.options.report_skipped and reason.find('skipped') != -1) or reason.find('skipped') == -1:
                          self.handleTestResult(tester.parameters(), '', reason)
                      self.runner.jobSkipped(tester.parameters()['test_name'])
                os.chdir(saved_cwd)
                sys.path.pop()
    except KeyboardInterrupt:
      print '\nExiting due to keyboard interrupt...'
      sys.exit(0)

    # Wait for all tests to finish
    self.runner.join()
    if self.options.pbs and not self.options.processingPBS:
      print '\n< checking batch status >\n'
      self.options.processingPBS = True
      self.processPBSResults()

    self.cleanup()

    if self.num_failed:
      self.error_code |= 0x10

    sys.exit(self.error_code)
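
Example #3 accumulates its exit status as a bitmask: parser errors from parser.parse are OR-ed in as they occur, and 0x10 marks test failures. The sketch below shows how a caller might decode such a status; the PARSER_ERROR name and its 0x01 value are assumptions, since only the 0x10 bit is visible in the code above.

PARSER_ERROR = 0x01  # assumed flag for a parser.parse() failure
TESTS_FAILED = 0x10  # the bit set above when self.num_failed is non-zero

def describe_exit_code(code):
    # Decode the OR-ed failure bits back into readable reasons.
    reasons = []
    if code & PARSER_ERROR:
        reasons.append('parse error')
    if code & TESTS_FAILED:
        reasons.append('test failures')
    return reasons or ['clean run']

print(describe_exit_code(0x00))  # ['clean run']
print(describe_exit_code(0x11))  # ['parse error', 'test failures']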