def _runPathTest(self, run, test):
    """Validate the declared path (route) of a test's state machine.

    @type run: Run object
    @param run: Run the test belongs to (not used directly here)
    @type test: Test object
    @param test: Test whose state-machine route is validated

    Raises PeachException when the declared path fails validation.
    """
    stateMachine = test.stateMachine
    # If no path declaration found then simply skip the validation
    if len(stateMachine.getRoute()) == 0:
        return

    print("[*] Running path validation test[%s]\n" % test.name)
    try:
        stateEngine = StateEngine(self, stateMachine, test.publishers)
        # Create a path validator to check basic validation rules
        mutator = PathValidationMutator()
        pathValidator = PathValidator(stateEngine.pathFinder, mutator)
        try:
            stateEngine.run(mutator)
            print("Traced route: ")
            print(" - ".join(["%s" % str(stateName) for stateName in mutator.states]))
            pathValidator.validate()
        except PathException as e:
            # Surface path problems to callers as the engine's exception type.
            raise PeachException(str(e))
    except PeachException as e:
        print("\n[-] End of path validation test : Validation failed!\n")
        raise e
def _runPathTest(self, run, test):
    """Validate the declared path (route) of a test's state machine.

    @type run: Run object
    @param run: Run the test belongs to (not used directly here)
    @type test: Test object
    @param test: Test whose state-machine route is validated

    Raises PeachException when the declared path fails validation.
    """
    stateMachine = test.stateMachine
    # If no path declaration found then simply skip the validation
    if len(stateMachine.getRoute()) == 0:
        return

    print("[*] Running path validation test[%s]\n" % test.name)
    try:
        stateEngine = StateEngine(self, stateMachine, test.publishers)
        # Create a path validator to check basic validation rules
        mutator = PathValidationMutator()
        pathValidator = PathValidator(stateEngine.pathFinder, mutator)
        try:
            stateEngine.run(mutator)
            print("Traced route: ")
            print(" - ".join(["%s" % str(stateName) for stateName in mutator.states]))
            pathValidator.validate()
        except PathException as e:
            # Surface path problems to callers as the engine's exception type.
            raise PeachException(str(e))
    except PeachException as e:
        print("\n[-] End of path validation test : Validation failed!\n")
        raise e
def _runPathTest(self, run, test):
    """Run a path-validation pass over the test's state machine.

    Does nothing when the state machine declares no route. Raises a
    PeachException when path validation fails; logs success otherwise.
    """
    machine = test.stateMachine
    # No path declaration means there is nothing to validate.
    if len(machine.getRoute()) == 0:
        return

    logging.info("Running path validation test for %s." % test.name)
    try:
        engine = StateEngine(self, machine, test.publishers)
        # A path validator checks the basic validation rules.
        tracer = PathValidationMutator()
        validator = PathValidator(engine.pathFinder, tracer)
        try:
            engine.run(tracer)
            print("Traced route: ")
            print(" - ".join([str(name) for name in tracer.states]))
            validator.validate()
        except PathException as e:
            raise PeachException(str(e))
    except PeachException as e:
        logging.error("End of path validation test : Validation failed!")
        raise e
    else:
        logging.info("End of path validation test : Successfully passed")
def _runPathTest(self, run, test):
    """Run a path-validation pass over the test's state machine.

    Does nothing when the state machine declares no route. Raises a
    PeachException when path validation fails; logs success otherwise.
    """
    machine = test.stateMachine
    # No path declaration means there is nothing to validate.
    if len(machine.getRoute()) == 0:
        return

    logging.info("Running path validation test for %s." % test.name)
    try:
        engine = StateEngine(self, machine, test.publishers)
        # A path validator checks the basic validation rules.
        tracer = PathValidationMutator()
        validator = PathValidator(engine.pathFinder, tracer)
        try:
            engine.run(tracer)
            print("Traced route: ")
            print(" - ".join([str(name) for name in tracer.states]))
            validator.validate()
        except PathException as e:
            raise PeachException(str(e))
    except PeachException as e:
        logging.error("End of path validation test : Validation failed!")
        raise e
    else:
        logging.info("End of path validation test : Successfully passed")
def _runTest(self, run, test, countOnly=False, testRange=None):
    """
    Runs a Test as defined in the Peach XML.

    @type run: Run object
    @param run: Run that test is part of
    @type test: Test object
    @param test: Test to run
    @type countOnly: bool
    @param countOnly: Should we just get total mutator count? Defaults to False.
    @type testRange: list of numbers
    @param testRange: Iteration # test ranges. Only used when performing parallel fuzzing.
    @rtype: number
    @return: the total number of test iterations or None
    """
    stateMachine = test.stateMachine
    stateEngine = StateEngine(self, stateMachine, test.publishers)

    totalTests = "?"
    testCount = 0

    # Spin up agents before anything else so monitors are live.
    self._startAgents(run, test)

    if not countOnly:
        self.watcher.OnTestStarting(run, test, totalTests)

    for p in test.publishers:
        p.initialize()

    # Resolve every mutator named by the test; try the plain name first,
    # then fall back to the XML-prefixed event name.
    self.mutators = []
    for m in test.getMutators():
        try:
            self.mutators.append(eval(m.name))
        except Exception:
            try:
                self.mutators.append(evalEvent("PeachXml_" + m.name, {}, run))
            except Exception:
                raise PeachException(
                    "Unable to load mutator [%s], please verify it was imported correctly." % m.name)

    mutator = test.mutator

    if self.restartState is not None:
        logging.info("State will load in 1 iteration.")
    elif testRange is not None:
        logging.info("Skipping to start of chunk in 1 iteration.")

    # Parallel-fuzzing iteration window (inclusive bounds).
    startCount = None
    endCount = None
    if testRange is not None:
        startCount = testRange[0]
        endCount = testRange[1]
    if self.startNum is not None:
        startCount = self.startNum

    redoCount = 0
    saveState = False      # NOTE(review): assigned but never read here; appears vestigial.
    exitImmediate = False  # NOTE(review): assigned but never read here; appears vestigial.
    actionValues = None

    try:
        while True:
            try:
                testCount += 1

                # Profiling runs only need a couple of iterations.
                if PROFILE and testCount > 2:
                    break

                # What if we are just counting?
                if testCount == 2 and countOnly:
                    self._stopAgents(run, test)
                    return mutator
                # Go through one iteration before we load state.
                elif testCount == 2 and self.restartState is not None:
                    logging.info("Restoring state.")
                    testCount = self.restartState[1]
                    mutator.setState(self.restartState[2])
                elif testCount == 2 and startCount is not None and startCount > 2:
                    # Skip ahead to start range, but not if we are restoring saved state.
                    logging.info("Skipping ahead to iteration %d." % startCount)
                    for _ in range(testCount, startCount):
                        mutator.next()
                        testCount += 1

                # Update total test count
                if testRange is None:
                    totalTests = mutator.getCount()
                else:
                    # If we are parallel use our endCount which will also cause
                    # the estimated time left to be correct.
                    totalTests = endCount + 1

                if totalTests == -1 or totalTests is None:
                    totalTests = "?"
                else:
                    self.watcher.setTotalVariations(totalTests)

                # Fire some events
                self.agent.OnTestStarting()
                if not countOnly:
                    self.watcher.OnTestCaseStarting(run, test, testCount)
                self.testCount = testCount
                mutator.onTestCaseStarting(test, testCount, stateEngine)

                # Run the test
                try:
                    actionValues = stateEngine.run(mutator)
                except RedoTestException:
                    raise
                except (MemoryError, OverflowError):
                    # Some tests cause out of memory exceptions, let skip past them.
                    logging.warning("Out of memory, going to next test.")
                except SoftException as e:
                    # In the case of the first iteration we should never fail.
                    if testCount == 1:
                        raise PeachException("Error: First test case failed: ", e)
                    # Otherwise ignore any SoftExceptions and head for next iteration.

                # Pause as needed
                time.sleep(run.waitTime)

                mutator.onTestCaseFinished(test, testCount, stateEngine)

                # Notify
                if not countOnly:
                    self.watcher.OnTestCaseFinished(run, test, testCount, actionValues)
                self.agent.OnTestFinished()

                # Should we repeat this test?
                if self.agent.RedoTest():
                    logging.warning("Repeating test")
                    raise RedoTestException()

                # Check for faults and collect monitor data when one is seen.
                if self.agent.DetectedFault():
                    logging.warning("Detected fault! Processing data...")
                    results = self.agent.GetMonitorData()
                    mutator.onFaultDetected(test, testCount, stateEngine, results, actionValues)
                    self.watcher.OnFault(run, test, testCount, results, actionValues)
                    self.agent.OnFault()

                # Check for stop event
                if self.agent.StopRun():
                    logging.warning("Detected StopRun, bailing!")
                    self.watcher.OnStopRun(run, test, testCount, None, actionValues)
                    break

                # Increment our mutator
                mutator.next()

                # Reset the redoCounter
                redoCount = 0

            except RedoTestException as e:
                # Allow at most three consecutive redos of the same iteration.
                if redoCount == 3:
                    # str(e), not e.message: exceptions have no .message on Python 3.
                    raise PeachException(str(e))
                redoCount += 1
                testCount -= 1
            except PathException:
                # Ignore PathException while running tests
                mutator.next()
            except SoftException:
                mutator.next()

            # Have we completed our range?
            if (testRange is not None and testCount > endCount) or \
                    (Engine.justOne and startCount is None) or \
                    (Engine.justOne and startCount == testCount):
                logging.info("Completed iteration range.")
                break

    except MutatorCompleted:
        pass
    except KeyboardInterrupt:
        logging.warning("User canceled run.")
        saveState = True
        exitImmediate = True
    except PeachException as e:
        if e.msg.find("Unable to reconnect to Agent") > -1:
            results = {"_Bucket": "AgentConnectionFailed"}
            self.watcher.OnFault(run, test, testCount, results, actionValues)
        raise
    except:
        # Always save state on exceptions; bare except kept deliberately
        # because it re-raises after recording the failure.
        saveState = True
        self.watcher.OnTestCaseException(run, test, testCount, None)
        raise
    finally:
        # Best-effort publisher teardown; failures here must not mask the
        # original exception.
        try:
            for publisher in test.publishers:
                if hasattr(publisher, "hasBeenConnected") and publisher.hasBeenConnected:
                    publisher.close()
                    publisher.hasBeenConnected = False
                if hasattr(publisher, "hasBeenStarted") and publisher.hasBeenStarted:
                    publisher.stop()
                    publisher.hasBeenStarted = False
                publisher.finalize()
        except Exception:
            pass

        self._stopAgents(run, test)

    if not countOnly:
        self.watcher.OnTestFinished(run, test)
def _runTest(self, run, test, countOnly=False, testRange=None):
    """
    Runs a Test as defined in the Peach XML.

    @type run: Run object
    @param run: Run that test is part of
    @type test: Test object
    @param test: Test to run
    @type countOnly: bool
    @param countOnly: Should we just get total mutator count? Defaults to False.
    @type testRange: list of numbers
    @param testRange: Iteration # test ranges. Only used when performing parallel fuzzing.
    @rtype: number
    @return: the total number of test iterations or None
    """
    stateMachine = test.stateMachine
    stateEngine = StateEngine(self, stateMachine, test.publishers)

    totalTests = "?"
    testCount = 0

    # Spin up agents before anything else so monitors are live.
    self._startAgents(run, test)

    if not countOnly:
        self.watcher.OnTestStarting(run, test, totalTests)

    for p in test.publishers:
        p.initialize()

    # Resolve every mutator named by the test; try the plain name first,
    # then fall back to the XML-prefixed event name.
    self.mutators = []
    for m in test.getMutators():
        try:
            self.mutators.append(eval(m.name))
        except Exception:
            try:
                self.mutators.append(evalEvent("PeachXml_" + m.name, {}, run))
            except Exception:
                raise PeachException(
                    "Unable to load mutator [%s], please verify it was imported correctly." % m.name)

    mutator = test.mutator

    if self.restartState is not None:
        logging.info("State will load in 1 iteration.")
    elif testRange is not None:
        logging.info("Skipping to start of chunk in 1 iteration.")

    # Parallel-fuzzing iteration window (inclusive bounds).
    startCount = None
    endCount = None
    if testRange is not None:
        startCount = testRange[0]
        endCount = testRange[1]
    if self.startNum is not None:
        startCount = self.startNum

    redoCount = 0
    saveState = False      # NOTE(review): assigned but never read here; appears vestigial.
    exitImmediate = False  # NOTE(review): assigned but never read here; appears vestigial.
    actionValues = None

    try:
        while True:
            try:
                testCount += 1

                # What if we are just counting?
                if testCount == 2 and countOnly:
                    self._stopAgents(run, test)
                    return mutator
                # Go through one iteration before we load state.
                elif testCount == 2 and self.restartState is not None:
                    logging.info("Restoring state.")
                    testCount = self.restartState[1]
                    mutator.setState(self.restartState[2])
                elif testCount == 2 and startCount is not None and startCount > 2:
                    # Skip ahead to start range, but not if we are restoring saved state.
                    logging.info("Skipping ahead to iteration %d." % startCount)
                    for _ in range(testCount, startCount):
                        mutator.next()
                        testCount += 1

                # Update total test count
                if testRange is None:
                    totalTests = mutator.getCount()
                else:
                    # If we are parallel use our endCount which will also cause
                    # the estimated time left to be correct.
                    totalTests = endCount + 1

                if totalTests == -1 or totalTests is None:
                    totalTests = "?"
                else:
                    self.watcher.setTotalVariations(totalTests)

                # Fire some events
                self.agent.OnTestStarting()
                if not countOnly:
                    self.watcher.OnTestCaseStarting(run, test, testCount)
                self.testCount = testCount
                mutator.onTestCaseStarting(test, testCount, stateEngine)

                # Run the test
                try:
                    actionValues = stateEngine.run(mutator)
                except RedoTestException:
                    raise
                except (MemoryError, OverflowError):
                    # Some tests cause out of memory exceptions, let skip past them.
                    logging.warning("Out of memory, going to next test.")
                except SoftException as e:
                    # In the case of the first iteration we should never fail.
                    if testCount == 1:
                        raise PeachException("Error: First test case failed: ", e)
                    # Otherwise ignore any SoftExceptions and head for next iteration.

                # Pause as needed
                time.sleep(run.waitTime)

                mutator.onTestCaseFinished(test, testCount, stateEngine)

                # Notify
                if not countOnly:
                    self.watcher.OnTestCaseFinished(run, test, testCount, actionValues)
                self.agent.OnTestFinished()

                # Should we repeat this test?
                if self.agent.RedoTest():
                    logging.warning(highlight.warning("Repeating test"))
                    raise RedoTestException()

                # Check for faults and collect monitor data when one is seen.
                if self.agent.DetectedFault():
                    logging.warning(highlight.warning("Detected fault! Processing data..."))
                    results = self.agent.GetMonitorData()
                    mutator.onFaultDetected(test, testCount, stateEngine, results, actionValues)
                    self.watcher.OnFault(run, test, testCount, results, actionValues)
                    self.agent.OnFault()

                # Check for stop event
                if self.agent.StopRun():
                    logging.warning(highlight.warning("Detected StopRun, bailing!"))
                    self.watcher.OnStopRun(run, test, testCount, None, actionValues)
                    break

                # Increment our mutator
                mutator.next()

                # Reset the redoCounter
                redoCount = 0

            except RedoTestException as e:
                # Allow at most three consecutive redos of the same iteration.
                if redoCount == 3:
                    # BUG FIX: was PeachException(e.message); exceptions have no
                    # .message attribute on Python 3 (RedoTestException is raised
                    # with no args above), so use str(e).
                    raise PeachException(str(e))
                redoCount += 1
                testCount -= 1
            except PathException:
                # Ignore PathException while running tests
                mutator.next()
            except SoftException:
                mutator.next()

            # Have we completed our range?
            if (testRange is not None and testCount > endCount) or \
                    (Engine.justOne and startCount is None) or \
                    (Engine.justOne and startCount == testCount):
                logging.info("Completed iteration range.")
                break

    except MutatorCompleted:
        pass
    except KeyboardInterrupt:
        logging.warning("User canceled run.")
        saveState = True
        exitImmediate = True
    except PeachException as e:
        if e.msg.find("Unable to reconnect to Agent") > -1:
            results = {"_Bucket": "AgentConnectionFailed"}
            self.watcher.OnFault(run, test, testCount, results, actionValues)
        raise
    except:
        # Always save state on exceptions; bare except kept deliberately
        # because it re-raises after recording the failure.
        saveState = True
        self.watcher.OnTestCaseException(run, test, testCount, None)
        raise
    finally:
        # Best-effort publisher teardown; failures here must not mask the
        # original exception.
        try:
            for publisher in test.publishers:
                if hasattr(publisher, "hasBeenConnected") and publisher.hasBeenConnected:
                    publisher.close()
                    publisher.hasBeenConnected = False
                if hasattr(publisher, "hasBeenStarted") and publisher.hasBeenStarted:
                    publisher.stop()
                    publisher.hasBeenStarted = False
                publisher.finalize()
        except Exception:
            pass

        self._stopAgents(run, test)

    if not countOnly:
        self.watcher.OnTestFinished(run, test)
def _runTest(self, run, test, countOnly=False, testRange=None):
    """
    Runs a Test as defined in the Peach XML.

    @type run: Run object
    @param run: Run that test is part of
    @type test: Test object
    @param test: Test to run
    @type countOnly: bool
    @param countOnly: Should we just get total mutator count? Defaults to False.
    @type testRange: list of numbers
    @param testRange: Iteration # test ranges. Only used when performing parallel fuzzing.
    @rtype: number
    @return: the total number of test iterations or None
    """
    stateMachine = test.stateMachine
    stateEngine = StateEngine(self, stateMachine, test.publishers)

    totalTests = "?"
    testCount = 0

    # Spin up agents before anything else so monitors are live.
    self._startAgents(run, test)

    if not countOnly:
        self.watcher.OnTestStarting(run, test, totalTests)

    for p in test.publishers:
        p.initialize()

    # Resolve every mutator named by the test; try the plain name first,
    # then fall back to the XML-prefixed event name.
    self.mutators = []
    for m in test.getMutators():
        try:
            self.mutators.append(eval(m.name))
        except Exception:
            try:
                self.mutators.append(evalEvent("PeachXml_" + m.name, {}, run))
            except Exception:
                raise PeachException(
                    "Unable to load mutator [%s], please verify it was imported correctly." % m.name)

    mutator = test.mutator

    if self.restartState is not None:
        logging.info("State will load in 1 iteration.")
    elif testRange is not None:
        logging.info("Skipping to start of chunk in 1 iteration.")

    # Parallel-fuzzing iteration window (inclusive bounds).
    startCount = None
    endCount = None
    if testRange is not None:
        startCount = testRange[0]
        endCount = testRange[1]
    if self.startNum is not None:
        startCount = self.startNum

    redoCount = 0
    saveState = False      # NOTE(review): assigned but never read here; appears vestigial.
    exitImmediate = False  # NOTE(review): assigned but never read here; appears vestigial.
    actionValues = None

    try:
        while True:
            try:
                testCount += 1

                # What if we are just counting?
                if testCount == 2 and countOnly:
                    self._stopAgents(run, test)
                    return mutator
                # Go through one iteration before we load state.
                elif testCount == 2 and self.restartState is not None:
                    logging.info("Restoring state.")
                    testCount = self.restartState[1]
                    mutator.setState(self.restartState[2])
                elif testCount == 2 and startCount is not None and startCount > 2:
                    # Skip ahead to start range, but not if we are restoring saved state.
                    logging.info("Skipping ahead to iteration %d." % startCount)
                    for _ in range(testCount, startCount):
                        mutator.next()
                        testCount += 1

                # Update total test count
                if testRange is None:
                    totalTests = mutator.getCount()
                else:
                    # If we are parallel use our endCount which will also cause
                    # the estimated time left to be correct.
                    totalTests = endCount + 1

                if totalTests == -1 or totalTests is None:
                    totalTests = "?"
                else:
                    self.watcher.setTotalVariations(totalTests)

                # Fire some events
                self.agent.OnTestStarting()
                if not countOnly:
                    self.watcher.OnTestCaseStarting(run, test, testCount)
                self.testCount = testCount
                mutator.onTestCaseStarting(test, testCount, stateEngine)

                # Run the test
                try:
                    actionValues = stateEngine.run(mutator)
                except RedoTestException:
                    raise
                except (MemoryError, OverflowError):
                    # Some tests cause out of memory exceptions, let skip past them.
                    logging.warning("Out of memory, going to next test.")
                except SoftException as e:
                    # In the case of the first iteration we should never fail.
                    if testCount == 1:
                        raise PeachException("Error: First test case failed: ", e)
                    # Otherwise ignore any SoftExceptions and head for next iteration.

                # Pause as needed
                time.sleep(run.waitTime)

                mutator.onTestCaseFinished(test, testCount, stateEngine)

                # Notify
                if not countOnly:
                    self.watcher.OnTestCaseFinished(run, test, testCount, actionValues)
                self.agent.OnTestFinished()

                # Should we repeat this test?
                if self.agent.RedoTest():
                    logging.warning(highlight.warning("Repeating test"))
                    raise RedoTestException()

                # Check for faults and collect monitor data when one is seen.
                if self.agent.DetectedFault():
                    logging.warning(highlight.warning("Detected fault! Processing data..."))
                    results = self.agent.GetMonitorData()
                    mutator.onFaultDetected(test, testCount, stateEngine, results, actionValues)
                    self.watcher.OnFault(run, test, testCount, results, actionValues)
                    self.agent.OnFault()

                # Check for stop event
                if self.agent.StopRun():
                    logging.warning(highlight.warning("Detected StopRun, bailing!"))
                    self.watcher.OnStopRun(run, test, testCount, None, actionValues)
                    break

                # Increment our mutator
                mutator.next()

                # Reset the redoCounter
                redoCount = 0

            except RedoTestException as e:
                # Allow at most three consecutive redos of the same iteration.
                if redoCount == 3:
                    # BUG FIX: was PeachException(e.message); exceptions have no
                    # .message attribute on Python 3 (RedoTestException is raised
                    # with no args above), so use str(e).
                    raise PeachException(str(e))
                redoCount += 1
                testCount -= 1
            except PathException:
                # Ignore PathException while running tests
                mutator.next()
            except SoftException:
                mutator.next()

            # Have we completed our range?
            if (testRange is not None and testCount > endCount) or \
                    (Engine.justOne and startCount is None) or \
                    (Engine.justOne and startCount == testCount):
                logging.info("Completed iteration range.")
                break

    except MutatorCompleted:
        pass
    except KeyboardInterrupt:
        logging.warning("User canceled run.")
        saveState = True
        exitImmediate = True
    except PeachException as e:
        if e.msg.find("Unable to reconnect to Agent") > -1:
            results = {"_Bucket": "AgentConnectionFailed"}
            self.watcher.OnFault(run, test, testCount, results, actionValues)
        raise
    except:
        # Always save state on exceptions; bare except kept deliberately
        # because it re-raises after recording the failure.
        saveState = True
        self.watcher.OnTestCaseException(run, test, testCount, None)
        raise
    finally:
        # Best-effort publisher teardown; failures here must not mask the
        # original exception.
        try:
            for publisher in test.publishers:
                if hasattr(publisher, "hasBeenConnected") and publisher.hasBeenConnected:
                    publisher.close()
                    publisher.hasBeenConnected = False
                if hasattr(publisher, "hasBeenStarted") and publisher.hasBeenStarted:
                    publisher.stop()
                    publisher.hasBeenStarted = False
                publisher.finalize()
        except Exception:
            pass

        self._stopAgents(run, test)

    if not countOnly:
        self.watcher.OnTestFinished(run, test)
def _runTest(self, run, test, countOnly=False, testRange=None):
    """
    Runs a Test as defined in the Peach XML.

    @type run: Run object
    @param run: Run that test is part of
    @type test: Test object
    @param test: Test to run
    @type countOnly: bool
    @param countOnly: Should we just get total mutator count? Defaults to False.
    @type testRange: list of numbers
    @param testRange: Iteration # test ranges. Only used when performing parallel fuzzing.
    @rtype: number
    @return: the total number of test iterations or None
    """
    stateMachine = test.stateMachine
    stateEngine = StateEngine(self, stateMachine, test.publishers)

    totalTests = "?"
    testCount = 0

    # Spin up agents before anything else so monitors are live.
    self._startAgents(run, test)

    if not countOnly:
        self.watcher.OnTestStarting(run, test, totalTests)

    for p in test.publishers:
        p.initialize()

    # Resolve every mutator named by the test; try the plain name first,
    # then fall back to the XML-prefixed event name.
    self.mutators = []
    for m in test.getMutators():
        try:
            self.mutators.append(eval(m.name))
        except Exception:
            try:
                self.mutators.append(evalEvent("PeachXml_" + m.name, {}, run))
            except Exception:
                raise PeachException(
                    "Unable to load mutator [%s], please verify it was imported correctly." % m.name)

    mutator = test.mutator

    if self.restartState is not None:
        logging.info("State will load in 1 iteration.")
    elif testRange is not None:
        logging.info("Skipping to start of chunk in 1 iteration.")

    # Parallel-fuzzing iteration window (inclusive bounds).
    startCount = None
    endCount = None
    if testRange is not None:
        startCount = testRange[0]
        endCount = testRange[1]
    if self.startNum is not None:
        startCount = self.startNum

    redoCount = 0
    saveState = False      # NOTE(review): assigned but never read here; appears vestigial.
    exitImmediate = False  # NOTE(review): assigned but never read here; appears vestigial.
    actionValues = None

    try:
        while True:
            try:
                testCount += 1

                # Profiling runs only need a couple of iterations.
                if PROFILE and testCount > 2:
                    break

                # What if we are just counting?
                if testCount == 2 and countOnly:
                    self._stopAgents(run, test)
                    return mutator
                # Go through one iteration before we load state.
                elif testCount == 2 and self.restartState is not None:
                    logging.info("Restoring state.")
                    testCount = self.restartState[1]
                    mutator.setState(self.restartState[2])
                elif testCount == 2 and startCount is not None and startCount > 2:
                    # Skip ahead to start range, but not if we are restoring saved state.
                    logging.info("Skipping ahead to iteration %d." % startCount)
                    for _ in range(testCount, startCount):
                        mutator.next()
                        testCount += 1

                # Update total test count
                if testRange is None:
                    totalTests = mutator.getCount()
                else:
                    # If we are parallel use our endCount which will also cause
                    # the estimated time left to be correct.
                    totalTests = endCount + 1

                if totalTests == -1 or totalTests is None:
                    totalTests = "?"
                else:
                    self.watcher.setTotalVariations(totalTests)

                # Fire some events
                self.agent.OnTestStarting()
                if not countOnly:
                    self.watcher.OnTestCaseStarting(run, test, testCount)
                self.testCount = testCount
                mutator.onTestCaseStarting(test, testCount, stateEngine)

                # Run the test
                try:
                    actionValues = stateEngine.run(mutator)
                except RedoTestException:
                    raise
                except (MemoryError, OverflowError):
                    # Some tests cause out of memory exceptions, let skip past them.
                    logging.warning("Out of memory, going to next test.")
                except SoftException as e:
                    # In the case of the first iteration we should never fail.
                    if testCount == 1:
                        raise PeachException("Error: First test case failed: ", e)
                    # Otherwise ignore any SoftExceptions and head for next iteration.

                # Pause as needed
                time.sleep(run.waitTime)

                mutator.onTestCaseFinished(test, testCount, stateEngine)

                # Notify
                if not countOnly:
                    self.watcher.OnTestCaseFinished(run, test, testCount, actionValues)
                self.agent.OnTestFinished()

                # Should we repeat this test?
                if self.agent.RedoTest():
                    logging.warning("Repeating test")
                    raise RedoTestException()

                # Check for faults and collect monitor data when one is seen.
                if self.agent.DetectedFault():
                    logging.warning("Detected fault! Processing data...")
                    results = self.agent.GetMonitorData()
                    mutator.onFaultDetected(test, testCount, stateEngine, results, actionValues)
                    self.watcher.OnFault(run, test, testCount, results, actionValues)
                    self.agent.OnFault()

                # Check for stop event
                if self.agent.StopRun():
                    logging.warning("Detected StopRun, bailing!")
                    self.watcher.OnStopRun(run, test, testCount, None, actionValues)
                    break

                # Increment our mutator
                mutator.next()

                # Reset the redoCounter
                redoCount = 0

            except RedoTestException as e:
                # Allow at most three consecutive redos of the same iteration.
                if redoCount == 3:
                    # str(e), not e.message: exceptions have no .message on Python 3.
                    raise PeachException(str(e))
                redoCount += 1
                testCount -= 1
            except PathException:
                # Ignore PathException while running tests
                mutator.next()
            except SoftException:
                mutator.next()

            # Have we completed our range?
            if (testRange is not None and testCount > endCount) or \
                    (Engine.justOne and startCount is None) or \
                    (Engine.justOne and startCount == testCount):
                logging.info("Completed iteration range.")
                break

    except MutatorCompleted:
        pass
    except KeyboardInterrupt:
        logging.warning("User canceled run.")
        saveState = True
        exitImmediate = True
    except PeachException as e:
        if e.msg.find("Unable to reconnect to Agent") > -1:
            results = {"_Bucket": "AgentConnectionFailed"}
            self.watcher.OnFault(run, test, testCount, results, actionValues)
        raise
    except:
        # Always save state on exceptions; bare except kept deliberately
        # because it re-raises after recording the failure.
        saveState = True
        self.watcher.OnTestCaseException(run, test, testCount, None)
        raise
    finally:
        # Best-effort publisher teardown; failures here must not mask the
        # original exception.
        try:
            for publisher in test.publishers:
                if hasattr(publisher, "hasBeenConnected") and publisher.hasBeenConnected:
                    publisher.close()
                    publisher.hasBeenConnected = False
                if hasattr(publisher, "hasBeenStarted") and publisher.hasBeenStarted:
                    publisher.stop()
                    publisher.hasBeenStarted = False
                publisher.finalize()
        except Exception:
            pass

        self._stopAgents(run, test)

    if not countOnly:
        self.watcher.OnTestFinished(run, test)