def _CheckDeviceParse(self, device_input, scheme, username=None,
                      hostname=None, port=None, path=None, serial=None):
  """Verifies that a device specification parses into the expected fields.

  Args:
    device_input (str): Input specifying a device.
    scheme (str): Expected scheme.
    username (str|None): Expected username.
    hostname (str|None): Expected hostname.
    port (int|None): Expected port.
    path (str|None): Expected path.
    serial (str|None): Expected serial number.
  """
  arg_parser = commandline.ArgumentParser()
  arg_parser.add_argument('device', type=commandline.DeviceParser(scheme))
  parsed_device = arg_parser.parse_args([device_input]).device
  # Compare every parsed attribute against its expected value, in a fixed
  # order so failures are reported consistently.
  expectations = (
      ('scheme', scheme),
      ('username', username),
      ('hostname', hostname),
      ('port', port),
      ('path', path),
      ('serial_number', serial),
  )
  for attr_name, expected in expectations:
    self.assertEqual(getattr(parsed_device, attr_name), expected)
def _CheckDeviceParse(self, device_input, scheme, username=None,
                      hostname=None, port=None, path=None):
  """Verifies that a device specification parses into the expected fields.

  Args:
    device_input: String input specifying a device.
    scheme: String expected scheme.
    username: String expected username or None.
    hostname: String expected hostname or None.
    port: Int expected port or None.
    path: String expected path or None.
  """
  device_parser = commandline.ArgumentParser()
  device_parser.add_argument('device', type=commandline.DeviceParser(scheme))
  parsed = device_parser.parse_args([device_input]).device
  # Each attribute of the parsed device must match the caller's expectation.
  self.assertEqual(parsed.scheme, scheme)
  self.assertEqual(parsed.username, username)
  self.assertEqual(parsed.hostname, hostname)
  self.assertEqual(parsed.port, port)
  self.assertEqual(parsed.path, path)
def _CheckDeviceParseFails(self, device_input, schemes=_ALL_SCHEMES):
  """Asserts that parsing the given device specification fails.

  Args:
    device_input: String input specifying a device.
    schemes: A scheme or list of allowed schemes, by default allows all.
  """
  failing_parser = commandline.ArgumentParser()
  failing_parser.add_argument('device',
                              type=commandline.DeviceParser(schemes))
  # argparse exits the process on a parse error; capture its output so the
  # test log stays clean.
  with self.OutputCapturer():
    self.assertRaises2(SystemExit, failing_parser.parse_args, [device_input])
def AddDeviceArgument(cls, parser, schemes=commandline.DEVICE_SCHEME_SSH,
                      positional=False):
  """Adds a device argument to the parser.

  This standardizes the help message across all subcommands.

  Args:
    parser: The parser to add the device argument to.
    schemes: List of device schemes or single scheme to allow.
    positional: Whether it should be a positional or named argument.
  """
  schemes = list(cros_build_lib.iflatten_instance(schemes))
  # Help text per scheme; assembled in a fixed order below so the final
  # message is deterministic regardless of the schemes' input order.
  scheme_help = {
      commandline.DEVICE_SCHEME_SSH:
          'Target a device with [user@]hostname[:port]. '
          'IPv4/IPv6 addresses are allowed, but IPv6 must '
          'use brackets (e.g. [::1]).',
      commandline.DEVICE_SCHEME_USB:
          'Target removable media with usb://[path].',
      commandline.DEVICE_SCHEME_SERVO:
          'Target a servo by port or serial number with '
          'servo:port[:port] or servo:serial:serial-number. '
          'e.g. servo:port:1234 or servo:serial:C1230024192.',
      commandline.DEVICE_SCHEME_FILE:
          'Target a local file with file://path.',
  }
  display_order = (commandline.DEVICE_SCHEME_SSH,
                   commandline.DEVICE_SCHEME_USB,
                   commandline.DEVICE_SCHEME_SERVO,
                   commandline.DEVICE_SCHEME_FILE)
  help_strings = [scheme_help[s] for s in display_order if s in schemes]
  # Positional form uses a bare name; the named form offers -d/--device.
  flags = ('device',) if positional else ('-d', '--device')
  parser.add_argument(*flags,
                      type=commandline.DeviceParser(schemes),
                      help=' '.join(help_strings))
def testDeployWithPort(self):
  """Tests that Deploy() forwards an explicit SSH port to deploy_chrome."""
  port = '9999'
  # Parse "<ip>:<port>" into a device object, as the CLI would.
  device = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(
      '%s:%s' % (self.DUT_IP, port))
  run_mock = self.PatchObject(commands.ChromeSDK, 'Run')
  build_dir = os.path.join('out_%s' % self.BOARD, 'Release')
  self.GetBuilder().Deploy(device, build_dir, 'test')
  # deploy_chrome must receive both the target host and the --port flag.
  run_mock.assert_called_with(
      ['deploy_chrome', '--build-dir', build_dir, '--to', self.DUT_IP,
       '--force', '--port', port],
      run_args=self.log_output_args)
def AddDeviceArgument(cls, parser, schemes=commandline.DEVICE_SCHEME_SSH):
  """Adds a device argument to the parser.

  This standardizes the help message across all subcommands.

  Args:
    parser: The parser to add the device argument to.
    schemes: List of device schemes or single scheme to allow.
  """
  schemes = list(cros_build_lib.iflatten_instance(schemes))
  # (scheme, message) pairs in display order; only allowed schemes are
  # included in the final help text.
  scheme_messages = (
      (commandline.DEVICE_SCHEME_SSH,
       'Target a device with [user@]hostname[:port].'),
      (commandline.DEVICE_SCHEME_USB,
       'Target removable media with usb://[path].'),
      (commandline.DEVICE_SCHEME_FILE,
       'Target a local file with file://path.'),
  )
  help_strings = [msg for scheme, msg in scheme_messages if scheme in schemes]
  parser.add_argument('device',
                      type=commandline.DeviceParser(schemes),
                      help=' '.join(help_strings))
class TestAutotestEvaluator(cros_test_lib.MockTempDirTestCase):
  """Tests AutotestEvaluator class."""

  # Fixture constants shared by the tests below.
  BOARD = 'samus'
  TEST_NAME = 'graphics_WebGLAquarium'
  METRIC = 'avg_fps_1000_fishes/summary/value'
  REPORT_FILE = 'reports.json'
  # Path of the results-chart.json produced by autotest on the DUT.
  REMOTE_REPORT_FILE = '%s/results/default/%s/results/results-chart.json' % (
      autotest_evaluator.AutotestEvaluator.AUTOTEST_BASE, TEST_NAME)
  DUT_IP = '192.168.1.1'
  DUT = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(DUT_IP)
  # Autotest control file for the test, used for the sanity "ls" check.
  TEST_TARGET = '%s/tests/%s/control' % (
      autotest_evaluator.AutotestEvaluator.AUTOTEST_BASE, TEST_NAME)
  # Minimal results-chart.json body; %s is filled with the score.
  AQUARIUM_REPORT_TEMPLATE = """
{"avg_fps_1000_fishes": {
   "summary": {
     "units": "fps",
     "type": "scalar",
     "value": %s,
     "improvement_direction": "up"
   }
 }
}"""
  BUILD_LABEL = 'base'
  AUTOTEST_CLIENT = autotest_evaluator.AutotestEvaluator.AUTOTEST_CLIENT
  TEST_THAT_COMMAND = ['test_that', '-b', BOARD, '--fast', '--args',
                       'local=True', DUT_IP, TEST_NAME]

  def setUp(self):
    self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=False)

    # Sets up default options and evaluator object.
    self.options = cros_test_lib.EasyAttr(
        base_dir=self.tempdir, board=self.BOARD, test_name=self.TEST_NAME,
        metric=self.METRIC, metric_take_average=False, reuse_eval=True,
        chromium_dir=None, cros_dir=None, eval_passing_only=False)
    self.evaluator = autotest_evaluator.AutotestEvaluator(self.options)

  def PrepareWebglAquariumReports(self, scores):
    """Prepares graphics_WebGLAquarium reports.

    It is a simplified version. What the test cares about is
    "avg_fps_1000_fishes/summary/value". It can produce multiple reports if
    more than one score is given.

    Args:
      scores: List of scores.

    Returns:
      A list of file names stored in the report directory.
    """
    result = []
    num_reports = len(scores)
    for ith, score in enumerate(scores, start=1):
      report_file = os.path.join(
          self.tempdir, 'reports',
          'results-chart.%s.%d-%d.json' % (self.BUILD_LABEL, ith,
                                           num_reports))
      osutils.WriteFile(report_file, self.AQUARIUM_REPORT_TEMPLATE % score)
      result.append(report_file)
    return result

  def UpdateOptionsAndEvaluator(self, options_to_update):
    """Updates self.options and self.evaluator.

    Based on updated self.options, it creates a new AutotestEvaluator
    instance and assigns it to self.evaluator.

    Args:
      options_to_update: a dict to update self.options.
    """
    self.options.update(options_to_update)
    self.evaluator = autotest_evaluator.AutotestEvaluator(self.options)

  def testInit(self):
    """Tests that AutotestEvaluator() works as expected."""
    base_dir = self.tempdir
    self.assertEqual(base_dir, self.evaluator.base_dir)
    self.assertEqual(os.path.join(base_dir, 'reports'),
                     self.evaluator.report_base_dir)
    self.assertTrue(os.path.isdir(self.evaluator.report_base_dir))
    self.assertEqual(self.BOARD, self.evaluator.board)
    self.assertEqual(self.TEST_NAME, self.evaluator.test_name)
    self.assertEqual(self.METRIC, self.evaluator.metric)
    self.assertFalse(self.evaluator.metric_take_average)
    self.assertTrue(self.evaluator.reuse_eval)
    self.assertEqual(os.path.join(base_dir, 'chromium'),
                     self.evaluator.chromium_dir)

    # With chromium_dir specified and flip booleans.
    self.UpdateOptionsAndEvaluator(
        dict(chromium_dir='/tmp/chromium', reuse_eval=False))
    self.assertFalse(self.evaluator.metric_take_average)
    self.assertFalse(self.evaluator.reuse_eval)
    self.assertEqual('/tmp/chromium', self.evaluator.chromium_dir)

  def testInitMissingRequiredArgs(self):
    """Tests that AE() raises exception when required options are missing."""
    options = cros_test_lib.EasyAttr()
    with self.assertRaises(Exception) as cm:
      autotest_evaluator.AutotestEvaluator(options)
    exception_message = str(cm.exception)
    self.assertIn('Missing command line', exception_message)
    self.assertIn('AutotestEvaluator', exception_message)
    # Every required argument should be named in the error message.
    for arg in autotest_evaluator.AutotestEvaluator.REQUIRED_ARGS:
      self.assertIn(arg, exception_message)

  def testRunTestFromDut(self):
    """Tests that RunTestFromDut() invokes expected commands."""
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    rsh_mock.AddCmdResult(
        [self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    rsh_mock.AddCmdResult(
        [self.REMOTE_REPORT_FILE, self.REPORT_FILE], returncode=0,
        mock_attr='ScpToLocal')
    self.assertTrue(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))

  def testRunTestFromDutSanityCheckFail(self):
    """Tests RunTestFromDut() when autotest control file is missing."""
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    # "ls" of the control file fails => sanity check fails.
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
    self.assertFalse(self.evaluator.RunTestFromDut(self.DUT,
                                                   self.REPORT_FILE))

  def testRunTestFromDutLsSshError(self):
    """Tests RunTestFromDut() when the sanity "ls" hits an SSH error."""
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET],
        returncode=remote_access.SSH_ERROR_CODE)
    self.assertFalse(self.evaluator.RunTestFromDut(self.DUT,
                                                   self.REPORT_FILE))

  def testRunTestFromDutAutotestSshErrorWithEvalPassingOnly(self):
    """Tests RunTestFromDut() with failed autotest and --eval-passing-only."""
    self.UpdateOptionsAndEvaluator(dict(eval_passing_only=True))
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    # Autotest invocation itself fails with an SSH error.
    rsh_mock.AddCmdResult(
        [self.AUTOTEST_CLIENT, self.TEST_TARGET],
        returncode=remote_access.SSH_ERROR_CODE)
    self.assertFalse(self.evaluator.RunTestFromDut(self.DUT,
                                                   self.REPORT_FILE))

  def testRunTestFromDutAutotestFailWithEvalPassingOnly(self):
    """Tests RunTestFromDut() with failed autotest and --eval-passing-only."""
    self.UpdateOptionsAndEvaluator(dict(eval_passing_only=True))
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    rsh_mock.AddCmdResult(
        [self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
    self.assertFalse(self.evaluator.RunTestFromDut(self.DUT,
                                                   self.REPORT_FILE))

  def testRunTestFromDutAutotestFailWithFailsafe(self):
    """Tests RunTestFromDut() with failed autotest.

    Even if the autotest fails to run, RunTestFromDut() tries to retrieve
    the report from the DUT.
    """
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    rsh_mock.AddCmdResult(
        [self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
    # Report retrieval still succeeds, so the overall run succeeds.
    rsh_mock.AddCmdResult(
        [self.REMOTE_REPORT_FILE, self.REPORT_FILE], returncode=0,
        mock_attr='ScpToLocal')
    self.assertTrue(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))

  def testRunTestFromDutScpReportFail(self):
    """Tests RunTestFromDut() when it failed to remote copy report file."""
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    rsh_mock.AddCmdResult(
        [self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    rsh_mock.AddCmdResult(
        [self.REMOTE_REPORT_FILE, self.REPORT_FILE], returncode=1,
        mock_attr='ScpToLocal')
    self.assertFalse(self.evaluator.RunTestFromDut(self.DUT,
                                                   self.REPORT_FILE))

  def testRunTestFromDutAutotestFailWithFailsafeScpReportFail(self):
    """Tests RunTestFromDut() with autotest failed with --eval-failsafe.

    Even if the autotest fails to run, with --eval-failsafe set,
    RunTestFromDut() tries to retrieve the report from the DUT. This test
    checks the report-missing case.
    """
    self.UpdateOptionsAndEvaluator(dict(eval_failsafe=True))
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    rsh_mock.AddCmdResult(
        [self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
    rsh_mock.AddCmdResult(
        [self.REMOTE_REPORT_FILE, self.REPORT_FILE], returncode=1,
        mock_attr='ScpToLocal')
    self.assertFalse(self.evaluator.RunTestFromDut(self.DUT,
                                                   self.REPORT_FILE))

  def GetTestResultPath(self, evaluator):
    """Returns base path storing test result.

    Args:
      evaluator: Evaluator object.

    Returns:
      Path where the evaluator stores test results.
    """
    return evaluator.ResolvePathFromChroot(os.path.join(
        '/tmp', 'test_that_latest', 'results-1-%s' % evaluator.test_name))

  def testLookupReportFile(self):
    """Tests LookupReportFile().

    Tests that it invokes expected command and performs path normalization.
    """
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    results_base_path = self.GetTestResultPath(self.evaluator)
    # "find" prints a relative path; LookupReportFile must normalize it.
    find_command_result = (
        './%s/results/results-chart.json\n' % self.TEST_NAME)
    command_mock.AddCmdResult(
        ['find', '.', '-name', 'results-chart.json'],
        kwargs={'cwd': results_base_path, 'capture_output': True},
        output=find_command_result)
    self.assertEqual(
        os.path.join(results_base_path, self.TEST_NAME, 'results',
                     'results-chart.json'),
        self.evaluator.LookupReportFile())

  def testLookupReportFileMissing(self):
    """Tests LookupReportFile() when the report does not exist."""
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    results_base_path = self.GetTestResultPath(self.evaluator)
    command_mock.AddCmdResult(
        ['find', '.', '-name', 'results-chart.json'],
        kwargs={'cwd': results_base_path, 'capture_output': True},
        output='')
    self.assertIsNone(self.evaluator.LookupReportFile())

  def WriteTestResult(self, evaluator, score=0):
    """Writes a test result to evaluator's default location.

    Args:
      evaluator: Evaluator object.
      score: score of the result.

    Returns:
      (path to test result file, result file's content)
    """
    result_dir = self.GetTestResultPath(evaluator)
    osutils.SafeMakedirs(result_dir)
    result_path = os.path.join(result_dir, evaluator.RESULT_FILENAME)
    result_content = self.AQUARIUM_REPORT_TEMPLATE % score
    osutils.WriteFile(result_path, result_content)
    return (result_path, result_content)

  def testRunTestFromHost(self):
    """Tests TestFromHost().

    Tests that it invokes expected commands and that the report file is
    copied to the designated path.
    """
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    self.SkipMaySetupBoard()
    command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=0)
    report_path, report_content = self.WriteTestResult(self.evaluator)
    command_mock.AddCmdResult(
        ['find', '.', '-name', 'results-chart.json'], output=report_path)

    # Make sure report file is copied to designated path.
    target_report_file = os.path.join(self.tempdir,
                                      'stored-results-chart.json')
    osutils.SafeUnlink(target_report_file)
    self.assertTrue(
        self.evaluator.RunTestFromHost(self.DUT, target_report_file))
    self.assertExists(target_report_file)
    self.assertEqual(report_content, osutils.ReadFile(target_report_file))

  def testRunTestFromHostTestThatFailWithEvalPassingOnly(self):
    """Tests TestFromHost() with failed autotest and --eval-passing-only."""
    self.UpdateOptionsAndEvaluator(dict(eval_passing_only=True))
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    self.SkipMaySetupBoard()
    command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=1)
    self.assertFalse(self.evaluator.RunTestFromHost(self.DUT,
                                                    self.REPORT_FILE))

  def testRunTestFromHostTestThatFail(self):
    """Tests TestFromHost() with failed autotest.

    It will try evaluating the test result anyway.
    """
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    self.SkipMaySetupBoard()
    # test_that failed.
    command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=1)
    # However, report is obtained successfully.
    report_path, report_content = self.WriteTestResult(self.evaluator)
    command_mock.AddCmdResult(
        ['find', '.', '-name', 'results-chart.json'], output=report_path)

    # Make sure report file is copied to designated path.
    target_report_file = os.path.join(self.tempdir,
                                      'stored-results-chart.json')
    osutils.SafeUnlink(target_report_file)
    self.assertTrue(
        self.evaluator.RunTestFromHost(self.DUT, target_report_file))
    self.assertExists(target_report_file)
    self.assertEqual(report_content, osutils.ReadFile(target_report_file))

  def testRunTestFromHostTestThatFailReportMissing(self):
    """Tests TestFromHost() with failed autotest and without report."""
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    self.SkipMaySetupBoard()
    # test_that failed.
    command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=1)
    # And report file is missing.
    command_mock.AddCmdResult(
        ['find', '.', '-name', 'results-chart.json'], output='')
    self.assertFalse(self.evaluator.RunTestFromHost(self.DUT,
                                                    self.REPORT_FILE))

  def testRunTestFromHostReportFileMissing(self):
    """Tests TestFromHost() when test report file does not exist."""
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    self.SkipMaySetupBoard()
    command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=0)
    command_mock.AddCmdResult(
        ['find', '.', '-name', 'results-chart.json'], output='')
    self.assertFalse(self.evaluator.RunTestFromHost(self.DUT,
                                                    self.REPORT_FILE))

  def testGetAutotestMetricValue(self):
    """Tests that GetAutotestMetricValue() extracts score correctly."""
    score = 56.73
    report_file = self.PrepareWebglAquariumReports([score])[0]
    self.assertEqual(score,
                     self.evaluator.GetAutotestMetricValue(report_file))

  def testGetAutotestMetricValueMetricTakeAverage(self):
    """Tests that GetAutotestMetricValue() extracts averaged scores."""
    # metric_take_average=True
    self.UpdateOptionsAndEvaluator(dict(metric_take_average=True))

    scores = [55, 57, 58]
    # A report's value is a list of scores.
    report_file = self.PrepareWebglAquariumReports([scores])[0]
    self.assertAlmostEqual(56.66,
                           self.evaluator.GetAutotestMetricValue(report_file),
                           delta=0.01)

  def testEvaluateRunTestFromDut(self):
    """Tests Evaluate() which runs test from DUT."""
    # Mock RunTestFromDut success.
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    rsh_mock.AddCmdResult(
        [self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)

    # Prepare result for evaluate.
    score = 56.73
    report_file = self.PrepareWebglAquariumReports([score])[0]
    rsh_mock.AddCmdResult(
        [self.REMOTE_REPORT_FILE, report_file], returncode=0,
        kwargs={'check': False}, mock_attr='ScpToLocal')
    eval_score = self.evaluator.Evaluate(self.DUT, self.BUILD_LABEL)
    self.assertEqual(1, len(eval_score.values))
    self.assertEqual(score, eval_score.values[0])
    self.assertEqual(score, eval_score.mean)
    self.assertEqual(0.0, eval_score.variance)
    self.assertEqual(0.0, eval_score.std)

  def testEvaluateTwiceRunTestFromDut(self):
    """Tests Evaluate() with repeat=2 which runs test from DUT."""
    # Mock RunTestFromDut success.
    rsh_mock = self.StartPatcher(RemoteShScpMock())
    rsh_mock.AddCmdResult(
        ['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
    rsh_mock.AddCmdResult(
        ['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
    rsh_mock.AddCmdResult(
        [self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)

    # Prepare two results for evaluate.
    scores = [56, 58]
    report_files = self.PrepareWebglAquariumReports(scores)
    for report_file in report_files:
      rsh_mock.AddCmdResult(
          [self.REMOTE_REPORT_FILE, report_file], returncode=0,
          mock_attr='ScpToLocal')

    eval_score = self.evaluator.Evaluate(self.DUT, self.BUILD_LABEL,
                                         repeat=2)
    self.assertEqual(2, len(eval_score.values))
    self.assertEqual(scores[0], eval_score.values[0])
    self.assertEqual(scores[1], eval_score.values[1])
    self.assertEqual(57, eval_score.mean)
    self.assertEqual(2.0, eval_score.variance)
    self.assertAlmostEqual(1.414, eval_score.std, delta=0.01)

  def SkipMaySetupBoard(self):
    """Lets evaluator.MaySetupBoard() return True without action.

    It touches the /build/{board} directory inside the chroot so that
    MaySetupBoard() thinks the board is already set up.
    """
    osutils.SafeMakedirs(os.path.join(
        self.evaluator.cros_dir, 'chroot', 'build', self.evaluator.board))

  def testEvaluateFromHost(self):
    """Tests Evaluate() which runs test from host."""
    # Mock RunTestFromDut fail.
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    command_mock.AddCmdResult(
        partial_mock.InOrder(['rm', '-f', self.REMOTE_REPORT_FILE]),
        returncode=0)
    command_mock.AddCmdResult(
        partial_mock.InOrder([self.AUTOTEST_CLIENT, self.TEST_TARGET]),
        returncode=1)

    self.SkipMaySetupBoard()
    # Mock RunTestFromHost success.
    command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=0)
    # Mock 'find' and return a result file for verify.
    score = 59.9
    report_file_in_chroot, _ = self.WriteTestResult(self.evaluator, score)
    command_mock.AddCmdResult(
        ['find', '.', '-name', 'results-chart.json'],
        output=report_file_in_chroot)

    eval_score = self.evaluator.Evaluate(self.DUT, self.BUILD_LABEL)
    self.assertEqual(1, len(eval_score.values))
    self.assertEqual(score, eval_score.values[0])
    self.assertEqual(score, eval_score.mean)
    self.assertEqual(0.0, eval_score.variance)
    self.assertEqual(0.0, eval_score.std)

  def testCheckLastEvaluate(self):
    """Tests CheckLastEvaluate().

    Tests that it extracts the score from the last evaluation result.
    """
    scores = [56, 58]
    self.PrepareWebglAquariumReports(scores)

    eval_score = self.evaluator.CheckLastEvaluate(self.BUILD_LABEL, repeat=2)
    self.assertEqual(2, len(eval_score.values))
    self.assertEqual(scores[0], eval_score.values[0])
    self.assertEqual(scores[1], eval_score.values[1])
    self.assertEqual(57, eval_score.mean)
    self.assertEqual(2.0, eval_score.variance)
    self.assertAlmostEqual(1.414, eval_score.std, delta=0.01)

  def testCheckLastEvaluateDifferentLabel(self):
    """Tests that CheckLastEvaluate() fails to extract the score."""
    scores = [56, 58]
    self.PrepareWebglAquariumReports(scores)

    # Reports were stored under BUILD_LABEL; a different label finds none.
    eval_score = self.evaluator.CheckLastEvaluate('different_build', repeat=2)
    self.assertEqual(0, len(eval_score))

  def testCheckLastEvaluateFlagUnset(self):
    """Tests CheckLastEvaluate() when "reuse_eval" option is unset.

    Tests that it always returns an empty score when the "reuse_eval"
    option is unset.
    """
    # 'reuse_eval' set to False.
    self.UpdateOptionsAndEvaluator(dict(reuse_eval=False))

    scores = [56, 58]
    self.PrepareWebglAquariumReports(scores)

    eval_score = self.evaluator.CheckLastEvaluate(self.BUILD_LABEL, repeat=2)
    self.assertEqual(0, len(eval_score))

  def CreateCommandMockForRepo(self, cwd):
    """Creates a command mock and adds commands "repo init" "repo sync".

    Args:
      cwd: Directory for running "repo init".

    Returns:
      command_mock object.
    """
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    command_mock.AddCmdResult(
        ['repo', 'init', '--manifest-url',
         'https://chromium.googlesource.com/chromiumos/manifest.git',
         '--repo-url',
         'https://chromium.googlesource.com/external/repo.git'],
        kwargs={'cwd': cwd},
        side_effect=repo_util_unittest.RepoInitSideEffects)
    command_mock.AddCmdResult(
        [repo_util_unittest.RepoCmdPath(cwd), 'sync', '--jobs', '8'],
        kwargs={'cwd': cwd})
    return command_mock

  def testSetupCrosRepo(self):
    """Tests SetupCrosRepo() by verifying commands it emits."""
    unused_command_mock = self.CreateCommandMockForRepo(
        self.evaluator.cros_dir)
    self.evaluator.SetupCrosRepo()

  def testMaySetupBoardAlreadyDone(self):
    """Tests MaySetupBoard() that board is already set."""
    # mkdir board path inside chroot.
    self.SkipMaySetupBoard()
    self.assertTrue(self.evaluator.MaySetupBoard())

  def testMaySetupBoard(self):
    """Tests MaySetupBoard()."""
    command_mock = self.CreateCommandMockForRepo(self.evaluator.cros_dir)
    kwargs_run_chroot = {
        'enter_chroot': True,
        'chroot_args': ['--chrome_root', self.evaluator.chromium_dir,
                        '--no-ns-pid'],
        'cwd': self.evaluator.cros_dir}
    command_mock.AddCmdResult(
        ['setup_board', '--board', self.BOARD], kwargs=kwargs_run_chroot)
    command_mock.AddCmdResult(
        ['./build_packages', '--board', self.BOARD],
        kwargs=kwargs_run_chroot)
    self.assertTrue(self.evaluator.MaySetupBoard())

  def testMaySetupBoardBuildPackageFailed(self):
    """Tests MaySetupBoard() when build_packages fails."""
    command_mock = self.CreateCommandMockForRepo(self.evaluator.cros_dir)
    kwargs_run_chroot = {
        'enter_chroot': True,
        'chroot_args': ['--chrome_root', self.evaluator.chromium_dir,
                        '--no-ns-pid'],
        'cwd': self.evaluator.cros_dir}
    command_mock.AddCmdResult(
        ['setup_board', '--board', self.BOARD], kwargs=kwargs_run_chroot)
    command_mock.AddCmdResult(
        ['./build_packages', '--board', self.BOARD],
        kwargs=kwargs_run_chroot, returncode=1)
    self.assertFalse(self.evaluator.MaySetupBoard())
class TestGitBisector(cros_test_lib.MockTempDirTestCase):
  """Tests GitBisector class."""

  BOARD = 'samus'
  TEST_NAME = 'graphics_WebGLAquarium'
  METRIC = 'avg_fps_1000_fishes/summary/value'
  REPORT_FILE = 'reports.json'
  DUT_ADDR = '192.168.1.1'
  DUT = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(DUT_ADDR)

  # Be aware that GOOD_COMMIT_INFO and BAD_COMMIT_INFO should be assigned via
  # copy.deepcopy() as their users are likely to change the content.
  GOOD_COMMIT_SHA1 = '44af5c9a5505'
  GOOD_COMMIT_TIMESTAMP = 1486526594
  GOOD_COMMIT_SCORE = common.Score([100])
  GOOD_COMMIT_INFO = common.CommitInfo(
      sha1=GOOD_COMMIT_SHA1, timestamp=GOOD_COMMIT_TIMESTAMP, title='good',
      label='last-known-good ', score=GOOD_COMMIT_SCORE)

  BAD_COMMIT_SHA1 = '6a163bb66c3e'
  BAD_COMMIT_TIMESTAMP = 1486530021
  BAD_COMMIT_SCORE = common.Score([80])
  BAD_COMMIT_INFO = common.CommitInfo(
      sha1=BAD_COMMIT_SHA1, timestamp=BAD_COMMIT_TIMESTAMP, title='bad',
      label='last-known-bad ', score=BAD_COMMIT_SCORE)

  CULPRIT_COMMIT_SHA1 = '12345abcde'
  CULPRIT_COMMIT_TIMESTAMP = 1486530000
  CULPRIT_COMMIT_SCORE = common.Score([81])
  CULPRIT_COMMIT_INFO = common.CommitInfo(
      sha1=CULPRIT_COMMIT_SHA1, timestamp=CULPRIT_COMMIT_TIMESTAMP,
      title='bad', score=CULPRIT_COMMIT_SCORE)

  THRESHOLD_SPLITTER = 95  # Score between good and bad, closer to good side.
  THRESHOLD = 5  # Distance between good score and splitter.
  REPEAT = 3

  def setUp(self):
    """Sets up test case."""
    self.options = cros_test_lib.EasyAttr(
        base_dir=self.tempdir, board=self.BOARD, reuse_repo=True,
        good=self.GOOD_COMMIT_SHA1, bad=self.BAD_COMMIT_SHA1,
        remote=self.DUT, eval_repeat=self.REPEAT, auto_threshold=False,
        reuse_eval=False, eval_raise_on_error=False, skip_failed_commit=False)
    self.evaluator = evaluator_module.Evaluator(self.options)
    self.builder = builder_module.Builder(self.options)
    self.bisector = git_bisector.GitBisector(self.options, self.builder,
                                             self.evaluator)
    self.repo_dir = os.path.join(self.tempdir,
                                 builder_module.Builder.DEFAULT_REPO_DIR)

  def setDefaultCommitInfo(self):
    """Sets up default commit info."""
    self.bisector.good_commit_info = copy.deepcopy(self.GOOD_COMMIT_INFO)
    self.bisector.bad_commit_info = copy.deepcopy(self.BAD_COMMIT_INFO)

  def testInit(self):
    """Tests GitBisector() to expect default data members."""
    self.assertEqual(self.GOOD_COMMIT_SHA1, self.bisector.good_commit)
    self.assertEqual(self.BAD_COMMIT_SHA1, self.bisector.bad_commit)
    self.assertEqual(self.DUT_ADDR, self.bisector.remote.raw)
    self.assertEqual(self.REPEAT, self.bisector.eval_repeat)
    self.assertEqual(self.builder, self.bisector.builder)
    self.assertEqual(self.repo_dir, self.bisector.repo_dir)
    self.assertIsNone(self.bisector.good_commit_info)
    self.assertIsNone(self.bisector.bad_commit_info)
    self.assertEqual(0, len(self.bisector.bisect_log))
    self.assertIsNone(self.bisector.threshold)
    # assertFalse covers both None and empty CommitInfo.
    self.assertFalse(self.bisector.current_commit)

  def testInitMissingRequiredArgs(self):
    """Tests that GitBisector raises error when missing required arguments."""
    options = cros_test_lib.EasyAttr()
    with self.assertRaises(common.MissingRequiredOptionsException) as cm:
      git_bisector.GitBisector(options, self.builder, self.evaluator)
    exception_message = str(cm.exception)
    self.assertIn('Missing command line', exception_message)
    self.assertIn('GitBisector', exception_message)
    # Every required argument should be named in the error message.
    for arg in git_bisector.GitBisector.REQUIRED_ARGS:
      self.assertIn(arg, exception_message)

  def testCheckCommitFormat(self):
    """Tests CheckCommitFormat()."""
    sha1 = '900d900d'
    self.assertEqual(sha1, git_bisector.GitBisector.CheckCommitFormat(sha1))
    not_sha1 = 'bad_sha1'
    self.assertIsNone(git_bisector.GitBisector.CheckCommitFormat(not_sha1))

  def testSetUp(self):
    """Tests that SetUp() calls builder.SetUp()."""
    with mock.patch.object(builder_module.Builder, 'SetUp') as builder_mock:
      self.bisector.SetUp()
      builder_mock.assert_called_with()

  def testGit(self):
    """Tests that Git() invokes git.RunGit() as expected."""
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    git_mock.AddRunGitResult(['ls'])
    self.bisector.Git(['ls'])

  def testUpdateCurrentCommit(self):
    """Tests that UpdateCurrentCommit() updates self.current_commit."""
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    # Output format is "<sha1> <timestamp> <title>".
    git_mock.AddRunGitResult(
        partial_mock.In('show'),
        output='4fcbdaf6 1493010050 Add "cros bisect" command.\n')
    self.bisector.UpdateCurrentCommit()
    current_commit = self.bisector.current_commit
    self.assertEqual('4fcbdaf6', current_commit.sha1)
    self.assertEqual(1493010050, current_commit.timestamp)
    self.assertEqual('Add "cros bisect" command.', current_commit.title)

  def testUpdateCurrentCommitFail(self):
    """Tests UpdateCurrentCommit() when 'git show' returns nonzero."""
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    git_mock.AddRunGitResult(partial_mock.In('show'), returncode=128)
    self.bisector.UpdateCurrentCommit()
    self.assertFalse(self.bisector.current_commit)

  def testUpdateCurrentCommitFailUnexpectedOutput(self):
    """Tests UpdateCurrentCommit() when 'git show' gives unexpected output."""
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    git_mock.AddRunGitResult(partial_mock.In('show'), output='Not expected')
    self.bisector.UpdateCurrentCommit()
    self.assertFalse(self.bisector.current_commit)

  def MockOutBuildDeployEvaluateForSanityCheck(self):
    """Mocks out BuildDeployEvaluate behavior for SanityCheck test.

    It mocks UpdateCurrentCommit() to emit good and bad commits. And mocks
    CheckLastEvaluate() to return good and bad commit score.
    """
    commit_info_list = [
        common.CommitInfo(sha1=self.GOOD_COMMIT_SHA1, title='good',
                          timestamp=self.GOOD_COMMIT_TIMESTAMP),
        common.CommitInfo(sha1=self.BAD_COMMIT_SHA1, title='bad',
                          timestamp=self.BAD_COMMIT_TIMESTAMP)
    ]

    # This mock is problematic. The side effect should modify "self" when
    # the member method is called.
    def UpdateCurrentCommitSideEffect():
      self.bisector.current_commit = commit_info_list.pop(0)
    self.PatchObject(git_bisector.GitBisector, 'UpdateCurrentCommit',
                     side_effect=UpdateCurrentCommitSideEffect)

    self.PatchObject(
        evaluator_module.Evaluator, 'CheckLastEvaluate',
        side_effect=[self.GOOD_COMMIT_SCORE, self.BAD_COMMIT_SCORE])

  def testGetCommitTimestamp(self):
    """Tests GetCommitTimeStamp by mocking git.RunGit."""
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    # Mock git result for GetCommitTimestamp.
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.GOOD_COMMIT_SHA1]),
        output=str(self.GOOD_COMMIT_TIMESTAMP))
    self.assertEqual(
        self.GOOD_COMMIT_TIMESTAMP,
        self.bisector.GetCommitTimestamp(self.GOOD_COMMIT_SHA1))

  def testGetCommitTimestampNotFound(self):
    """Tests GetCommitTimeStamp when the commit is not found."""
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    nonexist_sha1 = '12345'
    # Mock git result for GetCommitTimestamp.
    git_mock.AddRunGitResult(partial_mock.InOrder(['show', nonexist_sha1]),
                             output='commit does not exist')
    self.assertIsNone(self.bisector.GetCommitTimestamp(nonexist_sha1))

  def testSanityCheck(self):
    """Tests SanityCheck().

    It tests by mocking out git commands called by SanityCheck().
    """
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    # Mock git result for DoesCommitExistInRepo.
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['rev-list', self.GOOD_COMMIT_SHA1]))
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['rev-list', self.BAD_COMMIT_SHA1]))
    # Mock git result for GetCommitTimestamp.
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.GOOD_COMMIT_SHA1]),
        output=str(self.GOOD_COMMIT_TIMESTAMP))
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.BAD_COMMIT_SHA1]),
        output=str(self.BAD_COMMIT_TIMESTAMP))
    self.assertTrue(self.bisector.SanityCheck())

  def testSanityCheckGoodCommitNotExist(self):
    """Tests SanityCheck() that good commit does not exist."""
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    sync_to_head_mock = self.PatchObject(builder_module.Builder, 'SyncToHead')
    # Mock git result for DoesCommitExistInRepo to return False.
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['rev-list', self.GOOD_COMMIT_SHA1]),
        returncode=128)
    # Mock invalid result for GetCommitTimestamp to return None.
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.GOOD_COMMIT_SHA1]),
        output='invalid commit')
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.BAD_COMMIT_SHA1]),
        output='invalid commit')
    self.assertFalse(self.bisector.SanityCheck())
    # SyncToHead() called because DoesCommitExistInRepo() returns False for
    # good commit.
    sync_to_head_mock.assert_called()

  def testSanityCheckSyncToHeadWorks(self):
    """Tests SanityCheck() that good and bad commit do not exist.

    As good and bad commit do not exist, it calls SyncToHead().
    """
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    sync_to_head_mock = self.PatchObject(builder_module.Builder, 'SyncToHead')
    # Mock git result for DoesCommitExistInRepo to return False.
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['rev-list', self.GOOD_COMMIT_SHA1]),
        returncode=128)
    # Mock git result for GetCommitTimestamp.
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.GOOD_COMMIT_SHA1]),
        output=str(self.GOOD_COMMIT_TIMESTAMP))
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.BAD_COMMIT_SHA1]),
        output=str(self.BAD_COMMIT_TIMESTAMP))
    self.assertTrue(self.bisector.SanityCheck())
    # After SyncToHead, found both bad and good commit.
    sync_to_head_mock.assert_called()

  def testSanityCheckWrongTimeOrder(self):
    """Tests SanityCheck() that good and bad commit have wrong time order."""
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    # Mock git result for DoesCommitExistInRepo.
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['rev-list', self.GOOD_COMMIT_SHA1]))
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['rev-list', self.BAD_COMMIT_SHA1]))
    # Mock git result for GetCommitTimestamp, but swap timestamp.
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.GOOD_COMMIT_SHA1]),
        output=str(self.BAD_COMMIT_TIMESTAMP))
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.BAD_COMMIT_SHA1]),
        output=str(self.GOOD_COMMIT_TIMESTAMP))
    self.assertFalse(self.bisector.SanityCheck())

  def testObtainBisectBoundaryScoreImpl(self):
    """Tests ObtainBisectBoundaryScoreImpl()."""
    git_mock = self.StartPatcher(GitMock(self.repo_dir))
    git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
    git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
    build_deploy_eval_mock = self.PatchObject(
        git_bisector.GitBisector, 'BuildDeployEval',
        side_effect=[self.GOOD_COMMIT_SCORE, self.BAD_COMMIT_SCORE])

    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.ObtainBisectBoundaryScoreImpl(True))
    self.assertEqual(self.BAD_COMMIT_SCORE,
                     self.bisector.ObtainBisectBoundaryScoreImpl(False))
    self.assertEqual([
        mock.call(self.repo_dir, ['checkout', self.GOOD_COMMIT_SHA1],
                  error_code_ok=True),
        mock.call(self.repo_dir, ['checkout', self.BAD_COMMIT_SHA1],
                  error_code_ok=True)
    ], git_mock.call_args_list)
    build_deploy_eval_mock.assert_called()

  def testObtainBisectBoundaryScore(self):
    """Tests ObtainBisectBoundaryScore(). Normal case."""

    def MockedObtainBisectBoundaryScoreImpl(good_side):
      if good_side:
        self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
      else:
        self.bisector.current_commit = copy.deepcopy(self.BAD_COMMIT_INFO)
      return self.bisector.current_commit.score

    obtain_score_mock = self.PatchObject(
        git_bisector.GitBisector, 'ObtainBisectBoundaryScoreImpl',
        side_effect=MockedObtainBisectBoundaryScoreImpl)

    self.assertTrue(self.bisector.ObtainBisectBoundaryScore())
    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.good_commit_info.score)
    self.assertEqual('last-known-good ', self.bisector.good_commit_info.label)
    self.assertEqual(self.BAD_COMMIT_SCORE,
                     self.bisector.bad_commit_info.score)
    self.assertEqual('last-known-bad ', self.bisector.bad_commit_info.label)
    obtain_score_mock.assert_called()

  def testObtainBisectBoundaryScoreBadScoreUnavailable(self):
    """Tests ObtainBisectBoundaryScore(). Bad score unavailable."""

    def UpdateCurrentCommitSideEffect(good_side):
      if good_side:
        self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
      else:
        self.bisector.current_commit = copy.deepcopy(self.BAD_COMMIT_INFO)
        self.bisector.current_commit.score = None
      return self.bisector.current_commit.score

    obtain_score_mock = self.PatchObject(
        git_bisector.GitBisector, 'ObtainBisectBoundaryScoreImpl',
        side_effect=UpdateCurrentCommitSideEffect)

    self.assertFalse(self.bisector.ObtainBisectBoundaryScore())
    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.good_commit_info.score)
    self.assertEqual('last-known-good ', self.bisector.good_commit_info.label)
    self.assertIsNone(self.bisector.bad_commit_info.score)
    self.assertEqual('last-known-bad ', self.bisector.bad_commit_info.label)
    obtain_score_mock.assert_called()

  def testGetThresholdFromUser(self):
    """Tests GetThresholdFromUser()."""
    self.setDefaultCommitInfo()
    input_mock = self.PatchObject(cros_build_lib, 'GetInput',
                                  return_value=self.THRESHOLD_SPLITTER)
    self.assertTrue(self.bisector.GetThresholdFromUser())
    self.assertEqual(self.THRESHOLD, self.bisector.threshold)
    input_mock.assert_called()

  def testGetThresholdFromUserAutoPick(self):
    """Tests GetThresholdFromUser() with auto_threshold set.

    No user prompt: threshold is picked in the middle of good/bad scores.
    """
    self.setDefaultCommitInfo()
    self.bisector.auto_threshold = True
    self.assertTrue(self.bisector.GetThresholdFromUser())
    self.assertEqual(10, self.bisector.threshold)

  def testGetThresholdFromUserOutOfBoundFail(self):
    """Tests GetThresholdFromUser() with out-of-bound input."""
    self.setDefaultCommitInfo()
    input_mock = self.PatchObject(cros_build_lib, 'GetInput',
                                  side_effect=['0', '1000', '-10'])
    self.assertFalse(self.bisector.GetThresholdFromUser())
    self.assertIsNone(self.bisector.threshold)
    self.assertEqual(3, input_mock.call_count)

  def testGetThresholdFromUserRetrySuccess(self):
    """Tests GetThresholdFromUser() with retry."""
    self.setDefaultCommitInfo()
    input_mock = self.PatchObject(
        cros_build_lib, 'GetInput',
        side_effect=['not_a_number', '1000', self.THRESHOLD_SPLITTER])
    self.assertTrue(self.bisector.GetThresholdFromUser())
    self.assertEqual(self.THRESHOLD, self.bisector.threshold)
    self.assertEqual(3, input_mock.call_count)

  def testBuildDeploy(self):
    """Tests BuildDeploy()."""
    # Inject this as UpdateCurrentCommit's side effect.
    self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
    build_to_deploy = '/build/to/deploy'
    build_mock = self.PatchObject(builder_module.Builder, 'Build',
                                  return_value=build_to_deploy)
    deploy_mock = self.PatchObject(builder_module.Builder, 'Deploy')

    self.assertTrue(self.bisector.BuildDeploy())

    build_label = self.GOOD_COMMIT_INFO.sha1
    build_mock.assert_called_with(build_label)
    deploy_mock.assert_called_with(self.DUT, build_to_deploy, build_label)

  def testBuildDeployBuildFail(self):
    """Tests BuildDeploy() with Build() failure."""
    # Inject this as UpdateCurrentCommit's side effect.
    self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
    # Build() failed.
    build_mock = self.PatchObject(builder_module.Builder, 'Build',
                                  return_value=None)
    deploy_mock = self.PatchObject(builder_module.Builder, 'Deploy')

    self.assertFalse(self.bisector.BuildDeploy())
    build_mock.assert_called()
    deploy_mock.assert_not_called()

  def PatchObjectForBuildDeployEval(self):
    """Returns a dict of patch objects.

    The patch objects are to mock:
      git_bisector.UpdateCurrentCommit()
      evaluator.CheckLastEvaluate()
      git_bisector.BuildDeploy()
      evaluator.Evaluate()
    """
    return {
        'UpdateCurrentCommit': self.PatchObject(git_bisector.GitBisector,
                                                'UpdateCurrentCommit'),
        'CheckLastEvaluate': self.PatchObject(evaluator_module.Evaluator,
                                              'CheckLastEvaluate'),
        'BuildDeploy': self.PatchObject(git_bisector.GitBisector,
                                        'BuildDeploy', return_value=True),
        'Evaluate': self.PatchObject(evaluator_module.Evaluator, 'Evaluate')
    }

  def testBuildDeployEvalShortcutCheckLastEvaluate(self):
    """Tests BuildDeployEval() with CheckLastEvaluate() found last score."""
    mocks = self.PatchObjectForBuildDeployEval()
    # Inject this as UpdateCurrentCommit's side effect.
    self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
    mocks['CheckLastEvaluate'].return_value = self.GOOD_COMMIT_SCORE

    score = self.bisector.BuildDeployEval()
    self.assertEqual(self.GOOD_COMMIT_SCORE, score)
    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.current_commit.score)
    for method_called in ['UpdateCurrentCommit', 'CheckLastEvaluate']:
      mocks[method_called].assert_called()
    mocks['CheckLastEvaluate'].assert_called_with(self.GOOD_COMMIT_SHA1,
                                                  self.REPEAT)
    # Shortcut taken: no build/deploy/evaluate should happen.
    for method_called in ['BuildDeploy', 'Evaluate']:
      mocks[method_called].assert_not_called()

  def AssertBuildDeployEvalMocksAllCalled(self, mocks):
    """Asserts all BuildDeployEval() mocks were called with default args.

    Args:
      mocks: Dict of patch objects from PatchObjectForBuildDeployEval().
    """
    for method_called in [
        'UpdateCurrentCommit', 'CheckLastEvaluate', 'BuildDeploy', 'Evaluate'
    ]:
      mocks[method_called].assert_called()
    mocks['CheckLastEvaluate'].assert_called_with(self.GOOD_COMMIT_SHA1,
                                                  self.REPEAT)
    mocks['Evaluate'].assert_called_with(self.DUT, self.GOOD_COMMIT_SHA1,
                                         self.REPEAT)

  def testBuildDeployEvalNoCheckLastEvaluate(self):
    """Tests BuildDeployEval() without last score."""
    mocks = self.PatchObjectForBuildDeployEval()
    # Inject this as UpdateCurrentCommit's side effect.
    self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
    mocks['CheckLastEvaluate'].return_value = common.Score()
    mocks['Evaluate'].return_value = self.GOOD_COMMIT_SCORE

    self.assertEqual(self.GOOD_COMMIT_SCORE, self.bisector.BuildDeployEval())
    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.current_commit.score)
    self.AssertBuildDeployEvalMocksAllCalled(mocks)

  def testBuildDeployEvalBuildFail(self):
    """Tests BuildDeployEval() with BuildDeploy failure."""
    mocks = self.PatchObjectForBuildDeployEval()
    # Inject this as UpdateCurrentCommit's side effect.
    self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
    mocks['CheckLastEvaluate'].return_value = common.Score()
    mocks['BuildDeploy'].return_value = False

    score = self.bisector.BuildDeployEval()
    self.assertFalse(score)
    self.assertFalse(self.bisector.current_commit.score)
    for method_called in [
        'UpdateCurrentCommit', 'CheckLastEvaluate', 'BuildDeploy'
    ]:
      mocks[method_called].assert_called()
    mocks['CheckLastEvaluate'].assert_called_with(self.GOOD_COMMIT_SHA1,
                                                  self.REPEAT)
    mocks['Evaluate'].assert_not_called()

  def testBuildDeployEvalNoCheckLastEvaluateSpecifyEvalLabel(self):
    """Tests BuildDeployEval() with eval_label specified."""
    mocks = self.PatchObjectForBuildDeployEval()
    # Inject this as UpdateCurrentCommit's side effect.
    self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
    mocks['CheckLastEvaluate'].return_value = common.Score()
    mocks['Evaluate'].return_value = self.GOOD_COMMIT_SCORE

    eval_label = 'customized_label'
    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.BuildDeployEval(eval_label=eval_label))
    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.current_commit.score)
    for method_called in [
        'UpdateCurrentCommit', 'CheckLastEvaluate', 'BuildDeploy', 'Evaluate'
    ]:
      mocks[method_called].assert_called()
    # Use given label instead of SHA1 as eval label.
    mocks['CheckLastEvaluate'].assert_called_with(eval_label, self.REPEAT)
    mocks['Evaluate'].assert_called_with(self.DUT, eval_label, self.REPEAT)

  @staticmethod
  def _DummyMethod():
    """A dummy method for test to call and mock."""

  def testBuildDeployEvalNoCheckLastEvaluateSpecifyBuildDeploy(self):
    """Tests BuildDeployEval() with customize_build_deploy specified."""
    mocks = self.PatchObjectForBuildDeployEval()
    # Inject this as UpdateCurrentCommit's side effect.
    self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
    mocks['CheckLastEvaluate'].return_value = common.Score()
    mocks['Evaluate'].return_value = self.GOOD_COMMIT_SCORE
    dummy_method = self.PatchObject(TestGitBisector, '_DummyMethod',
                                    return_value=True)

    eval_label = 'customized_label'
    self.assertEqual(
        self.GOOD_COMMIT_SCORE,
        self.bisector.BuildDeployEval(
            eval_label=eval_label,
            customize_build_deploy=TestGitBisector._DummyMethod))
    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.current_commit.score)
    for method_called in [
        'UpdateCurrentCommit', 'CheckLastEvaluate', 'Evaluate'
    ]:
      mocks[method_called].assert_called()
    # Custom build/deploy callable replaces the default BuildDeploy.
    mocks['BuildDeploy'].assert_not_called()
    dummy_method.assert_called()
    # Use given label instead of SHA1 as eval label.
    mocks['CheckLastEvaluate'].assert_called_with(eval_label, self.REPEAT)
    mocks['Evaluate'].assert_called_with(self.DUT, eval_label, self.REPEAT)

  def testBuildDeployEvalRaiseNoScore(self):
    """Tests BuildDeployEval() without score with eval_raise_on_error set."""
    self.options.eval_raise_on_error = True
    self.bisector = git_bisector.GitBisector(self.options, self.builder,
                                             self.evaluator)
    mocks = self.PatchObjectForBuildDeployEval()
    # Inject this as UpdateCurrentCommit's side effect.
    self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
    mocks['CheckLastEvaluate'].return_value = common.Score()
    mocks['Evaluate'].return_value = common.Score()

    self.assertRaises(Exception, self.bisector.BuildDeployEval)
    self.assertFalse(self.bisector.current_commit.score)
    self.AssertBuildDeployEvalMocksAllCalled(mocks)

  def testBuildDeployEvalSuppressRaiseNoScore(self):
    """Tests BuildDeployEval() without score with eval_raise_on_error unset."""
    mocks = self.PatchObjectForBuildDeployEval()
    # Inject this as UpdateCurrentCommit's side effect.
    self.bisector.current_commit = copy.deepcopy(self.GOOD_COMMIT_INFO)
    mocks['CheckLastEvaluate'].return_value = common.Score()
    mocks['Evaluate'].return_value = common.Score()

    self.assertFalse(self.bisector.BuildDeployEval())
    self.assertFalse(self.bisector.current_commit.score)
    self.AssertBuildDeployEvalMocksAllCalled(mocks)

  def testLabelBuild(self):
    """Tests LabelBuild()."""
    # Inject good(100), bad(80) score and threshold.
    self.setDefaultCommitInfo()
    self.bisector.threshold = self.THRESHOLD
    good = 'good'
    bad = 'bad'

    # Worse than given bad score.
    self.assertEqual(bad, self.bisector.LabelBuild(common.Score([70])))
    # Better than bad score, but not good enough.
    self.assertEqual(bad, self.bisector.LabelBuild(common.Score([85])))
    self.assertEqual(bad, self.bisector.LabelBuild(common.Score([90])))
    # On the margin.
    self.assertEqual(good, self.bisector.LabelBuild(common.Score([95])))
    # Better than the margin.
    self.assertEqual(good, self.bisector.LabelBuild(common.Score([98])))
    # Better than given good score.
    self.assertEqual(good, self.bisector.LabelBuild(common.Score([110])))
    # No score, default bad.
    self.assertEqual(bad, self.bisector.LabelBuild(None))
    self.assertEqual(bad, self.bisector.LabelBuild(common.Score()))

  def testLabelBuildSkipNoScore(self):
    """Tests LabelBuild() with skip_failed_commit set: no score means skip."""
    self.options.skip_failed_commit = True
    self.bisector = git_bisector.GitBisector(self.options, self.builder,
                                             self.evaluator)
    # Inject good(100), bad(80) score and threshold.
    self.setDefaultCommitInfo()
    self.bisector.threshold = self.THRESHOLD
    # No score, skip.
    self.assertEqual('skip', self.bisector.LabelBuild(None))
    self.assertEqual('skip', self.bisector.LabelBuild(common.Score()))

  def testLabelBuildLowerIsBetter(self):
    """Tests LabelBuild() in lower-is-better condition."""
    # Reverse good(80) and bad(100) score (lower is better), same threshold.
    self.bisector.good_commit_info = copy.deepcopy(self.BAD_COMMIT_INFO)
    self.bisector.bad_commit_info = copy.deepcopy(self.GOOD_COMMIT_INFO)
    self.bisector.threshold = self.THRESHOLD
    good = 'good'
    bad = 'bad'

    # Better than given good score.
    self.assertEqual(good, self.bisector.LabelBuild(common.Score([70])))
    # Worse than good score, but still better than margin.
    self.assertEqual(good, self.bisector.LabelBuild(common.Score([80])))
    self.assertEqual(good, self.bisector.LabelBuild(common.Score([82])))
    # On the margin.
    self.assertEqual(good, self.bisector.LabelBuild(common.Score([85])))
    # Worse than the margin.
    self.assertEqual(bad, self.bisector.LabelBuild(common.Score([88])))
    self.assertEqual(bad, self.bisector.LabelBuild(common.Score([90])))
    self.assertEqual(bad, self.bisector.LabelBuild(common.Score([95])))
    # Worse than given bad score.
    self.assertEqual(bad, self.bisector.LabelBuild(common.Score([110])))

  def testGitBisect(self):
    """Tests GitBisect()."""
    git_mock = self.PatchObject(
        git_bisector.GitBisector, 'Git',
        return_value=cros_build_lib.CommandResult(
            cmd=['git', 'bisect', 'reset'],
            output='We are not bisecting.', returncode=0))

    result, done = self.bisector.GitBisect(['reset'])
    git_mock.assert_called_with(['bisect', 'reset'])
    self.assertFalse(done)
    self.assertEqual('We are not bisecting.', result.output)
    self.assertListEqual(['git', 'bisect', 'reset'], result.cmd)

  def testGitBisectDone(self):
    """Tests GitBisect() when culprit is found."""
    git_mock = self.PatchObject(
        git_bisector.GitBisector, 'Git',
        return_value=cros_build_lib.CommandResult(
            cmd=['git', 'bisect', 'bad'],
            output='abcedf is the first bad commit\ncommit abcdef',
            returncode=0))

    result, done = self.bisector.GitBisect(['bad'])
    git_mock.assert_called_with(['bisect', 'bad'])
    self.assertTrue(done)
    self.assertListEqual(['git', 'bisect', 'bad'], result.cmd)

  def testRun(self):
    """Tests Run()."""
    bisector_mock = self.StartPatcher(GitBisectorMock())
    bisector_mock.good_commit_info = copy.deepcopy(self.GOOD_COMMIT_INFO)
    bisector_mock.bad_commit_info = copy.deepcopy(self.BAD_COMMIT_INFO)
    bisector_mock.threshold = self.THRESHOLD
    bisector_mock.git_bisect_args_result = [
        (['reset'], (None, False)),
        (['start'], (None, False)),
        (['bad', self.BAD_COMMIT_SHA1], (None, False)),
        (['good', self.GOOD_COMMIT_SHA1], (None, False)),
        # bisect bad (assume it found the first bad commit).
        (['bad'], (cros_build_lib.CommandResult(
            cmd=['git', 'bisect', 'bad'],
            output='%s is the first bad commit\ncommit %s' % (
                self.CULPRIT_COMMIT_SHA1, self.CULPRIT_COMMIT_SHA1),
            returncode=0), True)),
        (['log'], (None, False)),
        (['reset'], (None, False))
    ]
    bisector_mock.build_deploy_eval_current_commit = [
        self.CULPRIT_COMMIT_INFO
    ]
    bisector_mock.build_deploy_eval_result = [self.CULPRIT_COMMIT_SCORE]
    bisector_mock.label_build_result = ['bad']

    run_result = self.bisector.Run()

    self.assertTrue(bisector_mock.patched['PrepareBisect'].called)
    self.assertEqual(7, bisector_mock.patched['GitBisect'].call_count)
    self.assertTrue(bisector_mock.patched['BuildDeployEval'].called)
    self.assertTrue(bisector_mock.patched['LabelBuild'].called)
    self.assertEqual(self.CULPRIT_COMMIT_SHA1, run_result)
def AddParser(cls, parser): super(BisectCommand, cls).AddParser(parser) parser.add_argument( '-G', '--good', metavar='good_commit', required=True, type=GoodBadCommitType, help= 'A good revision (commit SHA) or CrOS image (e.g. R60-9531.0.0) ' 'to start bisection. Should be earlier than the bad revision.') parser.add_argument( '-B', '--bad', metavar='bad_commit', required=True, type=GoodBadCommitType, help='A bad revision (commit SHA) or CrOS image (e.g. R60-9532.0.0) ' 'to start bisection. Should be later than the good revision.') parser.add_argument( '-r', '--remote', metavar='IP address / hostname', required=True, type=commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH), help= 'The IP address/hostname of remote device which is going to test.') parser.add_argument( '-b', '--board', metavar='board_name', help= 'The board name of the ChromeOS device under test. Obtained from ' 'DUT if not assigned.') parser.add_argument( '--repo', default='chromium', choices=cls.REPO, help= 'Repository to bisect. Now it supports chormium git repository. ' 'Later it will support catapult git repository and even ChromeOS ' 'repositories.') parser.add_argument( '--evaluator', default='autotest', choices=sorted(cls.EVALUATOR), help= 'Evaluator used to determine if a commit is good or bad. Now it ' 'supports autotest. Later it will support telemetry.') parser.add_argument( '-d', '--base-dir', required=True, type='path', help= 'Base directory to store repo and results. Existing checkout can ' 'be used, see --reuse-repo.') parser.add_argument( '--chromium-dir', type='path', help='If specified, use it as chromium repository. Otherwise, use ' '"[base-dir]/chromium".') parser.add_argument( '--cros-dir', type='path', help='If specified, use it to enter CrOS chroot environment. ' 'Otherwise, use "[base-dir]/cros".') parser.add_argument( '--build-dir', type='path', help='If specified, use it to store build results. 
Otherwise, use ' '"[base-dir]/build".') parser.add_argument('--reuse-repo', action='store_true', help='If set, reuse repository if it exists.') parser.add_argument('--reuse-build', action='store_true', help='If set, reuse build if available.') parser.add_argument( '--reuse-eval', action='store_true', help='If set, reuse evaluation result if available.') parser.add_argument( '--reuse', action='store_true', help='If set, set reuse-repo, reuse-build, reuse-eval') parser.add_argument('--no-archive-build', dest='archive_build', default=True, action='store_false', help='If set, do not archive the build.') parser.add_argument( '--auto-threshold', action='store_true', help='If set, set threshold in the middle between good and bad ' 'score instead of prompting user to set it.') parser.add_argument('--test-name', help='Test name to run against') parser.add_argument( '--metric', help= 'Metric of test result to look at. For autotest, metric name is ' 'hierarchical, divided by "/". E.g., "avg_fps_1000_fishes/summary/' 'value" for graphics_WebGLAquarium, or "charts/Total/Score/value" ' 'for telemetry_Benchmarks.octane autotest.') parser.add_argument( '--metric-take-average', action='store_true', help='If set, treat metric value as a list of numbers and calculate ' 'arithmetic average of them.') parser.add_argument( '--eval-repeat', type=int, default=3, help='Repeat evaluate commit for N times to calculate mean and ' 'standard deviation. Default 3.') parser.add_argument( '--cros-flash-retry', type=int, default=3, help='Max #retry for "cros flash" command. Default 3.') parser.add_argument( '--cros-flash-sleep', type=int, default=60, help= 'Wait #seconds before retry. See cros-flash-backoff for detail.') parser.add_argument( '--cros-flash-backoff', type=float, default=1, help= 'Backoff factor for sleep between "cros flash" retry. If backoff ' 'factor is 1, sleep_duration = sleep * num_retry. 
Otherwise, ' 'sleep_duration = sleep * (backoff_factor) ** (num_retry - 1)') parser.add_argument( '--eval-passing-only', action='store_true', help='If set, use existing perf result only if test was passing.') parser.add_argument( '--eval-raise-on-error', action='store_true', help='If set, stop bisect if it fails to evaluate a commit. ' 'Otherwise, the failed commit is labeled as bad.') parser.add_argument( '--skip-failed-commit', action='store_true', help= 'If set, skip the failed commit (build failed / no result) rather ' 'than marking it as bad commit.')
class TestSimpleChromeBuilder(cros_test_lib.MockTempDirTestCase):
  """Tests SimpleChromeBuilder class."""

  BOARD = 'samus'
  DUT_IP = '192.168.1.1'
  DUT = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(DUT_IP)

  def setUp(self):
    # Default paths all live under the per-test temp dir so each test is
    # hermetic.
    self.default_chromium_dir = os.path.join(self.tempdir, 'chromium')
    self.default_repo_dir = os.path.join(self.tempdir, 'chromium', 'src')
    self.default_archive_base = os.path.join(self.tempdir, 'build')
    self.gclient_path = os.path.join(self.tempdir, 'gclient')
    self.log_output_args = {'log_output': True}

  def GetBuilder(self, base_dir=None, board=None, reuse_repo=True,
                 chromium_dir=None, build_dir=None, archive_build=True,
                 reuse_build=True):
    """Obtains a SimpleChromeBuilder instance.

    Args:
      base_dir: Base directory. Default self.tempdir.
      board: Board name. Default self.BOARD.
      reuse_repo: True to reuse repo.
      chromium_dir: Optional. If specified, use the chromium repo the path
          points to.
      build_dir: Optional. Store build result to it if specified.
      archive_build: True to archive build.
      reuse_build: True to reuse previous build.

    Returns:
      A SimpleChromeBuilder instance.
    """
    if base_dir is None:
      base_dir = self.tempdir
    if board is None:
      board = self.BOARD
    options = cros_test_lib.EasyAttr(
        base_dir=base_dir, board=board, reuse_repo=reuse_repo,
        chromium_dir=chromium_dir, build_dir=build_dir,
        archive_build=archive_build, reuse_build=reuse_build)
    builder = simple_chrome_builder.SimpleChromeBuilder(options)

    # Override gclient path so the builder never touches a real gclient.
    builder.gclient = self.gclient_path

    return builder

  def testInit(self):
    builder = self.GetBuilder()
    base_dir = self.tempdir
    self.assertEqual(base_dir, builder.base_dir)
    self.assertEqual(self.default_chromium_dir, builder.chromium_dir)
    self.assertEqual(self.default_repo_dir, builder.repo_dir)
    self.assertTrue(builder.reuse_repo)
    self.assertTrue(builder.reuse_build)
    self.assertTrue(builder.archive_build)
    self.assertEqual(self.default_archive_base, builder.archive_base)
    self.assertTrue(os.path.isdir(builder.archive_base))
    self.assertDictEqual(self.log_output_args, builder.log_output_args)

  def testInitMissingRequiredArgs(self):
    options = cros_test_lib.EasyAttr()
    with self.assertRaises(Exception) as cm:
      simple_chrome_builder.SimpleChromeBuilder(options)
    exception_message = str(cm.exception)
    self.assertIn('Missing command line', exception_message)
    self.assertIn('SimpleChromeBuilder', exception_message)
    # Every required arg should be named in the error message.
    for arg in simple_chrome_builder.SimpleChromeBuilder.REQUIRED_ARGS:
      self.assertIn(arg, exception_message)

  def testInitCustomizedDir(self):
    base_dir = self.tempdir
    chromium_dir = os.path.join(base_dir, 'another_chromium')
    build_dir = os.path.join(base_dir, 'another_build')
    builder = self.GetBuilder(chromium_dir=chromium_dir, build_dir=build_dir)
    self.assertEqual(base_dir, builder.base_dir)
    self.assertEqual(chromium_dir, builder.chromium_dir)
    self.assertEqual(os.path.join(chromium_dir, 'src'), builder.repo_dir)
    self.assertTrue(builder.reuse_repo)
    self.assertTrue(builder.reuse_build)
    self.assertTrue(builder.archive_build)
    self.assertEqual(build_dir, builder.archive_base)
    self.assertTrue(os.path.isdir(builder.archive_base))
    self.assertDictEqual(self.log_output_args, builder.log_output_args)

  def testInitFlipFlags(self):
    builder = self.GetBuilder(reuse_repo=False, archive_build=False,
                              reuse_build=False)
    base_dir = self.tempdir
    self.assertEqual(base_dir, builder.base_dir)
    self.assertEqual(self.default_chromium_dir, builder.chromium_dir)
    self.assertEqual(self.default_repo_dir, builder.repo_dir)
    self.assertFalse(builder.reuse_repo)
    self.assertFalse(builder.reuse_build)
    self.assertFalse(builder.archive_build)
    self.assertEqual(self.default_archive_base, builder.archive_base)
    # With archive_build=False, the archive dir should not be created.
    self.assertFalse(os.path.isdir(builder.archive_base))
    self.assertDictEqual(self.log_output_args, builder.log_output_args)

  def testSetUp(self):
    command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    command_mock.AddCmdResult(['fetch', '--nohooks', 'chromium'])
    write_config_mock = self.PatchObject(gclient, 'WriteConfigFile')
    git_mock = self.PatchObject(git, 'RunGit')
    gsync_mock = self.PatchObject(gclient, 'Sync')
    builder = self.GetBuilder()
    builder.SetUp()
    write_config_mock.assert_called_with(
        self.gclient_path, self.default_chromium_dir, True, None,
        managed=False)
    git_mock.assert_called_with(self.default_repo_dir,
                                ['pull', 'origin', 'master'])
    gsync_mock.assert_called_with(
        self.gclient_path, self.default_chromium_dir, reset=True,
        nohooks=True, verbose=False, run_args=self.log_output_args)

  def testSetUpSkip(self):
    write_config_mock = self.PatchObject(gclient, 'WriteConfigFile')
    git_mock = self.PatchObject(git, 'RunGit')
    gsync_mock = self.PatchObject(gclient, 'Sync')

    # Make it look like a git repo so SetUp() skips the fetch/sync path.
    osutils.SafeMakedirs(os.path.join(self.default_repo_dir, '.git'))

    builder = self.GetBuilder()
    builder.SetUp()
    write_config_mock.assert_not_called()
    git_mock.assert_not_called()
    gsync_mock.assert_not_called()

  def testSetUpExistingRepoException(self):
    write_config_mock = self.PatchObject(gclient, 'WriteConfigFile')
    git_mock = self.PatchObject(git, 'RunGit')
    gsync_mock = self.PatchObject(gclient, 'Sync')

    # Make it look like a git repo; with reuse_repo=False that must raise.
    osutils.SafeMakedirs(os.path.join(self.default_repo_dir, '.git'))

    builder = self.GetBuilder(reuse_repo=False)
    self.assertRaisesRegex(Exception, 'Chromium repo exists.*',
                           builder.SetUp)
    write_config_mock.assert_not_called()
    git_mock.assert_not_called()
    gsync_mock.assert_not_called()

  def testSyncToHead(self):
    git_mock = self.PatchObject(git, 'CleanAndCheckoutUpstream')
    builder = self.GetBuilder()
    builder.SyncToHead()
    git_mock.assert_called_with(self.default_repo_dir)

  def testGclientSync(self):
    gsync_mock = self.PatchObject(gclient, 'Sync')
    builder = self.GetBuilder()

    # Default call: no reset, no hooks.
    builder.GclientSync()
    gsync_mock.assert_called_with(
        self.gclient_path, self.default_chromium_dir, reset=False,
        nohooks=False, verbose=False, run_args=self.log_output_args)

    # Flags should be forwarded to gclient.Sync().
    builder.GclientSync(reset=True, nohooks=True)
    gsync_mock.assert_called_with(
        self.gclient_path, self.default_chromium_dir, reset=True,
        nohooks=True, verbose=False, run_args=self.log_output_args)

  def testBuildReuse(self):
    commit_label = 'test'
    # Let the build already be in archive.
    archive_path = os.path.join(
        self.default_archive_base, 'out_%s_%s' % (self.BOARD, commit_label),
        'Release')
    osutils.SafeMakedirs(archive_path)
    builder = self.GetBuilder()
    build_to_deploy = builder.Build(commit_label)
    self.assertEqual(archive_path, build_to_deploy)

  def _ChromeSdkRunSideEffect(self, *args, **unused_kwargs):
    # Side effect for the mocked ChromeSDK.Run: when the command looks like
    # ['bash', '-c', 'gn gen <dir> ...'], create <dir> under the repo so the
    # builder sees a build output directory.
    if args and len(args[0]) == 3:
      bash_command = args[0][2]
      if 'gn gen' in bash_command:
        build_dir = bash_command.split()[2]
        osutils.SafeMakedirs(os.path.join(self.default_repo_dir, build_dir))
    return mock.DEFAULT

  def testBuild(self):
    gsync_mock = self.PatchObject(simple_chrome_builder.SimpleChromeBuilder,
                                  'GclientSync')
    success_result = cros_test_lib.EasyAttr(returncode=0)
    chrome_sdk_run_mock = self.PatchObject(
        commands.ChromeSDK, 'Run', side_effect=self._ChromeSdkRunSideEffect,
        return_value=success_result)
    chrome_sdk_ninja_mock = self.PatchObject(
        commands.ChromeSDK, 'Ninja', return_value=success_result)
    commit_label = 'test'
    archive_path = os.path.join(
        self.default_archive_base, 'out_%s_%s' % (self.BOARD, commit_label),
        'Release')
    self.assertFalse(os.path.isdir(archive_path))

    builder = self.GetBuilder()
    build_to_deploy = builder.Build(commit_label)
    self.assertEqual(archive_path, build_to_deploy)
    # Check that build_to_deploy exists after builder.Build()
    self.assertTrue(os.path.isdir(archive_path))
    gsync_mock.assert_called()
    chrome_sdk_run_mock.assert_called_with(
        ['bash', '-c', 'gn gen out_%s/Release --args="$GN_ARGS"' % self.BOARD],
        run_args=self.log_output_args)
    chrome_sdk_ninja_mock.assert_called_with(run_args=self.log_output_args)

  def testBuildNoArchive(self):
    gsync_mock = self.PatchObject(simple_chrome_builder.SimpleChromeBuilder,
                                  'GclientSync')
    success_result = cros_test_lib.EasyAttr(returncode=0)
    chrome_sdk_run_mock = self.PatchObject(
        commands.ChromeSDK, 'Run', side_effect=self._ChromeSdkRunSideEffect,
        return_value=success_result)
    chrome_sdk_ninja_mock = self.PatchObject(
        commands.ChromeSDK, 'Ninja', return_value=success_result)
    commit_label = 'test'
    archive_path = os.path.join(
        self.default_archive_base, 'out_%s_%s' % (self.BOARD, commit_label),
        'Release')
    self.assertFalse(os.path.isdir(archive_path))

    builder = self.GetBuilder(archive_build=False)
    build_to_deploy = builder.Build(commit_label)
    # No archive. Check that archive_path is not created.
    self.assertNotEqual(archive_path, build_to_deploy)
    self.assertFalse(os.path.isdir(archive_path))
    self.assertEqual(os.path.join('out_%s' % self.BOARD, 'Release'),
                     build_to_deploy)
    self.assertTrue(os.path.isdir(
        os.path.join(self.default_repo_dir, build_to_deploy)))
    gsync_mock.assert_called()
    chrome_sdk_run_mock.assert_called_with(
        ['bash', '-c', 'gn gen out_%s/Release --args="$GN_ARGS"' % self.BOARD],
        run_args=self.log_output_args)
    chrome_sdk_ninja_mock.assert_called_with(run_args=self.log_output_args)

  def testDeploy(self):
    chrome_sdk_run_mock = self.PatchObject(commands.ChromeSDK, 'Run')
    build_to_deploy = os.path.join('out_%s' % self.BOARD, 'Release')
    commit_label = 'test'
    builder = self.GetBuilder()
    builder.Deploy(self.DUT, build_to_deploy, commit_label)
    chrome_sdk_run_mock.assert_called_with(
        ['deploy_chrome', '--build-dir', build_to_deploy, '--to', self.DUT_IP,
         '--force'],
        run_args=self.log_output_args)

  def testDeployWithPort(self):
    port = '9999'
    dut = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(
        self.DUT_IP + ':' + port)
    chrome_sdk_run_mock = self.PatchObject(commands.ChromeSDK, 'Run')
    build_to_deploy = os.path.join('out_%s' % self.BOARD, 'Release')
    commit_label = 'test'
    builder = self.GetBuilder()
    builder.Deploy(dut, build_to_deploy, commit_label)
    # A device given as host:port should add '--port' to deploy_chrome.
    chrome_sdk_run_mock.assert_called_with(
        ['deploy_chrome', '--build-dir', build_to_deploy, '--to', self.DUT_IP,
         '--force', '--port', port],
        run_args=self.log_output_args)
def main(argv):
  """Entry point: parses arguments, validates them, and runs (c)gdb.

  Args:
    argv: Command line arguments (without the program name).
  """
  parser = commandline.ArgumentParser(description=__doc__)
  parser.add_argument('--board', default=None, help='board to debug for')
  parser.add_argument(
      '-g', '--gdb_args', action='append', default=[],
      help='Arguments to gdb itself. If multiple arguments are'
      ' passed, each argument needs a separate \'-g\' flag.')
  parser.add_argument(
      '--remote', default=None,
      type=commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH),
      help='Remote device on which to run the binary. Use'
      ' "--remote=localhost:9222" to debug in a ChromeOS image in an'
      ' already running local virtual machine.')
  parser.add_argument(
      '--pid', default='',
      help='Process ID of the (already) running process on the'
      ' remote device to which to attach.')
  # Shares dest='pid' with --pid above, so either spelling fills options.pid.
  parser.add_argument('--remote_pid', dest='pid', default='',
                      help='Deprecated alias for --pid.')
  parser.add_argument(
      '--no-ping', dest='ping', default=True, action='store_false',
      help='Do not ping remote before attempting to connect.')
  parser.add_argument('--attach', dest='attach_name', default='',
                      help='Name of existing process to which to attach, on'
                      ' remote device (remote debugging only). "--attach'
                      ' browser" will find the main chrome browser process;'
                      ' "--attach renderer" will find a chrome renderer'
                      ' process; "--attach gpu-process" will find the chrome'
                      ' gpu process.')
  parser.add_argument('--cgdb', default=False, action='store_true',
                      help='Use cgdb curses interface rather than plain gdb.'
                      'This option is only valid for remote debugging.')
  parser.add_argument(
      'inf_args', nargs=argparse.REMAINDER,
      help='Arguments for gdb to pass to the program being'
      ' debugged. These are positional and must come at the end'
      ' of the command line. This will not work if attaching'
      ' to an already running program.')

  options = parser.parse_args(argv)
  options.Freeze()

  gdb_args = []
  inf_args = []
  inf_cmd = ''

  # First REMAINDER token is the inferior command; the rest are its args.
  if options.inf_args:
    inf_cmd = options.inf_args[0]
    inf_args = options.inf_args[1:]

  if options.gdb_args:
    gdb_args = options.gdb_args

  if inf_cmd:
    # The program to debug must exist inside the board sysroot.
    fname = os.path.join(cros_build_lib.GetSysroot(options.board),
                         inf_cmd.lstrip('/'))
    if not os.path.exists(fname):
      cros_build_lib.Die('Cannot find program %s.' % fname)
  else:
    if inf_args:
      parser.error('Cannot specify arguments without a program.')

  # Attaching to a running process is incompatible with passing program args.
  if inf_args and (options.pid or options.attach_name):
    parser.error('Cannot pass arguments to an already'
                 ' running process (--remote-pid or --attach).')

  if options.remote:
    # Remote debugging needs exactly one of: program, pid, or attach name.
    if not options.pid and not inf_cmd and not options.attach_name:
      parser.error('Must specify a program to start or a pid to attach '
                   'to on the remote device.')
    if options.attach_name and options.attach_name == 'browser':
      inf_cmd = '/opt/google/chrome/chrome'
  else:
    # These options only make sense with a remote device.
    if options.cgdb:
      parser.error('--cgdb option can only be used with remote debugging.')
    if options.pid:
      parser.error('Must specify a remote device (--remote) if you want '
                   'to attach to a remote pid.')
    if options.attach_name:
      parser.error('Must specify remote device (--remote) when using'
                   ' --attach option.')

  # Once we've finished sanity checking args, make sure we're root.
  if not options.remote:
    _ReExecuteIfNeeded([sys.argv[0]] + argv)

  gdb = BoardSpecificGdb(options.board, gdb_args, inf_cmd, inf_args,
                         options.remote, options.pid, options.attach_name,
                         options.cgdb, options.ping)

  try:
    if options.remote:
      gdb.RunRemote()
    else:
      gdb.Run()
  except GdbException as e:
    # options.debug presumably comes from commandline.ArgumentParser's
    # built-in --debug flag — TODO confirm.
    if options.debug:
      raise
    else:
      # NOTE(review): Die() appears to terminate via SystemExit itself, which
      # would make this 'raise' redundant — confirm against cros_build_lib.
      raise cros_build_lib.Die(str(e))
class TestChromeOnCrosBisector(cros_test_lib.MockTempDirTestCase):
  """Tests ChromeOnCrosBisector class."""

  BOARD = 'samus'
  TEST_NAME = 'graphics_WebGLAquarium'
  METRIC = 'avg_fps_1000_fishes/summary/value'
  REPORT_FILE = 'reports.json'
  DUT_ADDR = '192.168.1.1'
  DUT = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(DUT_ADDR)

  # Be aware that GOOD_COMMIT_INFO and BAD_COMMIT_INFO should be assigned via
  # copy.deepcopy() as their users are likely to change the content.
  GOOD_COMMIT_SHA1 = '44af5c9a5505'
  GOOD_COMMIT_TIMESTAMP = 1486526594
  GOOD_COMMIT_SCORE = common.Score([100])
  GOOD_COMMIT_INFO = common.CommitInfo(sha1=GOOD_COMMIT_SHA1,
                                       timestamp=GOOD_COMMIT_TIMESTAMP,
                                       title='good',
                                       label='last-known-good ',
                                       score=GOOD_COMMIT_SCORE)

  BAD_COMMIT_SHA1 = '6a163bb66c3e'
  BAD_COMMIT_TIMESTAMP = 1486530021
  BAD_COMMIT_SCORE = common.Score([80])
  BAD_COMMIT_INFO = common.CommitInfo(sha1=BAD_COMMIT_SHA1,
                                      timestamp=BAD_COMMIT_TIMESTAMP,
                                      title='bad',
                                      label='last-known-bad ',
                                      score=BAD_COMMIT_SCORE)

  GOOD_CROS_VERSION = 'R60-9592.50.0'
  BAD_CROS_VERSION = 'R60-9592.51.0'

  CULPRIT_COMMIT_SHA1 = '12345abcde'
  CULPRIT_COMMIT_TIMESTAMP = 1486530000
  CULPRIT_COMMIT_SCORE = common.Score([81])
  CULPRIT_COMMIT_INFO = common.CommitInfo(sha1=CULPRIT_COMMIT_SHA1,
                                          timestamp=CULPRIT_COMMIT_TIMESTAMP,
                                          title='bad',
                                          score=CULPRIT_COMMIT_SCORE)

  THRESHOLD_SPLITTER = 95  # Score between good and bad, closer to good side.
  THRESHOLD = 5  # Distance between good score and splitter.

  REPEAT = 3

  GOOD_METADATA_CONTENT = '\n'.join([
      '{',
      ' "metadata-version": "2",',
      ' "toolchain-url": "2017/05/%(target)s-2017.05.25.101355.tar.xz",',
      ' "suite_scheduling": true,',
      ' "build_id": 1644146,',
      ' "version": {',
      ' "full": "R60-9592.50.0",',
      ' "android-branch": "git_mnc-dr-arc-m60",',
      ' "chrome": "60.0.3112.53",',
      ' "platform": "9592.50.0",',
      ' "milestone": "60",',
      ' "android": "4150402"',
      ' }',
      '}'])

  def setUp(self):
    """Sets up test case."""
    self.options = cros_test_lib.EasyAttr(base_dir=self.tempdir,
                                          board=self.BOARD,
                                          reuse_repo=True,
                                          good=self.GOOD_COMMIT_SHA1,
                                          bad=self.BAD_COMMIT_SHA1,
                                          remote=self.DUT,
                                          eval_repeat=self.REPEAT,
                                          auto_threshold=False,
                                          reuse_eval=False,
                                          cros_flash_sleep=0.01,
                                          cros_flash_retry=3,
                                          cros_flash_backoff=1,
                                          eval_raise_on_error=False,
                                          skip_failed_commit=False)
    self.repo_dir = os.path.join(self.tempdir,
                                 builder_module.Builder.DEFAULT_REPO_DIR)
    self.SetUpBisector()

  def SetUpBisector(self):
    """Instantiates self.bisector using self.options."""
    self.evaluator = DummyEvaluator(self.options)
    self.builder = builder_module.Builder(self.options)
    self.bisector = chrome_on_cros_bisector.ChromeOnCrosBisector(
        self.options, self.builder, self.evaluator)

  def SetUpBisectorWithCrosVersion(self):
    """Instantiates self.bisector using CrOS version as good and bad options."""
    self.options.good = self.GOOD_CROS_VERSION
    self.options.bad = self.BAD_CROS_VERSION
    self.SetUpBisector()

  def SetDefaultCommitInfo(self):
    """Sets up default commit info."""
    self.bisector.good_commit_info = copy.deepcopy(self.GOOD_COMMIT_INFO)
    self.bisector.bad_commit_info = copy.deepcopy(self.BAD_COMMIT_INFO)

  def testInit(self):
    """Tests __init__() with SHA1 as good and bad options."""
    self.assertEqual(self.GOOD_COMMIT_SHA1, self.bisector.good_commit)
    self.assertIsNone(self.bisector.good_cros_version)
    self.assertEqual(self.BAD_COMMIT_SHA1, self.bisector.bad_commit)
    self.assertIsNone(self.bisector.bad_cros_version)
    self.assertFalse(self.bisector.bisect_between_cros_version)
    self.assertEqual(self.DUT_ADDR, self.bisector.remote.raw)
    self.assertEqual(self.REPEAT, self.bisector.eval_repeat)
    self.assertEqual(self.builder, self.bisector.builder)
    self.assertEqual(self.repo_dir, self.bisector.repo_dir)
    self.assertIsNone(self.bisector.good_commit_info)
    self.assertIsNone(self.bisector.bad_commit_info)
    self.assertEqual(0, len(self.bisector.bisect_log))
    self.assertIsNone(self.bisector.threshold)
    self.assertTrue(not self.bisector.current_commit)

  def testInitCrosVersion(self):
    """Tests __init__() with CrOS version as good and bad options."""
    self.SetUpBisectorWithCrosVersion()
    self.assertEqual(self.GOOD_CROS_VERSION, self.bisector.good_cros_version)
    self.assertIsNone(self.bisector.good_commit)
    self.assertEqual(self.BAD_CROS_VERSION, self.bisector.bad_cros_version)
    self.assertIsNone(self.bisector.bad_commit)
    self.assertTrue(self.bisector.bisect_between_cros_version)
    self.assertEqual(self.DUT_ADDR, self.bisector.remote.raw)
    self.assertEqual(self.REPEAT, self.bisector.eval_repeat)
    self.assertEqual(self.builder, self.bisector.builder)
    self.assertEqual(self.repo_dir, self.bisector.repo_dir)
    self.assertIsNone(self.bisector.good_commit_info)
    self.assertIsNone(self.bisector.bad_commit_info)
    self.assertEqual(0, len(self.bisector.bisect_log))
    self.assertIsNone(self.bisector.threshold)
    self.assertTrue(not self.bisector.current_commit)

  def testInitMissingRequiredArgs(self):
    """Tests that ChromeOnCrosBisector raises for missing required argument."""
    options = cros_test_lib.EasyAttr()
    with self.assertRaises(common.MissingRequiredOptionsException) as cm:
      chrome_on_cros_bisector.ChromeOnCrosBisector(
          options, self.builder, self.evaluator)
    exception_message = str(cm.exception)
    self.assertIn('Missing command line', exception_message)
    self.assertIn('ChromeOnCrosBisector', exception_message)
    for arg in chrome_on_cros_bisector.ChromeOnCrosBisector.REQUIRED_ARGS:
      self.assertIn(arg, exception_message)

  def testCheckCommitFormat(self):
    """Tests CheckCommitFormat()."""
    CheckCommitFormat = (
        chrome_on_cros_bisector.ChromeOnCrosBisector.CheckCommitFormat)
    self.assertEqual(self.GOOD_COMMIT_SHA1,
                     CheckCommitFormat(self.GOOD_COMMIT_SHA1))
    self.assertEqual(self.GOOD_CROS_VERSION,
                     CheckCommitFormat(self.GOOD_CROS_VERSION))
    # A bare milestone.platform version is normalized to R-form.
    self.assertEqual('R60-9592.50.0', CheckCommitFormat('60.9592.50.0'))

    invalid = 'bad_sha1'
    self.assertIsNone(CheckCommitFormat(invalid))

  def testObtainBisectBoundaryScoreImpl(self):
    """Tests ObtainBisectBoundaryScoreImpl()."""
    git_mock = self.StartPatcher(
        git_bisector_unittest.GitMock(self.repo_dir))
    git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
    git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
    build_deploy_eval_mock = self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector, 'BuildDeployEval')
    build_deploy_eval_mock.side_effect = [
        self.GOOD_COMMIT_SCORE, self.BAD_COMMIT_SCORE]

    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.ObtainBisectBoundaryScoreImpl(True))
    self.assertEqual(self.BAD_COMMIT_SCORE,
                     self.bisector.ObtainBisectBoundaryScoreImpl(False))
    self.assertEqual([
        mock.call(customize_build_deploy=None, eval_label=None),
        mock.call(customize_build_deploy=None, eval_label=None)
    ], build_deploy_eval_mock.call_args_list)

  def testObtainBisectBoundaryScoreImplCrosVersion(self):
    """Tests ObtainBisectBoundaryScoreImpl() bisecting between CrOS versions."""
    self.SetUpBisectorWithCrosVersion()
    # Inject good_commit and bad_commit as if
    # bisector.ResolveChromeBisectRangeFromCrosVersion() has been run.
    self.bisector.good_commit = self.GOOD_COMMIT_SHA1
    self.bisector.bad_commit = self.BAD_COMMIT_SHA1

    git_mock = self.StartPatcher(
        git_bisector_unittest.GitMock(self.repo_dir))
    git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
    git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
    self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
                     'UpdateCurrentCommit')
    evaluate_mock = self.PatchObject(DummyEvaluator, 'Evaluate')

    # Mock FlashCrosImage() to verify that customize_build_deploy is assigned
    # as expected.
    flash_cros_image_mock = self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector, 'FlashCrosImage')

    evaluate_mock.return_value = self.GOOD_COMMIT_SCORE
    self.assertEqual(self.GOOD_COMMIT_SCORE,
                     self.bisector.ObtainBisectBoundaryScoreImpl(True))
    flash_cros_image_mock.assert_called_with(
        self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION))
    evaluate_mock.assert_called_with(self.DUT,
                                     'cros_%s' % self.GOOD_CROS_VERSION,
                                     self.REPEAT)

    evaluate_mock.return_value = self.BAD_COMMIT_SCORE
    self.assertEqual(self.BAD_COMMIT_SCORE,
                     self.bisector.ObtainBisectBoundaryScoreImpl(False))
    flash_cros_image_mock.assert_called_with(
        self.bisector.GetCrosXbuddyPath(self.BAD_CROS_VERSION))
    evaluate_mock.assert_called_with(self.DUT,
                                     'cros_%s' % self.BAD_CROS_VERSION,
                                     self.REPEAT)

  def testObtainBisectBoundaryScoreImplCrosVersionFlashError(self):
    """Tests ObtainBisectBoundaryScoreImpl() when flashing CrOS image fails."""
    self.SetUpBisectorWithCrosVersion()
    # Inject good_commit and bad_commit as if
    # bisector.ResolveChromeBisectRangeFromCrosVersion() has been run.
    self.bisector.good_commit = self.GOOD_COMMIT_SHA1
    self.bisector.bad_commit = self.BAD_COMMIT_SHA1

    git_mock = self.StartPatcher(
        git_bisector_unittest.GitMock(self.repo_dir))
    git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
    git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
    self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
                     'UpdateCurrentCommit')
    evaluate_mock = self.PatchObject(DummyEvaluator, 'Evaluate')

    # FlashCrosImage() fails, so the error should propagate and the
    # evaluator should never run.
    flash_cros_image_mock = self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector, 'FlashCrosImage')
    flash_cros_image_mock.side_effect = flash.FlashError('Flash failed.')

    with self.assertRaises(flash.FlashError):
      self.bisector.ObtainBisectBoundaryScoreImpl(True)
    flash_cros_image_mock.assert_called_with(
        self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION))
    evaluate_mock.assert_not_called()

    with self.assertRaises(flash.FlashError):
      self.bisector.ObtainBisectBoundaryScoreImpl(False)
    flash_cros_image_mock.assert_called_with(
        self.bisector.GetCrosXbuddyPath(self.BAD_CROS_VERSION))
    evaluate_mock.assert_not_called()

  def testGetCrosXbuddyPath(self):
    """Tests GetCrosXbuddyPath()."""
    self.assertEqual(
        'xbuddy://remote/%s/%s/test' % (self.BOARD, self.GOOD_CROS_VERSION),
        self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION))

  def testExchangeChromeSanityCheck(self):
    """Tests the flow of exchanging Chrome between good and bad CrOS."""
    self.SetUpBisectorWithCrosVersion()
    # Inject good_commit and bad_commit as if
    # bisector.ResolveChromeBisectRangeFromCrosVersion() has been run.
    self.bisector.good_commit = self.GOOD_COMMIT_SHA1
    self.bisector.bad_commit = self.BAD_COMMIT_SHA1
    # Inject commit_info and threshold as if
    # bisector.ObtainBisectBoundaryScore() and bisector.GetThresholdFromUser()
    # has been run.
    self.SetDefaultCommitInfo()
    self.bisector.threshold = self.THRESHOLD

    # Try bad Chrome first.
    git_mock = self.StartPatcher(
        git_bisector_unittest.GitMock(self.repo_dir))
    git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
    git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
    self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
                     'UpdateCurrentCommit')
    evaluate_mock = self.PatchObject(DummyEvaluator, 'Evaluate')
    expected_evaluate_calls = [
        mock.call(self.DUT, x, self.REPEAT) for x in [
            'cros_%s_cr_%s' % (self.GOOD_CROS_VERSION, self.BAD_COMMIT_SHA1),
            'cros_%s_cr_%s' % (self.BAD_CROS_VERSION, self.GOOD_COMMIT_SHA1)
        ]
    ]

    # Mock FlashCrosImage() to verify that customize_build_deploy is assigned
    # as expected.
    flash_cros_image_mock = self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector, 'FlashCrosImage')
    expected_flash_cros_calls = [
        mock.call(self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION)),
        mock.call(self.bisector.GetCrosXbuddyPath(self.BAD_CROS_VERSION))
    ]

    # Make sure bisector.BuildDeploy() is also called.
    build_deploy_mock = self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector, 'BuildDeploy')

    # Assume culprit commit is in Chrome side, i.e. first score is bad.
    evaluate_mock.side_effect = [
        self.BAD_COMMIT_SCORE, self.GOOD_COMMIT_SCORE]
    self.assertTrue(self.bisector.ExchangeChromeSanityCheck())
    flash_cros_image_mock.assert_has_calls(expected_flash_cros_calls)
    evaluate_mock.assert_has_calls(expected_evaluate_calls)
    self.assertEqual(2, build_deploy_mock.call_count)

    flash_cros_image_mock.reset_mock()
    evaluate_mock.reset_mock()
    build_deploy_mock.reset_mock()

    # Assume culprit commit is not in Chrome side, i.e. first score is good.
    evaluate_mock.side_effect = [
        self.GOOD_COMMIT_SCORE, self.BAD_COMMIT_SCORE]
    self.assertFalse(self.bisector.ExchangeChromeSanityCheck())
    flash_cros_image_mock.assert_has_calls(expected_flash_cros_calls)
    evaluate_mock.assert_has_calls(expected_evaluate_calls)
    self.assertEqual(2, build_deploy_mock.call_count)

  def testExchangeChromeSanityCheckFlashError(self):
    """Tests ExchangeChromeSanityCheck() when flashing CrOS image fails."""
    self.SetUpBisectorWithCrosVersion()
    # Inject good_commit and bad_commit as if
    # bisector.ResolveChromeBisectRangeFromCrosVersion() has been run.
    self.bisector.good_commit = self.GOOD_COMMIT_SHA1
    self.bisector.bad_commit = self.BAD_COMMIT_SHA1
    # Inject commit_info and threshold as if
    # bisector.ObtainBisectBoundaryScore() and bisector.GetThresholdFromUser()
    # has been run.
    self.SetDefaultCommitInfo()
    self.bisector.threshold = self.THRESHOLD

    # Try bad Chrome first.
    git_mock = self.StartPatcher(
        git_bisector_unittest.GitMock(self.repo_dir))
    git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
    git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
    self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
                     'UpdateCurrentCommit')
    evaluate_mock = self.PatchObject(DummyEvaluator, 'Evaluate')

    # Flashing fails immediately, so neither evaluation nor build/deploy
    # should happen.
    flash_cros_image_mock = self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector, 'FlashCrosImage',
        side_effect=flash.FlashError('Flash failed.'))
    build_deploy_mock = self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector, 'BuildDeploy')

    with self.assertRaises(flash.FlashError):
      self.bisector.ExchangeChromeSanityCheck()

    evaluate_mock.assert_not_called()
    flash_cros_image_mock.assert_called()
    build_deploy_mock.assert_not_called()

  def testFlashImage(self):
    """Tests FlashCrosImage()."""
    flash_mock = self.PatchObject(flash, 'Flash')
    xbuddy_path = self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION)
    self.bisector.FlashCrosImage(xbuddy_path)
    flash_mock.assert_called_with(
        self.DUT, xbuddy_path, board=self.BOARD, clobber_stateful=True,
        disable_rootfs_verification=True)

  def testFlashImageRetry(self):
    """Tests FlashCrosImage() with retry success."""
    flash_mock_call_counter = itertools.count()

    def flash_mock_return(*unused_args, **unused_kwargs):
      # Fail the first 3 attempts; cros_flash_retry=3 allows up to 4 calls.
      nth_call = next(flash_mock_call_counter)
      if nth_call < 3:
        raise flash.FlashError('Flash failed.')

    flash_mock = self.PatchObject(flash, 'Flash')
    flash_mock.side_effect = flash_mock_return
    xbuddy_path = self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION)
    self.bisector.FlashCrosImage(xbuddy_path)
    flash_mock.assert_called_with(
        self.DUT, xbuddy_path, board=self.BOARD, clobber_stateful=True,
        disable_rootfs_verification=True)

  def testFlashImageRetryFailed(self):
    """Tests FlashCrosImage() with retry failed."""
    flash_mock = self.PatchObject(flash, 'Flash')
    flash_mock.side_effect = flash.FlashError('Flash failed.')
    xbuddy_path = self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION)
    with self.assertRaises(flash.FlashError):
      self.bisector.FlashCrosImage(xbuddy_path)
    flash_mock.assert_called_with(
        self.DUT, xbuddy_path, board=self.BOARD, clobber_stateful=True,
        disable_rootfs_verification=True)

  def testCrosVersionToChromeCommit(self):
    """Tests CrosVersionToChromeCommit()."""
    metadata_url = (
        'gs://chromeos-image-archive/%s-release/%s/partial-metadata.json' %
        (self.BOARD, self.GOOD_CROS_VERSION))
    gs_mock = self.StartPatcher(gs_unittest.GSContextMock())
    gs_mock.AddCmdResult(['cat', metadata_url],
                         output=self.GOOD_METADATA_CONTENT)

    git_log_content = '\n'.join([
        '8967dd66ad72 (tag: 60.0.3112.53) Publish DEPS for Chromium '
        '60.0.3112.53',
        '27ed0cc0c2f4 Incrementing VERSION to 60.0.3112.53'])
    git_mock = self.StartPatcher(
        git_bisector_unittest.GitMock(self.repo_dir))
    git_mock.AddRunGitResult(
        ['log', '--oneline', '-n', '2', '60.0.3112.53'],
        output=git_log_content)

    self.bisector.gs_ctx = gs.GSContext()
    # The commit preceding the version-bump tag is the resolved commit.
    self.assertEqual(
        '27ed0cc0c2f4',
        self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))

  def testCrosVersionToChromeCommitFail(self):
    """Tests failure case of CrosVersionToChromeCommit()."""
    metadata_url = (
        'gs://chromeos-image-archive/%s-release/%s/partial-metadata.json' %
        (self.BOARD, self.GOOD_CROS_VERSION))
    gs_mock = self.StartPatcher(gs_unittest.GSContextMock())

    # Case 1: fetching the metadata from GS fails.
    gs_mock.AddCmdResult(['cat', metadata_url], returncode=1)
    self.bisector.gs_ctx = gs.GSContext()
    self.assertIsNone(
        self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))

    # Case 2: metadata is not valid JSON.
    metadata_content = 'not_a_json'
    gs_mock.AddCmdResult(['cat', metadata_url], output=metadata_content)
    self.assertIsNone(
        self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))

    # Case 3: metadata lacks the Chrome version field.
    metadata_content = '\n'.join([
        '{',
        ' "metadata-version": "2",',
        ' "toolchain-url": "2017/05/%(target)s-2017.05.25.101355.tar.xz",',
        ' "suite_scheduling": true,',
        ' "build_id": 1644146,',
        ' "version": {}',
        '}'])
    gs_mock.AddCmdResult(['cat', metadata_url], output=metadata_content)
    self.assertIsNone(
        self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))

    # Case 4: good metadata but "git log" fails to resolve the version tag.
    gs_mock.AddCmdResult(['cat', metadata_url],
                         output=self.GOOD_METADATA_CONTENT)
    git_mock = self.StartPatcher(
        git_bisector_unittest.GitMock(self.repo_dir))
    git_mock.AddRunGitResult(
        ['log', '--oneline', '-n', '2', '60.0.3112.53'], returncode=128)
    self.assertIsNone(
        self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))

  def testResolveChromeBisectRangeFromCrosVersion(self):
    """Tests ResolveChromeBisectRangeFromCrosVersion()."""
    self.SetUpBisectorWithCrosVersion()
    cros_to_chrome_mock = self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector,
        'CrosVersionToChromeCommit')

    cros_to_chrome_mock.side_effect = [
        self.GOOD_COMMIT_SHA1, self.BAD_COMMIT_SHA1]
    self.assertTrue(
        self.bisector.ResolveChromeBisectRangeFromCrosVersion())
    # Was assertTrue(expected, actual), which never compared the values
    # (the second argument of assertTrue is the failure message).
    self.assertEqual(self.GOOD_COMMIT_SHA1, self.bisector.good_commit)
    self.assertEqual(self.BAD_COMMIT_SHA1, self.bisector.bad_commit)
    cros_to_chrome_mock.assert_has_calls([
        mock.call(self.GOOD_CROS_VERSION),
        mock.call(self.BAD_CROS_VERSION)])

    cros_to_chrome_mock.reset_mock()
    cros_to_chrome_mock.side_effect = [None]
    self.assertFalse(
        self.bisector.ResolveChromeBisectRangeFromCrosVersion())
    cros_to_chrome_mock.assert_called_with(self.GOOD_CROS_VERSION)

    cros_to_chrome_mock.reset_mock()
    cros_to_chrome_mock.side_effect = [self.GOOD_COMMIT_SHA1, None]
    self.assertFalse(
        self.bisector.ResolveChromeBisectRangeFromCrosVersion())
    cros_to_chrome_mock.assert_has_calls([
        mock.call(self.GOOD_CROS_VERSION),
        mock.call(self.BAD_CROS_VERSION)])

  def testPrepareBisect(self):
    """Tests PrepareBisect()."""
    # Pass SanityCheck().
    git_mock = self.StartPatcher(
        git_bisector_unittest.GitMock(self.repo_dir))
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['rev-list', self.GOOD_COMMIT_SHA1]))
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['rev-list', self.BAD_COMMIT_SHA1]))
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.GOOD_COMMIT_SHA1]),
        output=str(self.GOOD_COMMIT_TIMESTAMP))
    git_mock.AddRunGitResult(
        partial_mock.InOrder(['show', self.BAD_COMMIT_SHA1]),
        output=str(self.BAD_COMMIT_TIMESTAMP))

    # Inject scores for both sides.
    git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
    git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
    build_deploy_eval_mock = self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector, 'BuildDeployEval')
    build_deploy_eval_mock.side_effect = [
        self.GOOD_COMMIT_SCORE, self.BAD_COMMIT_SCORE]

    # Set auto_threshold so no user prompt is needed.
    self.bisector.auto_threshold = True
    self.assertTrue(self.bisector.PrepareBisect())

  def testPrepareBisectCrosVersion(self):
    """Tests PrepareBisect() with CrOS version."""
    self.SetUpBisectorWithCrosVersion()
    self.StartPatcher(gs_unittest.GSContextMock())
    self.PatchObject(builder_module.Builder, 'SyncToHead')
    # Stub out every preparation step; PrepareBisect() should succeed when
    # all steps report success.
    self.PatchObject(
        chrome_on_cros_bisector.ChromeOnCrosBisector,
        'ResolveChromeBisectRangeFromCrosVersion').return_value = True
    self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
                     'SanityCheck').return_value = True
    self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
                     'ObtainBisectBoundaryScore').return_value = True
    self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
                     'GetThresholdFromUser').return_value = True
    self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
                     'ExchangeChromeSanityCheck').return_value = True
    self.assertTrue(self.bisector.PrepareBisect())
def _CreateParser():
    """Create our custom parser.

    Returns:
        A commandline.ArgumentParser configured with every deploy-chrome
        option, including deprecated and hidden (SUPPRESS) flags kept for
        ebuild/backwards compatibility.
    """
    parser = commandline.ArgumentParser(description=__doc__, caching=True)

    # TODO(rcui): Have this use the UI-V2 format of having source and target
    # device be specified as positional arguments.
    parser.add_argument(
        '--force',
        action='store_true',
        default=False,
        help='Skip all prompts (such as the prompt for disabling '
        'of rootfs verification). This may result in the '
        'target machine being rebooted.')
    sdk_board_env = os.environ.get(cros_chrome_sdk.SDKFetcher.SDK_BOARD_ENV)
    parser.add_argument(
        '--board',
        default=sdk_board_env,
        help='The board the Chrome build is targeted for. When '
        "in a 'cros chrome-sdk' shell, defaults to the SDK "
        'board.')
    parser.add_argument('--build-dir',
                        type='path',
                        help='The directory with Chrome build artifacts to '
                        'deploy from. Typically of format '
                        '<chrome_root>/out/Debug. When this option is used, '
                        'the GN_ARGS environment variable must be set.')
    parser.add_argument(
        '--target-dir',
        type='path',
        default=None,
        help='Target directory on device to deploy Chrome into.')
    parser.add_argument('-g',
                        '--gs-path',
                        type='gs_path',
                        help='GS path that contains the chrome to deploy.')
    parser.add_argument('--private-key',
                        type='path',
                        default=None,
                        help='An ssh private key to use when deploying to '
                        'a CrOS device.')
    parser.add_argument('--nostartui',
                        action='store_false',
                        dest='startui',
                        default=True,
                        help="Don't restart the ui daemon after deployment.")
    parser.add_argument(
        '--nostrip',
        action='store_false',
        dest='dostrip',
        default=True,
        help="Don't strip binaries during deployment. Warning: "
        'the resulting binaries will be very large!')
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        default=remote.DEFAULT_SSH_PORT,
                        help='This arg is deprecated. Please use --device '
                        'instead.')
    parser.add_argument('-t',
                        '--to',
                        deprecated='Use --device instead',
                        help='This arg is deprecated. Please use --device '
                        'instead.')
    parser.add_argument(
        '-d',
        '--device',
        type=commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH),
        help='Device hostname or IP in the format hostname[:port].')
    # Bug fix: added the missing space after 'flag.' — the adjacent string
    # literals concatenate, so the help text previously read
    # '...this flag.Any existing mount...'.
    parser.add_argument('--mount-dir',
                        type='path',
                        default=None,
                        help='Deploy Chrome in target directory and bind it '
                        'to the directory specified by this flag. '
                        'Any existing mount on this directory will be '
                        'umounted first.')
    # Bug fix: same missing-space defect after 'directory.' as --mount-dir.
    parser.add_argument(
        '--mount',
        action='store_true',
        default=False,
        help='Deploy Chrome to default target directory and bind '
        'it to the default mount directory. '
        'Any existing mount on this directory will be '
        'umounted first.')
    parser.add_argument('--noremove-rootfs-verification',
                        action='store_true',
                        default=False,
                        help='Never remove rootfs verification.')
    parser.add_argument('--deploy-test-binaries',
                        action='store_true',
                        default=False,
                        help='Also deploy any test binaries to %s. Useful for '
                        'running any Tast tests that execute these '
                        'binaries.' % _CHROME_TEST_BIN_DIR)
    parser.add_argument('--lacros',
                        action='store_true',
                        default=False,
                        help='Deploys lacros-chrome rather than ash-chrome.')

    group = parser.add_argument_group('Advanced Options')
    group.add_argument('-l',
                       '--local-pkg-path',
                       type='path',
                       help='Path to local chrome prebuilt package to deploy.')
    group.add_argument('--sloppy',
                       action='store_true',
                       default=False,
                       help='Ignore when mandatory artifacts are missing.')
    group.add_argument(
        '--staging-flags',
        default=None,
        type=ValidateStagingFlags,
        help=('Extra flags to control staging. Valid flags are - '
              '%s' % ', '.join(chrome_util.STAGING_FLAGS)))
    # TODO(stevenjb): Remove --strict entirely once removed from the ebuild.
    group.add_argument('--strict',
                       action='store_true',
                       default=False,
                       help='Deprecated. Default behavior is "strict". Use '
                       '--sloppy to omit warnings for missing optional '
                       'files.')
    group.add_argument('--strip-flags',
                       default=None,
                       help="Flags to call the 'strip' binutil tool with. "
                       'Overrides the default arguments.')
    group.add_argument('--ping',
                       action='store_true',
                       default=False,
                       help='Ping the device before connection attempt.')
    group.add_argument('--process-timeout',
                       type=int,
                       default=KILL_PROC_MAX_WAIT,
                       help='Timeout for process shutdown.')

    group = parser.add_argument_group(
        'Metadata Overrides (Advanced)',
        description='Provide all of these overrides in order to remove '
        'dependencies on metadata.json existence.')
    group.add_argument('--target-tc',
                       action='store',
                       default=None,
                       help='Override target toolchain name, e.g. '
                       'x86_64-cros-linux-gnu')
    group.add_argument('--toolchain-url',
                       action='store',
                       default=None,
                       help='Override toolchain url format pattern, e.g. '
                       '2014/04/%%(target)s-2014.04.23.220740.tar.xz')

    # DEPRECATED: --gyp-defines is ignored, but retained for backwards
    # compatibility. TODO(stevenjb): Remove once eliminated from the ebuild.
    parser.add_argument('--gyp-defines',
                        default=None,
                        type=ValidateStagingFlags,
                        help=argparse.SUPPRESS)

    # GN_ARGS (args.gn) used to build Chrome. Influences which files are
    # staged when --build-dir is set. Defaults to reading from the GN_ARGS
    # env variable. CURRENTLY IGNORED, ADDED FOR FORWARD COMPATIBILITY.
    parser.add_argument('--gn-args',
                        default=None,
                        type=ValidateGnArgs,
                        help=argparse.SUPPRESS)

    # Path of an empty directory to stage chrome artifacts to. Defaults to a
    # temporary directory that is removed when the script finishes. If the
    # path is specified, then it will not be removed.
    parser.add_argument('--staging-dir',
                        type='path',
                        default=None,
                        help=argparse.SUPPRESS)
    # Only prepare the staging directory, and skip deploying to the device.
    parser.add_argument('--staging-only',
                        action='store_true',
                        default=False,
                        help=argparse.SUPPRESS)
    # Path to a binutil 'strip' tool to strip binaries with. The passed-in
    # path is used as-is, and not normalized. Used by the Chrome ebuild to
    # skip fetching the SDK toolchain.
    parser.add_argument('--strip-bin', default=None, help=argparse.SUPPRESS)
    parser.add_argument('--compress',
                        action='store',
                        default='auto',
                        choices=('always', 'never', 'auto'),
                        help='Choose the data compression behavior. Default '
                        'is set to "auto", that disables compression if '
                        'the target device has a gigabit ethernet port.')
    return parser