Example #1
 def ComparePerfViaSingleFile(self, before_dir, after_dir):
     # Convert to the single-file JSON format.
     json_file = os.path.join(self.MakeTempDir(), 'dataset.json')
     with open(json_file, 'w') as fh:
         perfcompare.Main(
             ['make_combined_perf_dataset_file', before_dir, after_dir], fh)
     # Run compare_perf on the JSON file.
     stdout = StringIO.StringIO()
     perfcompare.Main(['compare_perf', '--dataset_file', json_file], stdout)
     return stdout.getvalue()
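
For context, the same two-step flow shown above can be driven directly rather than through the test helper. This is a minimal sketch, assuming perfcompare.py is importable; the /tmp paths are placeholders, not anything from the original tests:

    import StringIO

    import perfcompare

    # Placeholder dataset directories: substitute real 'before'/'after' runs.
    before_dir = '/tmp/perf_before'
    after_dir = '/tmp/perf_after'

    # Step 1: merge the two directory datasets into one single-file JSON
    # dataset.
    with open('/tmp/dataset.json', 'w') as fh:
        perfcompare.Main(
            ['make_combined_perf_dataset_file', before_dir, after_dir], fh)

    # Step 2: run the comparison on the combined file instead of the
    # directories.
    stdout = StringIO.StringIO()
    perfcompare.Main(['compare_perf', '--dataset_file', '/tmp/dataset.json'],
                     stdout)
    print stdout.getvalue()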
Example #2
 def test_display_single_boot_two_datasets(self):
     dataset_dirs = [
         self.ExampleDataDir(mean=1000, single_boot=True),
         self.ExampleDataDir(mean=2000, single_boot=True, drop_one=True)
     ]
     stdout = StringIO.StringIO()
     perfcompare.Main(['compare_perf'] + dataset_dirs, stdout)
     output = stdout.getvalue()
     GOLDEN.AssertCaseEq('display_single_boot_two_datasets', output)
Example #3
 def test_validate_perfcompare(self):
     # This is an example input dataset that gives a high mismatch rate,
     # because the data is drawn from two very different distributions.
     results_dirs = ([self.ExampleDataDir(mean=100, stddev=10)] * 10 +
                     [self.ExampleDataDir(mean=200, stddev=10)] * 10)
     stdout = StringIO.StringIO()
     perfcompare.Main(['validate_perfcompare'] + results_dirs, stdout)
     output = stdout.getvalue()
     GOLDEN.AssertCaseEq('validate_perfcompare', output)
Example #4
    def ComparePerf(self, before_dir, after_dir):
        stdout = StringIO.StringIO()
        perfcompare.Main(['compare_perf', before_dir, after_dir], stdout)
        output = stdout.getvalue()

        # Check that going via the single-file JSON format produces the
        # same result.
        self.assertEqual(self.ComparePerfViaSingleFile(before_dir, after_dir),
                         output)
        return output
Example #5
 def test_error_if_dest_files_already_exist(self):
     dest_dir = os.path.join(self.MakeTempDir(), 'new_dir')
     iter_temp_file = os.path.join(self.MakeTempDir(),
                                   'result.fuchsiaperf.json')
     # Pre-create the iteration results file; run_local should refuse to
     # run when its output files already exist.
     WriteJsonFile(iter_temp_file, [])
     args = [
         'run_local', '--boots=4', '--iter_file', iter_temp_file,
         '--iter_cmd', 'my_iter_cmd', '--reboot_cmd', 'my_reboot_cmd',
         '--dest', dest_dir
     ]
     self.assertRaises(AssertionError,
                       lambda: perfcompare.Main(args, sys.stdout))
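
WriteJsonFile is a test helper that does not appear in these excerpts. Judging from its call sites here and in Example #8, it presumably just serializes a value to a file as JSON; a minimal sketch under that assumption:

    import json

    def WriteJsonFile(filename, json_data):
        # Assumed behavior: serialize the given value as JSON to filename.
        with open(filename, 'w') as fh:
            json.dump(json_data, fh)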
Example #6
    def test_errexit_error_checking_in_shell_commands(self):
        iter_temp_file = os.path.join(self.MakeTempDir(),
                                      'result.fuchsiaperf.json')
        stdout = StringIO.StringIO()

        def get_args():
            dest_dir = os.path.join(self.MakeTempDir(), 'new_dir')
            return [
                'run_local', '--boots=4', '--iter_file', iter_temp_file,
                '--dest', dest_dir
            ]

        # A run in which all the shell commands succeed should not raise.
        perfcompare.Main(
            get_args() + ['--iter_cmd', 'true', '--reboot_cmd', 'true'],
            stdout)
        # Check that the failure of the "false" command gets caught.
        self.assertRaises(
            subprocess.CalledProcessError, lambda: perfcompare.Main(
                get_args() +
                ['--iter_cmd', 'false; true', '--reboot_cmd', 'true'], stdout))
        self.assertRaises(
            subprocess.CalledProcessError, lambda: perfcompare.Main(
                get_args() +
                ['--iter_cmd', 'true', '--reboot_cmd', 'false; true'], stdout))
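
Examples #6 and #8 together show that run_local prefixes every shell command with "set -o errexit -o nounset;" before executing it. A minimal sketch of that wrapping, assuming it is built on subprocess.check_call() (RunShellCommand is an illustrative name, not necessarily the one used in perfcompare):

    import subprocess

    def RunShellCommand(cmd):
        # 'errexit' aborts the shell on the first failing command (so a
        # compound command like 'false; true' fails as a whole); 'nounset'
        # turns references to unset variables into errors.
        subprocess.check_call('set -o errexit -o nounset; ' + cmd, shell=True)

Without errexit, "false; true" would succeed, because a shell's exit status is that of its last command; the prefix is what makes the assertRaises checks above meaningful.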
Example #7
    def test_validate_perfcompare(self):
        def MakeExampleDirs(**kwargs):
            by_boot_dir = os.path.join(self.ExampleDataDir(**kwargs), 'by_boot')
            return [os.path.join(by_boot_dir, name)
                    for name in sorted(os.listdir(by_boot_dir))]

        # This is an example input dataset that gives a high mismatch rate,
        # because the data is drawn from two very different distributions.
        results_dirs = (MakeExampleDirs(mean=100, stddev=10) +
                        MakeExampleDirs(mean=200, stddev=10))
        stdout = StringIO.StringIO()
        perfcompare.Main(['validate_perfcompare', '--group_size=5']
                         + results_dirs, stdout)
        output = stdout.getvalue()
        GOLDEN.AssertCaseEq('validate_perfcompare', output)
Example #8
    def test_run_local(self):
        # Destination directory for the full multiboot dataset.  Use a
        # destination path that does not exist yet.
        dest_dir = os.path.join(self.MakeTempDir(), 'new_dir')

        # Destination pathnames for process dataset files.
        iter_temp_dir = self.MakeTempDir()
        iter_temp_file = os.path.join(iter_temp_dir, 'result.fuchsiaperf.json')
        iter_temp_glob = os.path.join(iter_temp_dir, '*.fuchsiaperf.json')

        data = GenerateData(mean=1000,
                            stddev_across_boots=10,
                            stddev_across_processes=10,
                            stddev_across_iters=10)
        commands = []

        # Dummy version of subprocess.check_call() for testing.
        def DummyRunCmd(cmd, shell=False):
            self.assertEqual(shell, True)
            commands.append(cmd)
            if cmd == 'set -o errexit -o nounset; my_iter_cmd':
                WriteJsonFile(iter_temp_file, [{
                    'label': 'MyTest',
                    'test_suite': 'example_suite',
                    'unit': 'nanoseconds',
                    'values': data.pop(0)[0]
                }])

        stdout = StringIO.StringIO()
        perfcompare.Main([
            'run_local', '--boots=4', '--iter_file', iter_temp_glob,
            '--iter_cmd', 'my_iter_cmd', '--reboot_cmd', 'my_reboot_cmd',
            '--dest', dest_dir
        ],
                         stdout,
                         run_cmd=DummyRunCmd)
        self.assertEqual(commands, [
            'set -o errexit -o nounset; my_reboot_cmd',
            'set -o errexit -o nounset; my_iter_cmd'
        ] * 4)
        GOLDEN.AssertCaseEq('run_local', stdout.getvalue())
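
GenerateData is another helper omitted from these excerpts. Its keyword arguments and the data.pop(0)[0] call site suggest a nested shape of one entry per boot, each holding per-process lists of iteration values. A sketch under those assumptions; the parameter defaults and the use of random.gauss() are invented for illustration:

    import random

    def GenerateData(mean, stddev_across_boots, stddev_across_processes,
                     stddev_across_iters, boots=4, processes=1, iters=100):
        # Nested noise model: each boot draws its own mean, each process
        # draws a mean around its boot's, and each iteration draws a value
        # around its process's mean.
        data = []
        for _ in xrange(boots):
            boot_mean = random.gauss(mean, stddev_across_boots)
            boot_data = []
            for _ in xrange(processes):
                process_mean = random.gauss(boot_mean,
                                            stddev_across_processes)
                boot_data.append([random.gauss(process_mean,
                                               stddev_across_iters)
                                  for _ in xrange(iters)])
            data.append(boot_data)
        return data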
Example #9
 def ComparePerf(self, before_dir, after_dir):
     stdout = StringIO.StringIO()
     perfcompare.Main(['compare_perf', before_dir, after_dir], stdout)
     return stdout.getvalue()
Example #10
 def test_comparing_equal_zero_width_confidence_intervals(self):
     # Build a dataset in which every value is identical, so the
     # confidence intervals have zero width, then compare it with itself.
     dir_path = self.DirOfData([[[200]], [[200]]])
     stdout = StringIO.StringIO()
     perfcompare.Main(['compare_perf', dir_path, dir_path], stdout)
     output = stdout.getvalue()
     GOLDEN.AssertCaseEq('comparison_no_change_zero_width_ci', output)
Example #11
 def test_display_single_boot_single_dataset(self):
     dataset_dir = self.ExampleDataDir(single_boot=True)
     stdout = StringIO.StringIO()
     perfcompare.Main(['compare_perf', dataset_dir], stdout)
     output = stdout.getvalue()
     GOLDEN.AssertCaseEq('display_single_boot_single_dataset', output)
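
Finally, GOLDEN.AssertCaseEq, used throughout these examples, checks output against stored golden files. Its implementation is not part of the excerpts; the sketch below shows one plausible shape, with the class name and on-disk layout being assumptions:

    import os

    class GoldenDataOutput(object):
        def __init__(self, dir_path):
            self._dir = dir_path

        def AssertCaseEq(self, name, actual):
            # Compare actual output against the stored golden copy.
            golden_file = os.path.join(self._dir, name + '.txt')
            with open(golden_file) as fh:
                expected = fh.read()
            assert actual == expected, (
                'Output mismatch for golden case %r' % name)

    GOLDEN = GoldenDataOutput('goldens')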