Exemple #1
0
    def run(self, cmake_vars, compile=True, test=True):
        """Build the suite (if needed) and run it under lit.

        Returns a tuple of (parsed report, raw lit output data).
        """
        base = self._base_path
        mkdir_p(base)

        # PGO training invalidates the existing configuration, so force a
        # reconfigure afterwards.
        if self.opts.pgo:
            self._collect_pgo(base)
            self.trained = True
            self.configured = False

        self._configure_if_needed()

        # An explicit recompile request scrubs previous build products first.
        if compile and self.compiled:
            self._clean(base)
        if compile or not self.compiled:
            self._make(base)
            self.compiled = True

        data = self._lit(base, test)
        return self._parse_lit_output(base, data, cmake_vars), data
Exemple #2
0
    def run(self, nick, compile=True, test=True):
        """Configure and build the sandbox as needed, then run lit.

        Returns the parsed lit output for this run.
        """
        base = self._base_path

        if not os.path.exists(base):
            mkdir_p(base)

        # First use of this sandbox: configure, and start from a clean tree.
        if not self.configured and self._need_to_configure(base):
            self._configure(base)
            self._clean(base)
            self.configured = True

        # An explicit recompile request scrubs previous build products first.
        if compile and self.compiled:
            self._clean(base)
        if compile or not self.compiled:
            self._make(base)
            self.compiled = True

        data = self._lit(base, test)
        return self._parse_lit_output(base, data)
Exemple #3
0
    def run(self, nick, compile=True, test=True):
        """Ensure the sandbox is configured and built, then run the tests.

        Returns the parsed lit output for this run.
        """
        sandbox = self._base_path

        if not os.path.exists(sandbox):
            mkdir_p(sandbox)

        # A fresh configure always implies a clean build tree.
        if not self.configured and self._need_to_configure(sandbox):
            self._configure(sandbox)
            self._clean(sandbox)
            self.configured = True

        # Recompile on request; clean first if there is an old build.
        if compile and self.compiled:
            self._clean(sandbox)
        if compile or not self.compiled:
            self._make(sandbox)
            self.compiled = True

        return self._parse_lit_output(sandbox, self._lit(sandbox, test))
Exemple #4
0
    def run(self, cmake_vars, compile=True, test=True, profile=False):
        """Build (if needed), install the benchmark, and run the suite.

        Returns a tuple of (parsed report, raw lit output data).
        """
        base = self._base_path
        mkdir_p(base)

        # PGO training leaves the tree needing reconfiguration.
        if self.opts.pgo:
            self._collect_pgo(base)
            self.trained = True
            self.configured = False

        self._configure_if_needed()

        if compile and self.compiled:
            self._clean(base)
        if compile or not self.compiled:
            self._make(base)
            self._install_benchmark(base)
            self.compiled = True

        data = self._lit(base, test, profile)
        return self._parse_lit_output(base, data, cmake_vars), data
Exemple #5
0
    def run(self, cmake_vars, compile=True, test=True, profile=False):
        """Collect PGO data if requested, build, run, and parse results.

        Returns a tuple of (parsed report, raw lit output data).
        """
        base = self._base_path
        mkdir_p(base)

        # FIXME: should we only run PGO collection once, even when
        # multisampling? We could do so by adding "and not self.trained"
        # below.
        if self.opts.pgo:
            self._collect_pgo(base)
            self.trained = True
            self.configured = False

        self._configure_if_needed()

        if compile and self.compiled:
            self._clean(base)
        # PGO always forces a rebuild so the fresh profile data is used.
        if compile or self.opts.pgo or not self.compiled:
            self._make(base)
            self._install_benchmark(base)
            self.compiled = True

        data = self._lit(base, test, profile)
        return self._parse_lit_output(base, data, cmake_vars), data
Exemple #6
0
def test_build(base_name,
               run_info,
               variables,
               project,
               build_config,
               num_jobs,
               codesize_util=None):
    """Unpack (if necessary) and build one test project.

    The project archive is expanded into a shared Sources directory; a hash
    of the archive plus the project description is recorded so that the
    expensive unpack step is skipped when nothing has changed.
    """
    name = '%s(config=%r,j=%d)' % (base_name, build_config, num_jobs)

    # Check if we need to expand the archive into the sandbox.
    archive_path = get_input_path(opts, project['archive'])
    with open(archive_path) as f:
        archive_hash = hashlib.md5(f.read() + str(project)).hexdigest()

    # Compute the path to unpack to.
    source_path = get_output_path("..", "Sources", project['name'])

    # Load the hash of the last unpack, in case the archive has been updated.
    last_unpack_hash_path = os.path.join(source_path, "last_unpack_hash.txt")
    if os.path.exists(last_unpack_hash_path):
        with open(last_unpack_hash_path) as f:
            last_unpack_hash = f.read()
    else:
        last_unpack_hash = None

    # Unpack if necessary.
    if last_unpack_hash == archive_hash:
        g_log.info('reusing sources %r (already unpacked)' % name)
    else:
        # Remove any existing content, if necessary.
        try:
            shutil.rmtree(source_path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Extract the zip file.
        #
        # We shell out to unzip here because zipfile's extractall does not
        # appear to preserve permissions properly.
        commands.mkdir_p(source_path)
        g_log.info('extracting sources for %r' % name)

        if archive_path.endswith("tar.gz"):
            p = subprocess.Popen(args=['tar', '-xzf', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
        else:
            p = subprocess.Popen(args=['unzip', '-q', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
        stdout, stderr = p.communicate()
        if p.wait() != 0:
            fatal(("unable to extract archive %r at %r\n"
                   "-- stdout --\n%s\n"
                   "-- stderr --\n%s\n") %
                  (archive_path, source_path, stdout, stderr))

        # Apply the patch file, if necessary.
        patch_files = project.get('patch_files', [])
        for patch_file in patch_files:
            g_log.info('applying patch file %r for %r' % (patch_file, name))
            patch_file_path = get_input_path(opts, patch_file)
            p = subprocess.Popen(
                args=['patch', '-i', patch_file_path, '-p', '1'],
                stdin=None,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=source_path)
            stdout, stderr = p.communicate()
            if p.wait() != 0:
                fatal(("unable to apply patch file %r in %r\n"
                       "-- stdout --\n%s\n"
                       "-- stderr --\n%s\n") %
                      (patch_file_path, source_path, stdout, stderr))

        # Write the hash tag so future runs can reuse this unpack.
        with open(last_unpack_hash_path, "w") as f:
            f.write(archive_hash)
Exemple #7
0
        # NOTE(review): this is a continuation fragment of test_build; the
        # enclosing `def` and unpack branch are outside this excerpt.
        # Record the archive hash so the next run can skip re-unpacking.
        with open(last_unpack_hash_path, "w") as f:
            f.write(archive_hash)

    # Create an env dict in case the user wants to use it.
    env = dict(os.environ)

    # Form the test build command.
    build_info = project['build_info']

    # Add arguments to ensure output files go into our build directory.
    dir_name = '%s_%s_j%d' % (base_name, build_config, num_jobs)
    output_base = get_output_path(dir_name)
    build_base = os.path.join(output_base, 'build', build_config)

    # Create the build base directory and by extension output base directory.
    commands.mkdir_p(build_base)

    cmd = []
    preprocess_cmd = None

    # Xcode-based projects: build via xcodebuild against the project's
    # .xcodeproj or .xcworkspace file.
    if build_info['style'].startswith('xcode-'):
        file_path = os.path.join(source_path, build_info['file'])
        cmd.extend(['xcodebuild'])

        # Add the arguments to select the build target.
        if build_info['style'] == 'xcode-project':
            cmd.extend(
                ('-target', build_info['target'], '-project', file_path))
        elif build_info['style'] == 'xcode-workspace':
            cmd.extend(
                ('-scheme', build_info['scheme'], '-workspace', file_path))
Exemple #8
0
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report has stuff in it that will be useful
        for reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        if not os.path.exists(path):
            mkdir_p(path)
        os.chdir(path)

        # Configure with -save-temps so the compiler leaves its intermediate
        # outputs (.i/.ii/.s/.bc) behind for the report.
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + [
            '-DTEST_SUITE_DIAGNOSE=On',
            '-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps'
        ]

        note(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        note(out)

        # Figure out our test's target by scanning `make help` output.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, must be nested? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        note(" ".join(make_save_temps))
        out = subprocess.check_output(make_save_temps)
        note(out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"),
                    report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = [
            "/*.o", "/*.time", "/*.cmake", "/*.make", "/*.includecache",
            "/*.txt"
        ]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        note("Report produced in: " + report_path)

        # Run through the rest of LNT, but don't allow this to be submitted
        # because there is no data.
        class DontSubmitResults(object):
            def get(self, url):
                return None

        return DontSubmitResults()
Exemple #9
0
def test_build(base_name, run_info, variables, project, build_config, num_jobs,
               codesize_util=None):
    """Unpack (if necessary) and build one test project.

    The project archive is expanded into a shared Sources directory; a hash
    of the archive plus the project description is recorded so that the
    expensive unpack step is skipped when nothing has changed.
    """
    name = '%s(config=%r,j=%d)' % (base_name, build_config, num_jobs)

    # Check if we need to expand the archive into the sandbox.
    archive_path = get_input_path(opts, project['archive'])
    with open(archive_path) as f:
        archive_hash = hashlib.md5(f.read() + str(project)).hexdigest()

    # Compute the path to unpack to.
    source_path = get_output_path("..", "Sources", project['name'])

    # Load the hash of the last unpack, in case the archive has been updated.
    last_unpack_hash_path = os.path.join(source_path, "last_unpack_hash.txt")
    if os.path.exists(last_unpack_hash_path):
        with open(last_unpack_hash_path) as f:
            last_unpack_hash = f.read()
    else:
        last_unpack_hash = None

    # Unpack if necessary.
    if last_unpack_hash == archive_hash:
        g_log.info('reusing sources %r (already unpacked)' % name)
    else:
        # Remove any existing content, if necessary.
        try:
            shutil.rmtree(source_path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Extract the zip file.
        #
        # We shell out to unzip here because zipfile's extractall does not
        # appear to preserve permissions properly.
        commands.mkdir_p(source_path)
        g_log.info('extracting sources for %r' % name)

        if archive_path.endswith("tar.gz"):
            p = subprocess.Popen(args=['tar', '-xzf', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
        else:
            p = subprocess.Popen(args=['unzip', '-q', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
        stdout, stderr = p.communicate()
        if p.wait() != 0:
            fatal(("unable to extract archive %r at %r\n"
                   "-- stdout --\n%s\n"
                   "-- stderr --\n%s\n") % (archive_path, source_path,
                                            stdout, stderr))

        # Apply the patch file, if necessary.
        patch_files = project.get('patch_files', [])
        for patch_file in patch_files:
            g_log.info('applying patch file %r for %r' % (patch_file, name))
            patch_file_path = get_input_path(opts, patch_file)
            p = subprocess.Popen(args=['patch', '-i', patch_file_path,
                                       '-p', '1'],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path)
            stdout, stderr = p.communicate()
            if p.wait() != 0:
                fatal(("unable to apply patch file %r in %r\n"
                       "-- stdout --\n%s\n"
                       "-- stderr --\n%s\n") % (patch_file_path, source_path,
                                                stdout, stderr))

        # Write the hash tag so future runs can reuse this unpack.
        with open(last_unpack_hash_path, "w") as f:
            f.write(archive_hash)
Exemple #10
0
        # NOTE(review): this is a continuation fragment of test_build; the
        # enclosing `def` and unpack branch are outside this excerpt.
        # Record the archive hash so the next run can skip re-unpacking.
        with open(last_unpack_hash_path, "w") as f:
            f.write(archive_hash)

    # Create an env dict in case the user wants to use it.
    env = dict(os.environ)

    # Form the test build command.
    build_info = project['build_info']

    # Add arguments to ensure output files go into our build directory.
    dir_name = '%s_%s_j%d' % (base_name, build_config, num_jobs)
    output_base = get_output_path(dir_name)
    build_base = os.path.join(output_base, 'build', build_config)

    # Create the build base directory and by extension output base directory.
    commands.mkdir_p(build_base)

    cmd = []
    preprocess_cmd = None

    # Xcode-based projects: build via xcodebuild against the project's
    # .xcodeproj or .xcworkspace file.
    if build_info['style'].startswith('xcode-'):
        file_path = os.path.join(source_path, build_info['file'])
        cmd.extend(['xcodebuild'])

        # Add the arguments to select the build target.
        if build_info['style'] == 'xcode-project':
            cmd.extend(('-target', build_info['target'],
                        '-project', file_path))
        elif build_info['style'] == 'xcode-workspace':
            cmd.extend(('-scheme', build_info['scheme'],
                        '-workspace', file_path))
Exemple #11
0
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report has stuff in it that will be useful
        for reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        if not os.path.exists(path):
            mkdir_p(path)
        os.chdir(path)

        # Configure with -save-temps so the compiler leaves its intermediate
        # outputs (.i/.ii/.s/.bc) behind for the report.
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE=On',
                           '-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        note(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        note(out)

        # Figure out our test's target by scanning `make help` output.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, must be nested? Unimplemented."

        local_path = os.path.join(path, bm_path)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        note(" ".join(make_save_temps))
        out = subprocess.check_output(make_save_temps)
        note(out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"), report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = ["/*.o", "/*.time", "/*.cmake", "/*.make",
                       "/*.includecache", "/*.txt"]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        note("Report produced in: " + report_path)

        # Run through the rest of LNT, but don't allow this to be submitted
        # because there is no data.
        class DontSubmitResults(object):
            def get(self, url):
                return None

        return DontSubmitResults()
Exemple #12
0
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report has stuff in it that will be useful
        for reproducing and diagnosing a performance change: save-temps
        compiler output, -ftime-report and -mllvm -stats logs, and (on
        Darwin) an execution profile.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        mkdir_p(path)
        os.chdir(path)

        # Configure with -save-temps so the compiler leaves its intermediate
        # outputs (.i/.ii/.s/.bc) behind for the report.
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        note(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        note(out)

        # Figure out our test's target by scanning `make help` output.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, nested? Unimplemented."

        local_path = os.path.join(path, bm_path)

        # Build the suite's timing/comparison helpers first.
        make_deps = [self.opts.make, "VERBOSE=1", "timeit-target",
                     "timeit-host", "fpcmp-host"]
        note(" ".join(make_deps))
        p = subprocess.Popen(make_deps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        note(std_out)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        note(" ".join(make_save_temps))
        p = subprocess.Popen(make_save_temps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        note(std_out)
        with open(report_path + "/build.log", 'w') as f:
            f.write(std_out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"), report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = ["/*.o", "/*.time", "/*.cmake", "/*.make",
                       "/*.includecache", "/*.txt"]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        # Now let's do -ftime-report.
        cmd_time_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-ftime-report']

        note(' '.join(cmd_time_report))

        out = subprocess.check_output(cmd_time_report)
        note(out)

        make_time_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_time_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/time-report.txt", 'w') as f:
            f.write(std_err)
        note("Wrote: " + report_path + "/time-report.txt")

        # Now let's do -mllvm -stats.
        cmd_stats_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-mllvm -stats']

        note(' '.join(cmd_stats_report))

        out = subprocess.check_output(cmd_stats_report)
        note(out)

        make_stats_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_stats_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        with open(report_path + "/stats-report.txt", 'w') as f:
            f.write(std_err)
        note("Wrote: " + report_path + "/stats-report.txt")

        #  Collect Profile:
        if "Darwin" in platform.platform():
            # For testing and power users, lets allow overrides of how sudo
            # and iprofiler are called.
            sudo = os.getenv("SUDO_CMD", "sudo")
            # Normalize SUDO_CMD into an argv prefix list. The branches must
            # be exclusive: re-testing an already-split command here used to
            # wrap the list into a nested list, breaking the Popen calls
            # below when SUDO_CMD contained a space.
            if " " in sudo:
                sudo = sudo.split(" ")
            elif not sudo:
                sudo = []
            else:
                sudo = [sudo]
            iprofiler = os.getenv("IPROFILER_CMD",
                                  "iprofiler -timeprofiler -I 40u")

            cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
            print(' '.join(cmd_iprofiler))

            out = subprocess.check_output(cmd_iprofiler)

            os.chdir(local_path)
            make_iprofiler_temps = [self.opts.make, "VERBOSE=1", short_name]
            p = subprocess.Popen(make_iprofiler_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            warning("Using sudo to collect execution trace.")
            make_save_temps = sudo + [self.opts.lit, short_name + ".test"]
            p = subprocess.Popen(make_save_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            sys.stdout.write(std_out)
            sys.stderr.write(std_err)
            warning("Tests may fail because of iprofiler's output.")
            # The dtps file will be saved as root, make it so
            # that we can read it.
            chmod = sudo + ["chown", "-R", getpass.getuser(),
                            short_name + ".dtps"]
            subprocess.call(chmod)
            profile = local_path + "/" + short_name + ".dtps"
            shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
            note(profile + "-->" + report_path)
        else:
            warning("Skipping execution profiling because this is not Darwin.")
        note("Report produced in: " + report_path)

        # Run through the rest of LNT, but don't allow this to be submitted
        # because there is no data.
        class DontSubmitResults(object):

            def get(self, url):
                return report_path

            def __getitem__(self, key):
                return report_path

        return DontSubmitResults()
Exemple #13
0
 def _configure_if_needed(self):
     """Configure the sandbox once, starting from a clean build tree."""
     base = self._base_path
     mkdir_p(base)
     if self.configured:
         return
     # A fresh configure always starts from a clean tree.
     self._configure(base)
     self._clean(base)
     self.configured = True
Exemple #14
0
def test_build(base_name,
               run_info,
               variables,
               project,
               build_config,
               num_jobs,
               codesize_util=None):
    """Unpack, build, and time one compile-time benchmark project.

    Generator: yields (success, test_name, samples) tuples produced by
    get_runN_test_data for the timed builds, plus an optional trailing
    "<name>.size" tuple with the built binary's code size when both
    ``binary_path`` (in the project's build_info) and ``codesize_util``
    are provided.

    base_name     -- human-readable benchmark name prefix.
    run_info      -- run metadata (unused here; kept for interface parity).
    variables     -- test variables; must contain 'run_count'.
    project       -- project description dict ('archive', 'name',
                     'build_info', optional 'patch_files').
    build_config  -- configuration name (e.g. 'Debug', 'Release').
    num_jobs      -- parallel job count passed to the build tool.
    codesize_util -- optional command (list) that prints a size in bytes,
                     or the literal string "fail".
    """
    name = '%s(config=%r,j=%d)' % (base_name, build_config, num_jobs)

    # Check if we need to expand the archive into the sandbox.
    archive_path = get_input_path(opts, project['archive'])
    # Read in binary mode so the archive hash is byte-stable across
    # platforms (text mode would mangle line endings on Windows).
    with open(archive_path, 'rb') as f:
        archive_hash = hashlib.md5(f.read() + str(project)).hexdigest()

    # Compute the path to unpack to.
    source_path = get_output_path("..", "Sources", project['name'])

    # Load the hash of the last unpack, in case the archive has been updated.
    last_unpack_hash_path = os.path.join(source_path, "last_unpack_hash.txt")
    if os.path.exists(last_unpack_hash_path):
        with open(last_unpack_hash_path) as f:
            last_unpack_hash = f.read()
    else:
        last_unpack_hash = None

    # Unpack if necessary.
    if last_unpack_hash == archive_hash:
        g_log.info('reusing sources %r (already unpacked)' % name)
    else:
        # Remove any existing content, if necessary.
        try:
            shutil.rmtree(source_path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Extract the zip file.
        #
        # We shell out to unzip here because zipfile's extractall does not
        # appear to preserve permissions properly.
        commands.mkdir_p(source_path)
        g_log.info('extracting sources for %r' % name)

        if archive_path.endswith(".tar.gz") or \
           archive_path.endswith(".tar.bz2") or \
           archive_path.endswith(".tar.lzma"):
            p = subprocess.Popen(args=['tar', '-xf', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path,
                                 universal_newlines=True)
        else:
            p = subprocess.Popen(args=['unzip', '-q', archive_path],
                                 stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=source_path,
                                 universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.wait() != 0:
            fatal(("unable to extract archive %r at %r\n"
                   "-- stdout --\n%s\n"
                   "-- stderr --\n%s\n") %
                  (archive_path, source_path, stdout, stderr))

        # Apply the patch file, if necessary.
        patch_files = project.get('patch_files', [])
        for patch_file in patch_files:
            g_log.info('applying patch file %r for %r' % (patch_file, name))
            patch_file_path = get_input_path(opts, patch_file)
            p = subprocess.Popen(
                args=['patch', '-i', patch_file_path, '-p', '1'],
                stdin=None,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=source_path,
                universal_newlines=True)
            stdout, stderr = p.communicate()
            if p.wait() != 0:
                fatal(("unable to apply patch file %r in %r\n"
                       "-- stdout --\n%s\n"
                       "-- stderr --\n%s\n") %
                      (patch_file_path, source_path, stdout, stderr))

        # Write the hash tag.
        with open(last_unpack_hash_path, "w") as f:
            f.write(archive_hash)

    # Create an env dict in case the user wants to use it.
    env = dict(os.environ)

    # Form the test build command.
    build_info = project['build_info']

    # Add arguments to ensure output files go into our build directory.
    dir_name = '%s_%s_j%d' % (base_name, build_config, num_jobs)
    output_base = get_output_path(dir_name)
    build_base = os.path.join(output_base, 'build', build_config)

    # Create the build base directory and by extension output base directory.
    commands.mkdir_p(build_base)

    cmd = []
    preprocess_cmd = None

    if build_info['style'].startswith('xcode-'):
        file_path = os.path.join(source_path, build_info['file'])
        cmd.extend(['xcodebuild'])

        # Add the arguments to select the build target.
        if build_info['style'] == 'xcode-project':
            cmd.extend(
                ('-target', build_info['target'], '-project', file_path))
        elif build_info['style'] == 'xcode-workspace':
            cmd.extend(
                ('-scheme', build_info['scheme'], '-workspace', file_path))
            cmd.extend(('-derivedDataPath', build_base))
        else:
            fatal("unknown build style in project: %r" % project)

        # Add the build configuration selection.
        cmd.extend(('-configuration', build_config))

        cmd.append('OBJROOT=%s' % os.path.join(build_base, 'obj'))
        cmd.append('SYMROOT=%s' % os.path.join(build_base, 'sym'))
        cmd.append('DSTROOT=%s' % os.path.join(build_base, 'dst'))
        cmd.append('SHARED_PRECOMPS_DIR=%s' % os.path.join(build_base, 'pch'))

        # Add arguments to force the appropriate compiler.
        cmd.append('CC=%s' % (opts.cc, ))
        cmd.append('CPLUSPLUS=%s' % (opts.cxx, ))

        # We need to force this variable here because Xcode has some completely
        # broken logic for deriving this variable from the compiler
        # name. <rdar://problem/7989147>
        cmd.append('LD=%s' % (opts.ld, ))
        cmd.append('LDPLUSPLUS=%s' % (opts.ldxx, ))

        # Force off the static analyzer, in case it was enabled in any projects
        # (we don't want to obscure what we are trying to time).
        cmd.append('RUN_CLANG_STATIC_ANALYZER=NO')

        # Inhibit all warnings, we don't want to count the time to generate
        # them against newer compilers which have added (presumably good)
        # warnings.
        cmd.append('GCC_WARN_INHIBIT_ALL_WARNINGS=YES')

        # Add additional arguments to force the build scenario we want.
        cmd.extend(('-jobs', str(num_jobs)))

        # If the user specifies any additional options to be included on the
        # command line, append them here.
        cmd.extend(build_info.get('extra_args', []))

        # If the user specifies any extra environment variables, put
        # them in our env dictionary.
        env_format = {'build_base': build_base}
        extra_env = build_info.get('extra_env', {})
        for k in extra_env:
            extra_env[k] = extra_env[k] % env_format
        env.update(extra_env)

        # Create preprocess cmd
        preprocess_cmd = 'rm -rf "%s"' % (build_base, )

    elif build_info['style'] == 'make':
        # Get the subdirectory in Source where our sources exist.
        src_dir = os.path.dirname(os.path.join(source_path,
                                               build_info['file']))
        # Grab our config from build_info. This is config is currently only
        # used in the make build style since Xcode, the only other build style
        # as of today, handles changing configuration through the configuration
        # type variables.  Make does not do this so we have to use more brute
        # force to get it right.
        config = build_info.get('config', {}).get(build_config, {})

        # Copy our source directory over to build_base.
        # We do this since we assume that we are processing a make project
        # which has already been configured and so that we do not need to worry
        # about make install or anything like that. We can just build the
        # project and use the user supplied path to its location in the build
        # directory.
        copied_src_dir = os.path.join(build_base, os.path.basename(dir_name))
        shutil.copytree(src_dir, copied_src_dir)

        # Create our make command.
        cmd.extend([
            'make', '-C', copied_src_dir, build_info['target'], "-j",
            str(num_jobs)
        ])

        # If the user specifies any additional options to be included on the
        # command line, append them here.
        cmd.extend(config.get('extra_args', []))

        # If the user specifies any extra environment variables, put
        # them in our env dictionary.

        # We create a dictionary for build_base so that users can use
        # it optionally in an environment variable via the python
        # format %(build_base)s.
        env_format = {'build_base': build_base}

        extra_env = config.get('extra_env', {})
        for k in extra_env:
            extra_env[k] = extra_env[k] % env_format
        env.update(extra_env)

        # Set build base to copied_src_dir so that if codesize_util
        # is not None, we pass it the correct path.
        build_base = copied_src_dir
        preprocess_cmd = 'rm -rf "%s"/build' % (build_base, )
        g_log.info('preprocess_cmd: %s' % preprocess_cmd)

    else:
        fatal("unknown build style in project: %r" % project)

    # Collect the samples.
    g_log.info('executing full build: %s' % args_to_quoted_string(cmd))
    stdout_path = os.path.join(output_base, "stdout.log")
    stderr_path = os.path.join(output_base, "stderr.log")

    for res in get_runN_test_data(name,
                                  variables,
                                  cmd,
                                  stdout=stdout_path,
                                  stderr=stderr_path,
                                  preprocess_cmd=preprocess_cmd,
                                  env=env):
        yield res

    # If we have a binary path, get the text size of our result.
    binary_path = build_info.get('binary_path', None)
    if binary_path is not None and codesize_util is not None:
        tname = "%s.size" % (name, )
        success = False
        samples = []

        try:
            # We use a dictionary here for our formatted processing of
            # binary_path so that if the user needs our build config he can get
            # it via %(build_config)s in his string and if he does not, an
            # error is not thrown.
            format_args = {"build_config": build_config}
            cmd = codesize_util + [
                os.path.join(build_base, binary_path % format_args)
            ]
            if opts.verbose:
                g_log.info('running: %s' % " ".join("'%s'" % arg
                                                    for arg in cmd))
            result = subprocess.check_output(cmd).strip()
            if result != "fail":
                # Named to avoid shadowing the `bytes` builtin.
                size_in_bytes = int(result)
                success = True

                # For now, the way the software is set up things are going to
                # get confused if we don't report the same number of samples
                # as reported for other variables. So we just report the size
                # N times.
                #
                # FIXME: We should resolve this, eventually.
                for i in range(variables.get('run_count')):
                    samples.append(size_in_bytes)
            else:
                g_log.warning('Codesize failed.')

        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            else:
                g_log.warning('Codesize failed with ENOENT.')
        yield (success, tname, samples)

    # Check that the file sizes of the output log files "make sense", and warn
    # if they do not. That might indicate some kind of non-determinism in the
    # test command, which makes timing less useful.
    stdout_sizes = []
    stderr_sizes = []
    run_count = variables['run_count']
    for i in range(run_count):
        iter_stdout_path = '%s.%d' % (stdout_path, i)
        iter_stderr_path = '%s.%d' % (stderr_path, i)
        if os.path.exists(iter_stdout_path):
            stdout_sizes.append(os.stat(iter_stdout_path).st_size)
        else:
            stdout_sizes.append(None)
        if os.path.exists(iter_stderr_path):
            stderr_sizes.append(os.stat(iter_stderr_path).st_size)
        else:
            stderr_sizes.append(None)

    if len(set(stdout_sizes)) != 1:
        g_log.warning(('test command had stdout files with '
                       'different sizes: %r') % stdout_sizes)
    if len(set(stderr_sizes)) != 1:
        g_log.warning(('test command had stderr files with '
                       'different sizes: %r') % stderr_sizes)

    # Unless cleanup is disabled, rerun the preprocessing command.
    if not opts.save_temps and preprocess_cmd:
        g_log.info('cleaning up temporary results')
        if os.system(preprocess_cmd) != 0:
            g_log.warning("cleanup command returned a non-zero exit status")
Exemple #15
0
    def diagnose(self):
        """Build a triage report that contains information about a test.

        This is an alternate top level target for running the test-suite.  It
        will produce a triage report for a benchmark instead of running the
        test-suite normally. The report has stuff in it that will be useful
        for reproducing and diagnosing a performance change.
        """
        assert self.opts.only_test, "We don't have a benchmark to diagnose."
        bm_path, short_name = self.opts.only_test
        assert bm_path, "The benchmark path is empty?"

        report_name = "{}.report".format(short_name)
        # Make a place for the report.
        report_path = os.path.abspath(report_name)

        # Overwrite the report.
        if os.path.exists(report_path):
            shutil.rmtree(report_path)
        os.mkdir(report_path)

        path = self._base_path
        mkdir_p(path)
        os.chdir(path)

        # Run with -save-temps
        cmd = self._configure(path, execute=False)
        cmd_temps = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-save-temps']

        logger.info(' '.join(cmd_temps))

        out = subprocess.check_output(cmd_temps)
        logger.info(out)

        # Figure out our test's target.
        make_cmd = [self.opts.make, "VERBOSE=1", 'help']

        make_targets = subprocess.check_output(make_cmd)
        matcher = re.compile(r"^\.\.\.\s{}$".format(short_name),
                             re.MULTILINE | re.IGNORECASE)
        if not matcher.search(make_targets):
            assert False, "did not find benchmark, nested? Unimplemented."

        local_path = os.path.join(path, bm_path)

        # Build the harness tools the benchmark target depends on.
        make_deps = [self.opts.make, "VERBOSE=1", "timeit-target",
                     "timeit-host", "fpcmp-host"]
        logger.info(" ".join(make_deps))
        p = subprocess.Popen(make_deps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)

        make_save_temps = [self.opts.make, "VERBOSE=1", short_name]
        logger.info(" ".join(make_save_temps))
        p = subprocess.Popen(make_save_temps,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        std_out, std_err = p.communicate()
        logger.info(std_out)
        with open(report_path + "/build.log", 'w') as f:
            f.write(std_out)
        # Executable(s) and test file:
        shutil.copy(os.path.join(local_path, short_name), report_path)
        shutil.copy(os.path.join(local_path, short_name + ".test"),
                    report_path)
        # Temp files are in:
        temp_files = os.path.join(local_path, "CMakeFiles",
                                  short_name + ".dir")

        save_temps_file = ["/*.s", "/*.ii", "/*.i", "/*.bc"]
        build_files = ["/*.o", "/*.time", "/*.cmake", "/*.make",
                       "/*.includecache", "/*.txt"]
        self._cp_artifacts(local_path, report_path, save_temps_file)
        self._cp_artifacts(temp_files, report_path, build_files)

        # Now lets do -ftime-report.
        cmd_time_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-ftime-report']

        logger.info(' '.join(cmd_time_report))

        out = subprocess.check_output(cmd_time_report)
        logger.info(out)

        make_time_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_time_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        # The compiler emits -ftime-report output on stderr.
        with open(report_path + "/time-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/time-report.txt")

        # Now lets do -llvm -stats.
        cmd_stats_report = cmd + ['-DTEST_SUITE_DIAGNOSE_FLAGS=-mllvm -stats']

        logger.info(' '.join(cmd_stats_report))

        out = subprocess.check_output(cmd_stats_report)
        logger.info(out)

        make_stats_report = [self.opts.make, "VERBOSE=1", short_name]
        p = subprocess.Popen(make_stats_report,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        std_out, std_err = p.communicate()

        # -mllvm -stats output also arrives on stderr.
        with open(report_path + "/stats-report.txt", 'w') as f:
            f.write(std_err)
        logger.info("Wrote: " + report_path + "/stats-report.txt")

        #  Collect Profile:
        if "Darwin" in platform.platform():
            # For testing and power users, lets allow overrides of how sudo
            # and iprofiler are called.
            # Normalize SUDO_CMD into an argv prefix list.  The previous
            # logic re-wrapped an already-split command in another list
            # (producing nested args) when SUDO_CMD contained spaces.
            sudo_env = os.getenv("SUDO_CMD", "sudo")
            if not sudo_env:
                sudo = []
            elif " " in sudo_env:
                sudo = sudo_env.split(" ")
            else:
                sudo = [sudo_env]
            iprofiler = os.getenv("IPROFILER_CMD",
                                  "iprofiler -timeprofiler -I 40u")

            cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
            # Log via the logger for consistency with the other configure
            # invocations above (was a bare Python 2 print statement).
            logger.info(' '.join(cmd_iprofiler))

            out = subprocess.check_output(cmd_iprofiler)

            os.chdir(local_path)
            make_iprofiler_temps = [self.opts.make, "VERBOSE=1", short_name]
            p = subprocess.Popen(make_iprofiler_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            logger.warning("Using sudo to collect execution trace.")
            make_save_temps = sudo + [self.opts.lit, short_name + ".test"]
            p = subprocess.Popen(make_save_temps,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            std_out, std_err = p.communicate()
            sys.stdout.write(std_out)
            sys.stderr.write(std_err)
            logger.warning("Tests may fail because of iprofiler's output.")
            # The dtps file will be saved as root, make it so
            # that we can read it.
            chmod = sudo + ["chown", "-R", getpass.getuser(),
                            short_name + ".dtps"]
            subprocess.call(chmod)
            profile = local_path + "/" + short_name + ".dtps"
            shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
            logger.info(profile + "-->" + report_path)
        else:
            logger.warning("Skipping execution profiling because " +
                           "this is not Darwin.")
        logger.info("Report produced in: " + report_path)

        return lnt.util.ImportData.no_submit()
Exemple #16
0
 def _configure_if_needed(self):
     """Run CMake configure (and an initial clean) on first use only."""
     base = self._base_path
     # The build directory must exist whether or not we configure.
     mkdir_p(base)
     if not self.configured:
         self._configure(base)
         self._clean(base)
         self.configured = True