def CheckRequiredVariables(options):
    """
    Check to make sure all the required input variables are defined.

    Exits via msg.PrintAndExit() if any required parameter is empty or if
    any field contains a character the GTR does not allow.

    @param options Parsed command line options

    @return no return; exits on error
    """

    # Table of (parameter name, value) pairs; report the first missing one.
    # This replaces five copies of the same if/PrintAndExit stanza.
    #
    required = [
        ('app_version', options.app_version),
        ('compiler_version', options.compiler_version),
        ('input_name', options.input_name),
        ('program_name', options.program_name),
        ('platform', options.platform),
    ]
    for name, value in required:
        if value == '':
            msg.PrintAndExit("Required parameter \'%s\' not found.\n"
                             "This must be defined in order to run the script." % name)

    # Check for any illegal char in these fields.
    #
    for _, value in required:
        CheckField(value)
Exemple #2
0
    def SetPinTool(self, user_pintool, pinball=''):
        """
        Use the user's pintool for this kit instead of the default tool.

        The tool may be given either as an explicit path or as a bare name
        located in the architecture dependent pintool directory.  In both
        cases, verify the tool actually exists before accepting it.

        @param user_pintool User defined pintool to use in this kit
        @param pinball Optional - pinball kit is processing

        @return no return value
        """

        if not os.path.isfile(os.path.realpath(user_pintool)):
            # Not an explicit path to an existing file, so look in the arch
            # specific directory.  Use the pinball, when given, to determine
            # the architecture; otherwise fall back on 'binary_type'.
            #
            if pinball:
                tool_dir = self.ArchSpecificDir(util.FindArchitecture(pinball))
            else:
                tool_dir = self.ArchSpecificDir(self.binary_type)
            candidate = os.path.join(tool_dir, user_pintool)
            if not os.path.isfile(os.path.realpath(candidate)):
                msg.PrintAndExit('Could not find user defined pintool: ' +
                                 user_pintool)
        self.pintool = user_pintool
def GetOptions():
    """
    Get users command line options/args and check to make sure they are correct.

    @return List of options and one argument: base name for traces
    """

    # Derive the version number from the RCS revision string.
    #
    ver = '$Revision: 1.7 $'.replace('$Revision: ', '').replace(' $', '')
    us = '%prog [options] trace_base_name'
    desc = 'Generate the traceinfo XML files for a set of traces.'

    util.CheckNonPrintChar(sys.argv)
    parser = optparse.OptionParser(usage=us, version=ver, description=desc)

    # Parse command line options and get any arguments.
    #
    options, args = parser.parse_args()

    # Added method cbsp() to 'options' to check if running CBSP.
    #
    util.AddMethodcbsp(options)

    # The script requires at least one positional argument.
    #
    if not args:
        msg.PrintAndExit(
            'Not enough arguments given to script.  Use -h to get help')

    # The required 'blank' XML fragment files must live in the current
    # directory; stop at the first one that is missing.
    #
    for blank_file in config.traceinfo_blank_files:
        if not os.path.isfile(blank_file):
            msg.PrintAndExit(
                'Required \'blank\' xml traceinfo file not found.\n     ' +
                blank_file)

    return options, args
Exemple #4
0
    def Replay(self, param, dirname, filename):
        """
        Run the Simulator on a single LIT file given the command line options
        and the name of the file to run.

        It formats the appropriate command line options, saves global
        variables in a pickle file & calls the Sim run script.

        @param param Dict which must contain the key 'options'
        @param dirname Directory containing the pinball
        @param filename File name of the pinball to replay

        @return result of running the command (0 when nothing was run)
        """

        if 'options' in param:
            options = param['options']
        else:
            msg.PrintAndExit(
                'method replay_dir.Replay() failed to get param \'options\'')
        basename_file = os.path.join(dirname, filename)
        if config.verbose:
            # Bug fix: this previously referenced an undefined name
            # 'basename', which raised NameError whenever config.verbose
            # was set.
            #
            msg.PrintMsg("-> Replaying pinball \"" + basename_file + "\"")

        cmd = self.replayer_cmd + ' ' + basename_file

        # Check to see if need to add options passed to the Sim run script.
        # These are NOT parameters, so they don't get passed in the global
        # variables.
        #
        if options.sim_options:
            cmd += ' --sim_options ' + options.sim_options
        if options.verify:
            cmd += ' --verify'

        # Add the configuration file, if one exists and print the cmd if just
        # debugging.
        #
        cmd += util.AddCfgFile(options)
        if options.debug:
            msg.PrintMsg(cmd)

        result = 0
        if not options.debug:

            # Dump the global data to a unique file name.  Need to add the
            # option --global_file with this unique file name to options when
            # calling a script.
            #
            gv = config.GlobalVar()
            cmd += util.AddGlobalFile(gv.DumpGlobalVars(), options)

            if not options.list:
                result = util.RunCmd(
                    cmd, options, filename,
                    concurrent=True)  # Run concurrent jobs here
            else:
                msg.PrintMsg(cmd)

        return result
Exemple #5
0
    def GetNullapp(self, basename):
        """
        Get the path to the nullapp for the required platform and architecture.

        @param basename Basename (file name w/o extension) of pinball to process

        @return Explicit path to nullapp
        """

        # Build the explicit path from the arch specific directory for the
        # architecture of this pinball.
        #
        nullapp_path = os.path.join(
            self.ArchSpecificDir(util.FindArchitecture(basename)),
            self.nullapp)

        if util.Platform() == config.WIN_CYGWIN:
            # SDE is a native Windows app, so it needs the nullapp path in
            # Windows format; the path built above is in Cygwin format and
            # must be converted with 'cygpath'.
            #
            try:
                nullapp_path = subprocess.check_output(
                    ['cygpath', '-w', nullapp_path])
            except (subprocess.CalledProcessError, OSError):
                msg.PrintAndExit(
                    'Could not get a valid Windows path from the Cygwin path to nullapp'
                )

            # Backslashes in the Windows path would be treated as escape
            # characters, so switch to forward slashes (which Windows also
            # accepts) and strip the trailing newline emitted by cygpath.
            #
            nullapp_path = nullapp_path.replace('\\', '/').rstrip()

        # Final check to ensure it's a valid nullapp binary
        #
        if not os.path.isfile(nullapp_path):
            msg.PrintAndExit('Unable to find valid nullapp')

        return nullapp_path
Exemple #6
0
def CheckGdbVersion():
    """
    Get the version of GDB and check to make sure it's a version which can
    be used for DrDebug.  Exit if the version of GDB is too old to run
    with DrDebug.

    @return no return
    @return NOTE: side effect sets global gdb_path
    """

    global gdb_path

    # Locate the gdb binary via the user's PATH.
    #
    gdb_path = util.Which('gdb')
    if not gdb_path:
        # NOTE(review): 'parser' is not defined in this function; it is
        # presumably a module level OptionParser - confirm it is in scope.
        #
        parser.error('gdb not found in PATH')

    # Run 'gdb --version' and capture its output.
    #
    p = subprocess.Popen(gdb_path + ' --version',
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()

    # The version number is the last token on the first line of output.
    # Example first line:
    #
    #   GNU gdb (GDB) Red Hat Enterprise Linux (7.2-60.el6)
    #
    first_line = stdout.split('\n', 1)[0]
    last_token = first_line.split()[-1]
    match = re.search('[1-9][0-9]*\.?[0-9]*', last_token)
    version = match.group(0)

    # Check to make sure it's at least the base version required for DrDebug.
    #
    if float(version) < float(config.gdb_base_version):
        msg.PrintAndExit('gdb must be at least version: ' +
                         config.gdb_base_version + '\nVersion found was:  ' +
                         version + '\nPath to gdb binary: ' + gdb_path)
Exemple #7
0
def ScaleCombine(options):
    """
    Scale each vector in the BBV and LDV normalized matrices and concatenate
    them into a new vector.

    @return none
    """

    # Read both normalized input matrices and make sure they have the same
    # number of rows (i.e. slices).
    #
    bbv_matrix = ReadVectorFile(options.normal_bbv)
    ldv_matrix = ReadVectorFile(options.normal_ldv)
    if len(bbv_matrix) != len(ldv_matrix):
        msg.PrintAndExit(
            'Normalized BBV and LDV matrices have a different number of rows.')

    # Weight for each matrix; the two weights sum to 1.0.
    #
    bbv_scale = options.combine
    ldv_scale = 1.0 - bbv_scale

    # For every slice, scale the BBV and LDV vectors and concatenate them
    # into one combined vector.  Normalizing each combined vector after the
    # summation is deliberately not done (it is not needed).
    #
    result_matrix = []
    for bbv_vector, ldv_vector in zip(bbv_matrix, ldv_matrix):
        combined = [value * bbv_scale for value in bbv_vector]
        combined += [value * ldv_scale for value in ldv_vector]
        result_matrix.append(combined)

    PrintVectorFile(result_matrix)
def OpenFile(fl, type_str):
    """
    Check to make sure a file exists and open it.

    @param fl Path to the (possibly compressed) file to open
    @param type_str Description of the file, used in the error message

    @return file pointer
    """

    if not os.path.isfile(fl):
        msg.PrintAndExit('File does not exist: %s' % fl)
    fp = util.OpenCompressFile(fl)
    # OpenCompressFile() returns None on failure; err_msg() exits the script.
    # Use identity comparison with None per PEP 8 (was 'fp == None').
    #
    if fp is None:
        err_msg(type_str + fl)

    return fp
def CheckField(string):
    """
    Check to make sure a string doesn't contain any characters the GTR does not allow.

    Char '.' and '_' are not allowed as field names for files in the GTR.

    @param string Field value to validate

    @return no return; exits via msg.PrintAndExit() on an illegal char
    """

    err_msg = lambda string, char: msg.PrintAndExit("String '%s' contains the illegal "\
                "character '%s'.\n" \
                "Characters '.' and '_' are NOT allowed in GTR file names." % (string, char))

    # Loop over the disallowed characters instead of duplicating the
    # find() != -1 stanza for each one.
    #
    for char in ('.', '_'):
        if char in string:
            err_msg(string, char)
Exemple #10
0
def SetGdbCmdFile():
    """
    Get a user specific file name for the commands which will be used to invoke
    GDB.  This is a 'hidden' file which starts with the char '.'.

    Side effect: sets config.gdb_cmd_file and creates/truncates that file.

    @return file name
    """

    # Per-user file name so concurrent users don't clobber each other.
    #
    config.gdb_cmd_file = '.gdb.cmd.%s' % (getpass.getuser())

    # Create a new file or truncate the file if it already exists.  Catch
    # only I/O errors; the previous bare 'except:' would also have hidden
    # KeyboardInterrupt and programming errors.
    #
    try:
        open(config.gdb_cmd_file, 'w').close()
    except (IOError, OSError):
        msg.PrintAndExit('Unable to open gdb command file: ' +
                         config.gdb_cmd_file)
Exemple #11
0
    def ArchSpecificDir(self, arch):
        """
        Get the architecture dependent directory where the pintools are located.

        @param arch Architecture of the binary/pinball kit is using

        @return Explicit path to directory
        """

        pintool_path = os.path.join(self.path, self.pin_dir)
        if arch == config.ARCH_IA32:
            return os.path.join(pintool_path, 'ia32')
        if arch == config.ARCH_INTEL64:
            return os.path.join(pintool_path, 'intel64')

        # Unknown architecture; PrintAndExit() terminates the script.
        #
        msg.PrintAndExit('Could not identify the architecture of the pintools to run.\n' \
            'Perhaps you forgot to set the binary type using the parameter \'mode\'.')
        return pintool_path
Exemple #12
0
def GdbInitialize(options, check_version=True):
    """
    Ensure the correct version of GDB is installed and initialize the GDB command file.

    Scripts must be able to run jobs in the background, which is not
    possible on a system with only 1 core  [see util.RunCmd() for details].
    Hence the core-count check below; it is only required for the GDB
    versions of the DrDebug scripts.

    @return no return
    """

    CheckGdb(options, check_version)
    SetGdbCmdFile()

    # Refuse to run where background jobs are impossible.
    #
    num_cores = util.NumCores()
    if num_cores < 2:
        msg.PrintAndExit(
            """Unable to use GDB version of DrDebug scripts on systems with
              only 1 core.  If running on a VM with only one core, please reconfigure
              to provide at least 2 cores.""")
Exemple #13
0
class Kit(object):
    """
    Setup the path and pintools for the PinPlay kit.

    Class-level attributes hold default values; instances refine 'path',
    'pintool' and 'binary_type' once the kit is located and validated.
    """

    # First, initialize all the variables in the kit to default values.
    #

    # Path to the top level directory of the kit.
    #
    path = ''

    # What type of a kit is this.
    #
    kit_type = config.PINPLAY

    # Choose the appropriate Pin binary/tool for this platform.
    # NOTE: this code runs at class definition time (module import).
    #
    platform = util.Platform()
    if platform is None:
        msg.PrintAndExit(
            'Could not identify the OS of the system where scripts are being run.'
        )
    if platform == config.WIN_NATIVE or platform == config.WIN_CYGWIN:
        # Windows
        pinbin = 'pin.exe'
        pintool = 'pinplay-driver.dll'  # Need to verify this is the correct name
        nullapp = 'nullapp.exe'
    else:
        # Linux
        pinbin = 'pinbin'
        pintool = 'pinplay-driver.so'
        nullapp = 'nullapp'

    # Some definitions for the kit.
    #
    default_dir = 'pinplay'
    pin = 'pin'
    type = 'PinPlay'
    prefix = ''

    # In case there are any default knobs needed for this pintool.
    #
    default_knobs = ''

    # Path to the Pin binary itself for both architectures.
    #
    pin_dir = os.path.join('extras', 'pinplay', 'bin')

    # Paths to the Pin binary itself for both architectures.
    # (These are the 'pinbin' executables, not the drivers.)
    #
    pinbin_intel64 = os.path.join('intel64', 'bin', pinbin)
    pinbin_ia32 = os.path.join('ia32', 'bin', pinbin)

    # Paths to the PinPlay driver for both architectures.
    #
    driver_intel64 = os.path.join(pin_dir, 'intel64', 'pinplay-driver.so')
    driver_ia32 = os.path.join(pin_dir, 'ia32', 'pinplay-driver.so')

    # Path to the shell scripts in this kit
    #
    script_path = config.pin_script_path

    # Path to simpoint
    #
    simpoint_path = os.path.join('extras', 'pinplay', 'PinPoints', 'bin')

    # Knobs which have the same behavior in the various kits, but a different
    # name in each kit.
    #
    knob_length = '-log:length'
    knob_skip = '-log:skip'
    knob_regions_epilog = '-log:regions:epilog'
    knob_regions_in = '-log:regions:in'
    knob_regions_out = '-log:regions:out'
    knob_regions_prolog = '-log:regions:prolog'
    knob_regions_warmup = '-log:regions:warmup'
    knob_pcregions_in = '-log:pcregions:in'
    knob_pcregions_out = '-log:pcregions:out'
    knob_pcregions_merge_warmup = '-log:pcregions:merge_warmup'

    # Is the binary 32-bit or 64-bit?  Only needed for the logging phase.
    #
    binary_type = config.ARCH_INVALID

    def __init__(self):
        """
        Method called when object is instantiated to initialize object.

        @return No return value
        """

        self.InitKit(self.script_path)

    def SetBinaryType(self, binary):
        """
        Set the file type: either 32-bit or 64-bit.

        @param binary Binary used to determine type

        @return No return value
        """

        self.binary_type = util.FileType(binary)

    def ValidDriver(self, path):
        """
        Is this a path to a kit with a valid pinplay driver?

        @param path Path to kit to be validated

        @return True if valid drivers found, otherwise exit with an error msg
        """

        # See if the 64-bit driver exists
        #
        arch = 'intel64'
        if os.path.isfile(os.path.join(path, self.driver_intel64)):

            # See if the 32-bit driver exists
            #
            arch = 'ia32'
            if os.path.isfile(os.path.join(path, self.driver_ia32)):
                return True

        # There is a valid 'pinbin' binary, or this method wouldn't get called, but
        # there isn't a valid pinplay-driver.  'arch' names the first arch
        # whose driver was missing.
        #
        msg.PrintMsg('ERROR: The required PinTool \'' + self.pintool +
                     '\' for arch \'' + arch + '\' was not found.')
        msg.PrintMsg('Perhaps the PinPlay kit installation was incomplete. Check to make sure\n' \
            'there weren\'t any errors during the install.')
        sys.exit(1)

    def ValidKit(self, path):
        """
        Is this a path to a valid kit?

        A valid kit must contain both the binary 'pinbin' and the
        PinPlay driver 'pintool' for both intel64 and ia32.

        @param path Path to kit to be validated

        @return False if kit not valid, else the return value from self.ValidDriver()
        """

        if os.path.isdir(path):

            # See if the 64-bit pinbin binary exists
            #
            if os.path.isfile(os.path.join(path, self.pinbin_intel64)):

                # See if the 32-bit pinbin binary exists
                #
                if os.path.isfile(os.path.join(path, self.pinbin_ia32)):
                    return self.ValidDriver(path)
        return False

    def GetHomeDir(self):
        """
        Get the location defined by the user's 'home' parameter/option.

        @return Value of the 'pinplayhome' config parameter
        """

        return config.pinplayhome

    def GetKitLocation(self, script_path):
        """
        Look for a kit in several locations, including the 'home' directory, if it's defined.

        @param script_path Path to scripts directory in a kit

        @return Path to PinPlay kit
        """

        # Get path to the default version of the kit in users
        # home directory.
        #
        home = os.path.expanduser("~")
        path = os.path.join(home, self.default_dir)

        # If default dir name not found in home directory, then try the default
        # in the current directory.
        #
        if not os.path.exists(path):
            path = os.path.join(os.getcwd(), self.default_dir)

        # If default dir name is not found in the current directory, then check
        # to see if this Python script resides in a valid kit.  If so, then use
        # this as the kit location.  Assume if the scripts are in a valid kit,
        # they reside in the directory:  $PINPLAYHOME/script_path,  where
        # PINPLAYHOME is the root directory of the kit.
        #
        if not os.path.exists(path):
            script_dir = util.GetScriptDir()
            base_dir = script_dir.replace(script_path, '')
            if base_dir != script_dir:
                path = base_dir

        # If a 'home' directory is given for the kit, override any kit
        # locations just discovered and use the location given in this
        # parameter.
        #
        kit_home_dir = self.GetHomeDir()
        if kit_home_dir:
            if kit_home_dir[0] == os.sep:
                # Absolute path name, use as is.
                #
                path = kit_home_dir
            else:
                # Else assume it's a directory in the users home directory.
                #
                path = os.path.join(home, kit_home_dir)

        return path

    def InitKit(self, script_path):
        """
        Get the path to a valid kit, the appropriate tool name and add several paths
        to the environment variable PATH required to find script/utilities.

        @param script_path Path to scripts directory in a kit

        @return No return value
        """

        self.path = self.GetKitLocation(script_path)

        # Check to see if it's a valid kit. If not, exit with an error.
        #
        if not self.ValidKit(self.path):
            msg.PrintMsg('ERROR: Path to the ' + self.type +
                         ' kit was not found.')
            msg.PrintMsg('Default kit location is: ' + \
                os.path.realpath(os.path.join(os.path.expanduser("~"), self.default_dir)))
            sys.exit(1)

        # Add several directories in the PinPlay kit to the environment variable PATH.
        #
        os.environ["PATH"] += os.pathsep + os.path.join(
            self.path, self.script_path)
        if self.simpoint_path != self.script_path:
            os.environ["PATH"] += os.pathsep + os.path.join(
                self.path, self.simpoint_path)

    def ArchSpecificDir(self, arch):
        """
        Get the architecture dependent directory where the pintools are located.

        @param arch Architecture of the binary/pinball kit is using

        @return Explicit path to directory
        """

        pintool_path = os.path.join(self.path, self.pin_dir)
        if arch == config.ARCH_IA32:
            pintool_path = os.path.join(pintool_path, 'ia32')
        elif arch == config.ARCH_INTEL64:
            pintool_path = os.path.join(pintool_path, 'intel64')
        else:
            # Unknown arch; PrintAndExit() terminates the script.
            #
            msg.PrintAndExit('Could not identify the architecture of the pintools to run.\n' \
                'Perhaps you forgot to set the binary type using the parameter \'mode\'.')

        return pintool_path

    def SetPinTool(self, user_pintool, pinball=''):
        """
        Set the pintool to the users tool instead of the default for this kit.

        User can give either an explicit path to the tool or put the tool in
        the architecture dependent directory.  In either case, check to make
        sure the pintool exists.

        @param user_pintool User defined pintool to use in this kit
        @param pinball Optional - pinball kit is processing

        @return No return value
        """

        if os.path.isfile(os.path.realpath(user_pintool)):
            self.pintool = user_pintool
        else:
            # If pinball is given, use it to find the architecture specific directory,
            # otherwise just use the parameter 'binary_type'.
            #
            if pinball:
                arch = util.FindArchitecture(pinball)
                tool = os.path.join(self.ArchSpecificDir(arch), user_pintool)
            else:
                tool = os.path.join(self.ArchSpecificDir(self.binary_type),
                                    user_pintool)
            if not os.path.isfile(os.path.realpath(tool)):
                msg.PrintAndExit('Could not find user defined pintool: ' +
                                 user_pintool)
            self.pintool = user_pintool

    def GetPinTool(self, pinball):
        """
        Get the path to the pintool for the required architecture.

        If a pinball is given to the method, figures out the correct
        architecture for the pintool from the pinball.

        @param pinball Pinball kit is processing

        @return Path to the pintool for this kit
        """

        if os.path.isfile(os.path.realpath(self.pintool)):
            # If the pintool already has an explicit path, possible if the user has defined the pintool,
            # then just use it as is.
            #
            pintool_path = self.pintool
        else:
            # Otherwise, assume the tool is in the architecture dependent pintool directory.
            #
            if pinball:
                arch = util.FindArchitecture(pinball)
            else:
                arch = self.binary_type
            pintool_path = os.path.join(self.ArchSpecificDir(arch),
                                        self.pintool)

        return pintool_path

    def GetPinToolKnob(self, pinball=''):
        """
        Get the knob required to add the pintool for this kit to the Pin command line.

        Some kits don't required a pintool knob.  If that the case, just return an empty string.
        Pin based kits require a pintool knob, so return it.

        @param pinball Optional - pinball kit is processing

        @return String, including '-t', which defines explict path to pintool
        """

        return ' -t ' + self.GetPinTool(pinball)

    def GetNullapp(self, basename):
        """
        Get the path to the nullapp for the required platform and architecture.

        @param basename Basename (file name w/o extension) of pinball to process

        @return Explicit path to nullapp
        """

        # Get explicit path to the correct nullapp for this arch.
        #
        arch = util.FindArchitecture(basename)
        nullapp_path = os.path.join(self.ArchSpecificDir(arch), self.nullapp)

        platform = util.Platform()
        if platform == config.WIN_CYGWIN:
            # Need to get path to nullapp using Windows format.  This is required
            # because SDE is a native Windows app and requires the path to be
            # in Windows format.  However, the path set above is in Cygwin format,
            # hence it must be converted.
            #
            # NOTE(review): under Python 3, check_output() returns bytes, so
            # the str operations below would fail; this script appears to
            # target Python 2 - confirm before porting.
            #
            try:
                nullapp_path = subprocess.check_output(
                    ['cygpath', '-w', nullapp_path])
            except (subprocess.CalledProcessError, OSError):
                msg.PrintAndExit(
                    'Could not get a valid Windows path from the Cygwin path to nullapp'
                )

            # Use forward slashes for the directory separator in the Windows path
            # (which is acceptable) because backslashes are treated as the escape character.
            #
            nullapp_path = nullapp_path.replace('\\', '/')
            nullapp_path = nullapp_path.rstrip()

        # Final check to ensure it's a valid nullapp binary
        #
        if not os.path.isfile(nullapp_path):
            msg.PrintAndExit('Unable to find valid nullapp')

        return nullapp_path
Exemple #14
0
class SDEKit(kit.Kit):
    """
    Get the path and pintool for an SDE kit.

    Overrides the PinPlay defaults from kit.Kit with SDE specific binaries,
    paths and knob names.
    """

    # Path to the top level directory of the kit.
    #
    path = ''

    # What type of a kit is this.
    #
    kit_type = config.SDE

    # Chose the appropriate Pin binary/tool for this platform.
    # NOTE: this code runs at class definition time (module import).
    #
    platform = util.Platform()
    if platform is None:
        msg.PrintAndExit(
            'Could not identify the OS of the system where scripts are being run.')
    if platform == config.WIN_NATIVE or platform == config.WIN_CYGWIN:
        # Windows/Cygwin
        pinbin = 'pin.exe'
        pintool = 'sde-pinplay-driver.dll'
        nullapp = 'nullapp.exe'
        simpoint_path = os.path.join('pinplay-scripts', 'PinPointsHome',
                                     'Windows', 'bin')
    else:
        # Linux
        pinbin = 'pinbin'
        pintool = ''  # No pintool required for SDE, it has a default tool 'sde-mix-mt.so'.
        nullapp = 'nullapp'
        simpoint_path = os.path.join('pinplay-scripts', 'PinPointsHome',
                                     'Linux', 'bin')

    # Some definitions for the kit.
    #
    default_dir = 'SDE'
    pin = 'sde'
    type = 'SDE'
    prefix = '-p'

    # Any additional knobs required by the kit.  Usually not defined.
    #
    default_knobs = ''

    # Paths to the Pin binary itself for both architectures.
    #
    pin_dir = ''
    pinbin_intel64 = os.path.join('intel64', pinbin)
    pinbin_ia32 = os.path.join('ia32', pinbin)

    # Path to the shell scripts in this kit
    #
    script_path = config.sde_script_path

    # Knobs which have the same behavior in the various kits, but a different
    # name in each kit.

    # New names for knobs when the PinPlay controller is replaced by the SDE controler.
    #
    knob_length = '-length'
    knob_skip = '-skip'
    knob_regions_epilog = '-regions:epilog'
    knob_regions_in = '-regions:in'
    knob_regions_out = '-regions:out'
    knob_regions_prolog = '-regions:prolog'
    knob_regions_warmup = '-regions:warmup'
    knob_pcregions_in = '-pcregions:in'
    knob_pcregions_out = '-pcregions:out'
    knob_pcregions_merge_warmup = '-pcregions:merge_warmup'

    # Original knob names, kept for reference.
    #
    # knob_length = '-log:length'
    # knob_skip   = '-log:skip'
    # knob_regions_epilog = '-log:regions:epilog'
    # knob_regions_in     = '-log:regions:in'
    # knob_regions_out    = '-log:regions:out'
    # knob_regions_prolog = '-log:regions:prolog'
    # knob_regions_warmup = '-log:regions:warmup'

    def __init__(self):
        """Initialize the object by locating and validating the kit."""

        self.InitKit(self.script_path)

    def ValidDriver(self, path):
        """
        For SDE, make sure the 'simpoint' binary is in the kit instead of
        verifying the drivers are valid.

        No need to verify drivers for SDE, as the SDE tool doesn't use a
        pinplay driver any more.  (There is a unified controller.)

        @param path Path to kit to be validated

        @return True if Simpoint found, otherwise exit with an error msg
        """

        # Chose the appropriate Pin binary/tool for this platform.
        #
        platform = util.Platform()
        if platform == config.WIN_NATIVE or platform == config.WIN_CYGWIN:
            path = os.path.join(path, self.simpoint_path, 'simpoint.exe')
        else:
            path = os.path.join(path, self.simpoint_path, 'simpoint')
        if not os.path.isfile(path):
            msg.PrintMsg(
                'ERROR: The required binary \'simpoint\' was not found.')
            msg.PrintMsg('   ' + path)
            msg.PrintMsg('Perhaps the SDE kit installation was incomplete. Check to make sure\n' \
                'there weren\'t any errors during the install.')
            sys.exit(1)

        return True

    def GetHomeDir(self):
        """
        Get the location defined by the user's 'home' parameter/option.

        @return Value of the 'sdehome' config parameter
        """

        return config.sdehome

    def GetPinTool(self, pinball):
        """
        Just return the Pintool.  Even thought SDE no longer requires a pintool
        (because of unified controller) keep this method just in case it's needed.

        @param pinball Pinball kit is processing

        @return Path to the pintool for this kit
        """

        return self.pintool

    def GetPinToolKnob(self, pinball=''):
        """
        SDE by default does not need a pintool.  However, if user defines one
        then need to return a knob which uses this tool.

        @param pinball Optional - pinball kit is processing

        @return Either empty string or knob with user defined pintool
        """

        if self.pintool:
            return ' -t ' + self.pintool
        else:
            return ''

    def GetTraceinfoBlank(self):
        """
        Get the location of the traceinfo 'blank' XML file fragments.

        Need to remove two levels of directories names from self.simpoint_path to
        get the location where the fragments reside.

        @return Path to blank file fragments
        """

        path = os.path.dirname(os.path.dirname(self.simpoint_path))
        return os.path.join(self.path, path)
Exemple #15
0
#!/usr/bin/env python
#
# $Id: pb_weight.py,v 1.4 2015/05/19 16:30:16 tmstall Exp tmstall $

import os, sys, optparse
import cmd_options, msg, util
"""
@package pb_weight

Parse the different fields in the region pinballs generated using the default
file naming scheme.  Print either just the file name and weight or a selected
subset of all the fields.
"""

def err_msg(string):
    """
    Print an error about failing to get a weight from a file name, then exit.

    Written as a 'def' instead of a lambda assignment (PEP 8 E731) so the
    callable has a proper name in tracebacks; the interface is unchanged.

    @param string Text (usually the file name) appended to the error message
    """

    msg.PrintAndExit('Unable to get weight for this file' + string +
                     '\nUse -h for help.')


def GetOptions():
    """
    Get users command line options/args and check to make sure they are correct.

    NOTE(review): this function appears to be truncated in this copy -- it
    only builds the version/usage/description strings and never constructs a
    parser or returns anything.  Confirm against the original pb_weight.py.

    @return options, args
    """
    # Extract the bare version number (e.g. '1.4') from the RCS keyword.
    version = '$Revision: 1.4 $'
    version = version.replace('$Revision: ', '')
    ver = version.replace(' $', '')
    us = '%prog [options] pb_file_name [pb_file_name(s)]'
    desc = 'Print the weight defined in a region pinball file name. Can also ' \
           'print out a select subset of all the fields in the name.'
def ReadVectorFile(v_file):
    """
    Read in a matrix composed of a list of lists of floating point values in
    the format required by simpoint.

    Format of 1st line:
        num_rows: w
            num_rows = number of rows in matrix
            'w' indicates there are weights for each vector

    Format of subsequent lines with vector information:
        weight num_dim: value, value ... value
            weight  = 1/num_rows (i.e. all vectors have an equal weight)
            num_dim = number of values per row
            value  = the matrix values

    Example input:
        162:w
        0.00617 15:  -0.07 0.00 0.33 -0.22 -0.30 0.32 -0.05 0.27 0.15 0.32 -0.24 0.30 0.12 0.25 0.17
        0.00617 15:  -0.00 0.30 0.63 -0.30 -0.22 0.83 -0.13 0.08 0.13 0.62 -0.34 0.67 0.10 0.31 0.36
        0.00617 15:  -0.00 0.30 0.63 -0.30 -0.22 0.83 -0.13 0.08 0.13 0.62 -0.34 0.67 0.10 0.31 0.36

    @param v_file Path to the normalized frequency vector file

    @return list of lists which is the matrix
    """

    matrix = []
    weights = False

    # Read in the header of the file and do some error checking.
    #
    fp = OpenFile(v_file, 'normalized frequency vector file: ')
    line = fp.readline()
    field = line.split(':')
    try:
        # Bug fix: the vector count must be an int so the sanity check
        # against 'count' (an int) at the end of the function works.  The
        # old code kept it as a string.
        num_vect = int(field[0])
    except ValueError:
        msg.PrintAndExit('Corrupted vector file header:\n' + line)
    if len(field) == 2:
        if not 'w' in field[1]:
            msg.PrintAndExit('Illegal char given as weight: ' + field[1])
        else:
            weights = True

    # Read one vector per line until EOF.
    #
    # Bug fix: the original loop did 'return matrix' when it hit EOF, which
    # made the vector-count check below unreachable dead code.  Exit the
    # loop normally instead so the check actually runs.
    #
    count = 0
    line = fp.readline()
    while line != '':
        # Read in an optional weight, the number of values in the vector and
        # the vector itself.
        #
        field = line.split()
        if weights:
            field = field[1:]
        if len(field) < 2:
            msg.PrintAndExit('Corrupted vector format:\n' + line)
        num_float = int(field[0].split(':')[0])
        if len(field) - 1 != num_float:
            msg.PrintAndExit('Incorrect number of values in vector:\n' + line)
        matrix.append([float(value) for value in field[1:]])
        count += 1

        line = fp.readline()
    fp.close()

    # Make sure 'num_vect' vectors were read from the file.
    #
    if count != num_vect:
        msg.PrintAndExit('Expected %d vectors, but read %d from file: %s' %
                         (num_vect, count, v_file))

    return matrix
Exemple #17
0
    def Run(self):
        """
        Run the scripts required to run simpoint and generate a region CSV
        file with the results.

        @return Exit code from the last utility which was run
        """

        msg.PrintMsg('')

        # Get the command line options
        #
        (options, args) = self.ParseCommandLine()

        # Make sure required utilities exist and are executable.
        #
        if util.Which(self.simpoint_bin) is None:
            msg.PrintAndExit('simpoint binary not in path.\n'
                             'Add directory where it exists to your path.')
        if util.Which(self.csv_bin) is None:
            msg.PrintAndExit(
                'script to generate the region CSV file not in path.\n'
                'Add directory where it exists to your path.')

        # Go to the data directory. Both utilities should be run in this directory.
        #
        os.chdir(options.data_dir)

        # Always copy the specific BBV to the generic name used by simpoint.
        # If LDV file exists, then copy it to generic name.
        #
        if os.path.isfile(self.generic_bbv_name):
            os.remove(self.generic_bbv_name)
        shutil.copy(options.bbv_file, self.generic_bbv_name)
        ldv_file = options.bbv_file.replace('.bb', '.ldv')
        if os.path.isfile(ldv_file):
            if os.path.isfile(self.generic_ldv_name):
                os.remove(self.generic_ldv_name)
            shutil.copy(ldv_file, self.generic_ldv_name)

        # Get the instruction count and slice_size from the BBV file.
        #
        try:
            f = open(self.generic_bbv_name)
        except IOError:
            msg.PrintAndExit('problem opening BBV file: ' +
                             self.generic_bbv_name)
        instr_count = 0
        slice_size = 0
        for line in f:
            # Bug fix: the last token on the line is a string, so it must be
            # converted to int *before* the max comparison.  The old code
            # compared str > int, which is not a numeric comparison.
            if line.find('Dynamic') != -1:
                count = int(line.split()[-1])
                if count > instr_count:
                    instr_count = count
            if line.find('SliceSize') != -1:
                size = int(line.split()[-1])
                if size > slice_size:
                    slice_size = size
        f.close()

        # Check to make sure instruction count > slice_size.
        #
        if slice_size > instr_count:
            import locale
            locale.setlocale(locale.LC_ALL, "")
            msg.PrintAndExit('Slice size is greater than the number of instructions.  Reduce parameter \'slice_size\'.' + \
                '\nInstruction count: ' + locale.format('%14d', int(instr_count), True) + \
                '\nSlice size:        ' + locale.format('%14d', int(slice_size), True))

        if options.ldv:
            # Run to generate regions CSV file using both BBV and LDV files.
            #
            result = self.NormProjectBBV(options)
            util.CheckResult(
                result, options,
                'normalizing and projecting BBV with: ' + self.csv_bin)
            result = self.NormWeightLDV(options)
            util.CheckResult(
                result, options,
                'normalizing and applying weights to LDV with: ' +
                self.csv_bin)
            result = self.CombineFreqVectFiles(options)
            util.CheckResult(
                result, options,
                'scaling and combining BBV and LDV with: ' + self.csv_bin)
            result = self.RunSimpoint(options)
            util.CheckResult(
                result, options,
                'generating clusters (regions) with: ' + self.simpoint_bin)
            result, regions_csv_file = self.GenRegionCSVFile(options)
            util.CheckResult(result, options,
                             'creating regions CSV file with: ' + self.csv_bin)
            msg.PrintMsg('\nRegions CSV file: ' +
                         os.path.join(options.data_dir, regions_csv_file))
        else:
            # Run scripts to generate regions CSV file using the new method
            # with just the BBV file.
            #
            result = self.NormProjectBBV(options)
            util.CheckResult(
                result, options,
                'normalizing and projecting BBV with: ' + self.csv_bin)
            result = self.RunSimpoint(options)
            util.CheckResult(
                result, options,
                'generating clusters (regions) with: ' + self.simpoint_bin)
            result, regions_csv_file = self.GenRegionCSVFile(options)
            util.CheckResult(result, options,
                             # Bug fix: message said 'CSF' instead of 'CSV'.
                             'creating regions CSV file with: ' + self.csv_bin)
            msg.PrintMsg('\nRegions CSV file: ' +
                         os.path.join(options.data_dir, regions_csv_file))

        return result
Exemple #18
0
    def ParseCommandLine(self):
        """
        Process command line arguments and ensure they are valid.

        Exactly one pinball source (--replay_file, --replay_dir, --all_file
        or --all_dir) must be given; conflicting sources are rejected.

        @return List of command line options
        """

        # Extract the bare version number from the RCS keyword string.
        #
        rev = '$Revision: 1.78 $'
        ver = rev.replace('$Revision: ', '').replace(' $', '')
        us = '%prog [options]\nVersion: ' + ver
        desc = 'Replays one, or more, pinball(s). Must use one of '\
               'the following options: \n'\
               '--replay_file, --all_file, --all_dir'

        util.CheckNonPrintChar(sys.argv)
        opt_parser = optparse.OptionParser(usage=us,
                                           version=ver,
                                           description=desc)

        # Register the options which control the behavior of the script.
        # The methods taking a 2nd argument of '' would normally receive an
        # option group; this script does not use option groups, so the
        # argument is empty.  Keep registration order: it determines the
        # order options appear in the help output.
        #
        cmd_options.debug(opt_parser)
        cmd_options.verbose(opt_parser)
        cmd_options.all_dir(opt_parser)
        cmd_options.all_file(opt_parser)
        cmd_options.config_file(opt_parser)
        cmd_options.global_file(opt_parser)
        cmd_options.list(opt_parser, '')
        cmd_options.no_glob(opt_parser)
        cmd_options.num_cores(opt_parser, '')
        cmd_options.replay_dir(opt_parser)
        cmd_options.replay_file(opt_parser)
        cmd_options.replay_filter(opt_parser)
        cmd_options.save_global(opt_parser)

        self.AddAdditionalOptions(opt_parser)

        (options, args) = opt_parser.parse_args()

        # Attach the cbsp() method to 'options' so callers can check whether
        # we are running CBSP.
        #
        util.AddMethodcbsp(options)

        # Read in configuration files and set global variables.  A config
        # file is not required here, so don't insist on the 4 standard
        # variables being defined.
        #
        cfg = config.ConfigClass()
        cfg.GetCfgGlobals(options, False)

        # Once the tracing configuration parameters are read, get the kit in
        # case pinplayhome was set on the command line.
        #
        self.kit_obj = self.GetKit()

        # Print out the version number when debugging.
        #
        if config.debug:
            print(os.path.basename(sys.argv[0]) + " $Revision: 1.78 $")

        # Error checking: at least one pinball source must be given, and the
        # mutually-exclusive sources must not be combined.
        #
        sources = (options.replay_file, options.all_dir, options.all_file,
                   options.replay_dir)
        if all(src == '' for src in sources):
            msg.PrintAndExit(
                "Either a replay directory or replay file must be specified!")
        elif sum(1 for src in (options.all_dir, options.all_file,
                               options.replay_file) if src != '') > 1:
            msg.PrintAndExit(
                "Specify either a replay directory or a replay file, not both!")

        return options
Exemple #19
0
def GetOptions():
    """
    Get users command line options/args and check to make sure they are correct.

    @return List of options and 3 file pointers for bbv, simpoint and weights files
    """

    # Bug fix: strip the leading '$Revision: ' marker (as the sibling scripts
    # do) instead of removing every space, which left '$Revision:' and '$'
    # embedded in the --version output.
    #
    version = '$Revision: 1.30 $'
    version = version.replace('$Revision: ', '')
    ver = version.replace(' $', '')
    us = '%prog [options] action file_name [file_name]'
    desc = 'Implements several actions used to process FV (Frequency Vector) files.  ' \
           'One action, and only one, must be defined in order for the script to run.  '\
           'All actions require at least one file name be given using an option. \n\n'\
           '' \
           'There are two types of frequency vector files:  '\
           '                                                            '\
           'BBV = Basic Block Vector, '\
           '                                                            '\
           'LDV = LRU stack Distance Vector'

    def combine(parser, group):
        """
        IMPORTANT NOTE:
        This is a local definition for the option which has more help
        information than the default defined in cmd_options.py.  This info is
        specific to this script and is not applicable to the other locations
        where the option is used.

        Default value for combine is None instead of setting it to a value
        (as it is in cmd_options.py).  This allows the option to be used to
        determine what to do.

        @return  No return value
        """

        method = cmd_options.GetMethod(parser, group)
        method(
            "--combine",
            dest="combine",
            default=None,
            help=
            "Combine the vectors for BBV and LDV files into a single FV file, use scaling "
            "factor COMBINE (0.0 >= COMBINE <= 1.0).  The BB vectors "
            "are scaled by COMBINE, while the LD vectors are scaled by 1-COMBINE.  Default: 0.5  "
            "Assumes both files have already been transformed by the appropriate process "
            "(project/normal for BBV, weight/normal for LDV). "
            "Must use --normal_bbv and --normal_ldv to define files to process."
        )

    util.CheckNonPrintChar(sys.argv)
    parser = optparse.OptionParser(
        usage=us,
        version=ver,
        description=desc,
        formatter=cmd_options.BlankLinesIndentedHelpFormatter())

    cmd_options.dimensions(parser, '')
    cmd_options.focus_thread(parser, '')

    # Options which define the actions the script to execute
    #
    action_group = cmd_options.ActionGroup(parser)

    combine(parser, action_group)
    cmd_options.csv_region(parser, action_group)
    cmd_options.project_bbv(parser, action_group)
    cmd_options.weight_ldv(parser, action_group)

    parser.add_option_group(action_group)

    # Options which list the files the script can process
    #
    file_group = cmd_options.FileGroup(parser)

    cmd_options.bbv_file(parser, file_group)
    cmd_options.ldv_file(parser, file_group)
    cmd_options.normal_bbv(parser, file_group)
    cmd_options.normal_ldv(parser, file_group)
    cmd_options.region_file(parser, file_group)
    cmd_options.vector_file(parser, file_group)
    cmd_options.weight_file(parser, file_group)

    parser.add_option_group(file_group)

    # Parse command line options and get any arguments.
    #
    (options, args) = parser.parse_args()

    # Added method cbsp() to 'options' to check if running CBSP.
    #
    util.AddMethodcbsp(options)

    def TrueXor(*args):
        """Return xor of some booleans."""
        return sum(args) == 1

    # Must have one, and only one, action on command line.
    #
    if not TrueXor(options.csv_region, options.project_bbv, options.weight_ldv,
                   options.combine is not None,
                   options.vector_file is not None):
        msg.PrintAndExit(
            'Must give one, and only one, action for script to execute.\n'
            'Use -h to get help.')

    # Check to see if options required for the various actions are given.
    #
    def file_error(file, action):
        """Exit with an error naming the file option required by 'action'."""
        msg.PrintAndExit("Must use option '" + file + \
            "' to define the file to use with '" + action + "'.   \nUse -h for help.")

    fp_bbv = fp_ldv = fp_simp = fp_weight = None

    if options.combine:
        # Check to make sure the option 'combine' is an acceptable value.  If so, then turn it into a float.
        #
        util.CheckCombine(options)
        options.combine = float(options.combine)

        # Then check to make sure required files are given.
        #
        if not options.normal_bbv:
            file_error('--normal_bbv', '--combine')
        if not options.normal_ldv:
            file_error('--normal_ldv', '--combine')
        fp_bbv = OpenNormalFVFile(options.normal_bbv,
                                  'projected, normalized BBV file: ')
        # Bug fix: this opens the LDV file, so describe it as such (the old
        # message was a copy/paste of the BBV one).
        fp_ldv = OpenNormalFVFile(options.normal_ldv,
                                  'weighted, normalized LDV file: ')

    if options.csv_region:
        if not options.bbv_file:
            file_error('--bbv_file', '--csv_region')
        if not options.region_file:
            file_error('--region_file', '--csv_region')
        if not options.weight_file:
            file_error('--weight_file', '--csv_region')
        fp_bbv = OpenFVFile(options.bbv_file,
                            'Basic Block Vector (bbv) file: ')
        fp_simp = OpenSimpointFile(options.region_file, 'simpoints file: ')
        fp_weight = OpenWeightsFile(options.weight_file, 'weights file: ')

    if options.project_bbv:
        if not options.bbv_file:
            file_error('--bbv_file', '--project_bbv')
        fp_bbv = OpenFVFile(options.bbv_file,
                            'Basic Block Vector (bbv) file: ')

    if options.weight_ldv:
        if not options.ldv_file:
            file_error('--ldv_file', '--weight_ldv')
        fp_ldv = util.OpenCompressFile(options.ldv_file)

    return (options, fp_bbv, fp_ldv, fp_simp, fp_weight)
Exemple #20
0
def CheckGdb(options, check_version=True):
    """
    Check GDB to see if it's compatible with the DrDebug environment.

    Make sure GDB is built with Python support and, if required, GDB version is
    new enough.  A base version of GDB is required because PinADX uses a new
    shared-library related remote command (only available in/after version 7.4)
    to convey shared library load locations etc to GDB.

    Prints a warning message if either check fails, but continues to run.

    NOTE: Sets global attribute gdb_path

    @param options Parsed command line options (options.gdb may name a binary)
    @param check_version If False, skip the version-number check

    @return no return
    """
    def RunCmd(cmd):
        """
        Run a command and return stdout/stderr.
        """

        p = subprocess.Popen(cmd,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()

        return stdout, stderr

    global gdb_path

    # Get GDB from either the user option or the current PATH.
    #
    if options.gdb:
        gdb_path = util.Which(options.gdb)
    else:
        gdb_path = util.Which('gdb')
    if not gdb_path:
        if options.gdb:
            msg.PrintAndExit('gdb binary not found: ' + options.gdb)
        else:
            msg.PrintAndExit('gdb not found in PATH')

    # Is there Python support in GDB?
    #
    err_str = 'Python scripting is not supported in this copy of GDB'
    cmd = gdb_path + ' --batch -ex \'python print "hello"\''
    stdout, stderr = RunCmd(cmd)
    if err_str in stderr:
        python_support = False
        msg.PrintMsg(
            '\nWARNING: This version of gdb (%s) does not support Python.\n'
            'As a result, \'pin\' commands will not work.  Try \'monitor\' versions\n'
            'of the commands instead.\n' % (gdb_path))
    else:
        python_support = True

    if not check_version:
        return

    # Get version info from gdb
    #
    cmd = gdb_path + ' --version'
    stdout, stderr = RunCmd(cmd)

    # Parse the version string to get the actual version number.
    # Here's an example of the entire version string:
    #
    #   $ gdb --version
    #   GNU gdb (GDB) Red Hat Enterprise Linux (7.2-60.el6)
    #   Copyright (C) 2010 Free Software Foundation, Inc.
    #   ...
    #
    line = stdout.split('\n', 1)[0]
    last = line.split()[-1]
    float_pattern = r'[1-9][0-9]*\.?[0-9]*'
    f = re.search(float_pattern, last)
    if f is None:
        # Bug fix: a version string which doesn't match the pattern used to
        # raise AttributeError on f.group(); warn and skip the check instead.
        msg.PrintMsg('\nWARNING: Unable to parse gdb version string:\n   ' +
                     line)
        return
    version = f.group(0)
    fields = version.split('.')
    major = int(fields[0])
    # Bug fix: tolerate a version with no (or an empty) minor component,
    # which used to raise IndexError/ValueError.
    minor = int(fields[1]) if len(fields) > 1 and fields[1] else 0
    cmajor = int(config.gdb_base_version.split('.')[0])
    cminor = int(config.gdb_base_version.split('.')[1])

    # Check to make sure it's at least the base version required for DrDebug.
    #
    if (major < cmajor) or ((major == cmajor) and (minor < cminor)):
        if python_support:
            msg.PrintMsg('\n')
        msg.PrintMsg(
            'WARNING: This version of gdb is: %s  When using versions of gdb < %s'
            '\nthe script will run, but with reduced functionality.\n' %
            (version, config.gdb_base_version))
Exemple #21
0
    def GenTraceinfoFiles(self, options):
        """
        Generate traceinfo files from sysinfo files. Also do some simple error
        checking.

        One traceinfo file is generated for each directory which contains
        region pinballs.  Currently, the script 'create_traceinfo.sh' is used
        to generate the traceinfo file.  This will be replaced with a Python
        script at sometime in the future.

        The following info is printed to allow the user to determine if the
        required number of region pinballs and traces were generated:
        1) The script 'count_traces.sh' gets a count of the number of
           expected traces and the region numbers from the region CSV file.
        2) The number of actual traces generated and the trace names.
        3) The number of region pinballs generated and the pinball names.

        @param options Parsed command line options (debug, list, mode, ...)

        @return 0 on success, -1 on any error
        """

        # Check to make sure there is at least one region pinball directory for
        # this tracing instance.
        #
        all_pp_dirs = util.GetRegionPinballDir(options)
        if all_pp_dirs == [] and not options.debug:
            msg.PrintMsg(
                '\nERROR: Could not find any PinPoint \'*.pp\' directories.\n'
                'Make sure you have run the phase \'-p\'.')
            return -1

        # Get the region pinball directories for this tracing instance.
        #
        # import pdb;  pdb.set_trace()
        all_lit_dirs = util.GetLitDir()
        if all_lit_dirs == [] and not options.debug:
            msg.PrintMsg(
                '\nERROR: Could not find any LIT \'*.lit\' directories.\n'
                'Make sure you have run the phase \'-L\'.')
            return -1

        result = 0
        # import pdb;  pdb.set_trace()
        for lit_dir in all_lit_dirs:

            # Get the name of the corresponding region pinball directory
            # and make sure it exists.
            #
            pp_dir = util.ChangeExtension(lit_dir, '.lit', '.pp')
            if not os.path.isdir(pp_dir):
                if not options.list:
                    msg.PrintMsgPlus('WARNING: Generating traceinfo files, but the required \'pp\' ' \
                        'directory does not exist:\n   ' + pp_dir)

                    # If running in MPI_MT_MODE, then it's possible for one process to
                    # not have a thread corresponding to the current focus thread.
                    # However, another process might have this thread.
                    # Thus, only return an error if not tracing a MPI_MT application.
                    #
                    if options.mode == config.MPI_MT_MODE:
                        msg.PrintMsg(
                            'Since tracing mode is \'mpi_mt\', this may be OK.')
                        continue
                    else:
                        return -1
                else:
                    # Not printing any msgs, just skip to the next directory
                    #
                    continue

            # Make sure the LIT directory exists, then go there.
            # NOTE: the current directory is saved so it can be restored on
            # every path out of this loop iteration.
            #
            old_dir = os.getcwd()
            if os.path.isdir(lit_dir):
                os.chdir(lit_dir)
            else:
                if not options.list:
                    msg.PrintMsgPlus('WARNING: Generating traceinfo files, but the LIT ' \
                        'directory does not exist:\n   ' + lit_dir)

                    # If running in MPI_MT_MODE, then it's possible for one process to
                    # not have a thread corresponding to the current focus thread.
                    # However, another process might have this thread.
                    # Thus, only return an error if not tracing a MPI_MT application.
                    #
                    if options.mode == config.MPI_MT_MODE:
                        msg.PrintMsg(
                            'Since tracing mode is \'mpi_mt\', this may be OK.')
                        continue
                    else:
                        return -1
                else:
                    # Not printing any msgs, just skip to the next directory
                    #
                    continue

            # Copy the traceinfo 'blank' XML files from the SDE kit.
            #
            blank_path = self.kit_obj.GetTraceinfoBlank()
            for blank in config.traceinfo_blank_files:
                blank_file = os.path.join(blank_path, blank)
                try:
                    shutil.copy(blank_file, os.getcwd())
                except IOError:
                    msg.PrintMsg(
                        '\nERROR: Unable to copy traceinfo \'blank\' file:\n    '
                        + blank_file)
                    return -1

            # Run the script to generate traceinfo XML file.  Stdout from the
            # script is the XML file.  Function util.RunCMD() needs the output XML
            # file object in order to write this file.
            #
            msg.PrintMsg('')
            base_name = util.ChangeExtension(lit_dir, '.lit', '')
            tr_file = base_name + '.traceinfo.xml'
            try:
                fp_out = open(tr_file, 'w')
            except IOError:
                msg.PrintMsg('ERROR: Failed to open traceinfo output file:\n'
                             '   ' + tr_file)
                return -1
            cmd = self.traceinfo_bin + ' ' + base_name
            result = util.RunCmd(cmd, options, '',
                                 concurrent=False,
                                 f_stdout=fp_out)
            if result != 0:
                msg.PrintMsg('Error found while running script \'%s\'' %
                             self.traceinfo_bin)
                return -1

            # Print info from the CSV regions file (located in the *.Data
            # directory) about the number of expected traces.
            #
            msg.PrintMsg('')
            param = {'in_lit_dir': True}
            cluster_info, not_used, total_instr = util.GetClusterInfo(base_name, param)
            if cluster_info == {}:
                msg.PrintMsg(
                    'ERROR: Problems getting cluster info from file: %s.pinpoints.csv'
                    % (base_name))
                return -1
            cluster_list = util.ParseClusterInfo(cluster_info)
            if len(cluster_info) != len(cluster_list):
                msg.PrintMsg('ERROR: Did not parse enough clusters from CSV file: %s.pinpoints.csv\n' '   Num clusters:         %d\n' \
                    '   Num parsed clusters:  %d' % (base_name, len(cluster_info), len(cluster_list)))
                return -1

            # Print the number of expected traces from the regions CSV file.
            # All clusters are expected to share one TID; exit if they don't.
            #
            base_tid = -1
            for cl in cluster_list:
                if (cl.has_key('tid')):
                    tid = cl['tid']
                else:
                    msg.PrintMsg(
                        'ERROR: Parsing cluster info for cluster:\n     %s' %
                        (cl))
                    return -1
                if base_tid == -1:
                    base_tid = tid
                else:
                    if tid != base_tid:
                        msg.PrintAndExit(
                            'ERROR: Expected TID %d, but found TID %d' %
                            (base_tid, tid))
                        return -1
            msg.PrintMsg('Expected trace count: %d\n' % (len(cluster_info)))

            # Print the number of actual traces in the LIT directory and the names.
            #
            # import pdb;  pdb.set_trace()
            if not options.list:
                msg.PrintMsg(
                    'Actual trace count: ' + str(util.CountFiles('ami')))
                lit_files = glob.glob('*.ami*')
                lit_files.sort()
                for f in lit_files:
                    msg.PrintMsg('   ' + f)

            # Clean up tmp files in the LIT directory.
            #
            tmp_files = glob.glob('*blank*.xml')
            for f in tmp_files:
                os.remove(f)

            # Return to the working directory.
            #
            os.chdir(old_dir)

            # Go to the *.pp directory. Print the number of actual pinballs and the names.
            #
            os.chdir(pp_dir)
            if not options.list:
                msg.PrintMsg(
                    '\nPinball count: ' + str(util.CountFiles('address')))
                pp_files = glob.glob('*.address')
                pp_files.sort()
                for f in pp_files:
                    msg.PrintMsg('   ' + f)

                # Print a warning if the expected number of traces are not found.
                #
                if len(lit_files) != len(pp_files):
                    msg.PrintMsgPlus(
                        'WARNING: Number of traces does not match the number of region pinballs.')

            msg.PrintMsg('\nGenerated traceinfo file: ' + tr_file)

            # Return to the working directory.
            #
            os.chdir(old_dir)

        return result
# Get command line options and arguments
#
options, args = GetOptions()
base_name = args[0]

# Running in a LIT directory.
#
param = {'options': options, 'in_lit_dir': True}

# Get the cluster information from the regions CSV file.  Check to make sure we
# have parsed data for every cluster.
#
#import pdb;  pdb.set_trace()
cluster_info, not_used, total_instr = util.GetClusterInfo(base_name, param)
if cluster_info == {}:
    msg.PrintAndExit('Error reading file: ' + base_name)
cluster_list = util.ParseClusterInfo(cluster_info)
if len(cluster_info) != len(cluster_list):
    msg.PrintAndExit('traceinfo.py() did not parse enough clusters.\n' '   Num clusters:         %d\n' \
        '   Num parsed clusters:  %d' % (len(cluster_info), len(cluster_list)))

# First print some info which is independent of the specific traces in the current directory.
# Each FmtPrintCmd() invocation emits one XML fragment to stdout via RunCmd.
#
count = len(cluster_info)
util.RunCmd(FmtPrintCmd(blank_DTD), options, '',
            concurrent=False,
            print_time=False,
            print_cmd=False)
util.RunCmd(FmtPrintCmd(blank_head), options, '',
            concurrent=False,
            print_time=False,
Exemple #23
0
    def GetRegionMetric(self, sim_file, warmup, tid, options):
        """
        Get the metric of interest for just the representative region, not
        including any warmup instructions.

        It is assumed the first set of CMPSim output data is for the warmup
        instructions, if they exist.  This is true because when the CMPSim was
        run it should have printed out data at 'warmup_len' intervals.

        The last set of data will be for both the representative region and
        warmup instructions, if any.

        Of course, if there's only one set of data, then it is for the region
        only, because there aren't any warmup instructions.

        @param sim_file File with simulator results to process
        @param warmup Number of instructions in warmup section
        @param tid TID of results to be processed
        @param options Options given on cmd line

        @return metric (CPI as a float), or -1.0 if it cannot be computed
        """

        # Get a file pointer to the simulator data file.
        #
        f = util.OpenCompressFile(sim_file)
        if f is None:
            return -1.0

        # This is the code which needs to be modified in order to use a
        # different metric of interest for a new CMPSim.  The existing code
        # uses the metric CPI.
        #
        # Get the first and last lines in the output that have the
        # cycle/instruction counts.  Assume the 1st is always the info for the
        # warmup because the CMPSim data is dumped every 'warmup_length'
        # instructions.  Assume the last data point is for warmup + region.  If
        # there is only one line, then assume it's only for the region.
        #
        # Current code assumes the default Branch Predictor CMPSim is used.
        #
        # Always use the data for thread 0 because we don't generate prediction
        # error for cooperative region pinballs.  Need to fix this when
        # this capability is added.
        #
        first = ''
        last = ''
        marker = 'Thread: ' + str(0) + ' Instructions:'
        for line in f.readlines():
            if line.find(marker) != -1:
                # Remember the first matching line; 'last' tracks the most
                # recent one seen.
                if first == '':
                    first = line
                last = line
        # Bug fix: the file was previously never closed (handle leak).
        f.close()

        # If no data lines were found at all, return the same error value used
        # for a missing file instead of crashing on the split() below.
        if last == '':
            msg.PrintMsgPlus('WARNING: No CMPSim data for thread 0 found in file:\n' \
                '         ' + sim_file)
            return -1.0

        # Assumes the matched lines have the form
        # 'Thread: 0 Instructions: <icount> Cycles: <cycles> ...', so fields
        # 3 and 5 are the instruction and cycle counts.
        l_list = last.split()
        l_instr = int(l_list[3])
        l_cycles = int(l_list[5])

        if warmup == 0:
            # No warmup. Calc metric from the last set of data.
            #
            if l_instr > 0:
                metric = l_cycles / float(l_instr)
            else:
                msg.PrintAndExit('(1) Unable to calculate CPI because number of instructions is 0:\n' \
                    '            ' + sim_file)
        else:
            # Get number of instructions & cycles for first set of data. (from warmup)
            #
            f_list = first.split()
            f_instr = int(f_list[3])
            f_cycles = int(f_list[5])

            # Calculate region data by subtracting the last values from the
            # first values. This gives number of cycles and instructions for
            # just the region.
            #
            # Check to make sure there really is valid data.  If not, then print
            # a warning.  No need to exit with an error, because it's possible
            # for MPI_MT_MODE applications to have a different number of threads
            # in each process.  This means some processes may have a thread
            # 'tid', while this process may not.
            #
            if l_instr - f_instr > 0:
                metric = (l_cycles - f_cycles) / float(l_instr - f_instr)
            else:
                msg.PrintMsgPlus('WARNING: It looks like there were no warmup instructions in region CMPSim output for thread ' + \
                   str(tid) + ' in file:\n         ' + sim_file)
                msg.PrintMsg('First icount: %s    Last icount: %s' % (locale.format('%d', f_instr, True), \
                    locale.format('%d', l_instr, True)))
                if l_instr < config.instr_cmpsim_phase:
                    msg.PrintMsg(
                        'Slice size may be too small to calculate prediction error.')
                    msg.PrintMsg(
                        'It needs to be at least 1,000,000 for CMPSim to generate valid data.')
                msg.PrintMsg('Prediction error for this process may be suspect.')
                if hasattr(options,
                           'mode') and options.mode == config.MPI_MT_MODE:
                    msg.PrintMsg(
                        'Since tracing mode is \'mpi_mt\', this may be OK.')
                metric = -1.0

        return metric
Exemple #24
0
    def ParseCommandLine(self):
        """
        Get the options from the command line and check for errors.

        @return tuple with parsed options and unparsed args
        """

        # Derive the printable version string from the RCS 'Revision' keyword.
        #
        version = '$Revision: 1.33 $'
        ver = version.replace('$Revision: ', '').replace(' $', '')
        us = '%prog --bbv_file FILE --data_dir DIR FILE --simpoint_file FILE [options]'
        desc = 'Runs Simpoint and then generates the region CSV file.  ' \
               'Input to Simpoint can be just an BBV file or a combination of BBV/LDV files. \n\n' \
                'Required options: --bbv_file, --data_dir, --simpoint_file'

        util.CheckNonPrintChar(sys.argv)
        parser = optparse.OptionParser(
            usage=us,
            version=ver,
            description=desc,
            formatter=cmd_options.BlankLinesIndentedHelpFormatter())

        # Register every option this script accepts.  Each entry pairs the
        # cmd_options registration helper with the extra arguments it expects
        # (some helpers take only the parser).  Order determines help output.
        #
        option_adders = (
            (cmd_options.debug, ()),
            (cmd_options.global_file, ()),
            (cmd_options.list, ('',)),
            (cmd_options.bbv_file, ('',)),
            (cmd_options.data_dir, ()),
            (cmd_options.simpoint_file, ()),
            (cmd_options.ldv, ('',)),
            (cmd_options.combine, ('',)),
            (cmd_options.cutoff, ('',)),
            (cmd_options.focus_thread, ('',)),
            (cmd_options.maxk, ('',)),
            (cmd_options.num_cores, ('',)),
            (cmd_options.simpoint_options, ('',)),
        )
        for adder, extra in option_adders:
            adder(parser, *extra)

        (options, args) = parser.parse_args()

        # Added method cbsp() to 'options' to check if running CBSP.
        #
        util.AddMethodcbsp(options)

        # Must have option '--ldv', even if using option '--combine', in order
        # to process BBV/LDV both.  Let user know if '--combine' used w/o
        # '--ldv'.
        #
        if options.combine != -1.0 and not options.ldv:
            msg.PrintMsgPlus('WARNING: Option \'--combine\' detected without \'--ldv\'.  Only using BBV for ' \
                'Simpoint.  \n              Must explicitly specify \'--ldv\' in order to use both BBV/LDV.\n')
        if options.ldv:
            msg.PrintMsgPlus(
                'Using both BBV/LDV files when running Simpoint\n')

        # If option combine is not set, then set it to the default value.
        # Check to make sure combine an acceptable value.
        #
        util.SetCombineDefault(options)
        util.CheckCombine(options)

        # Read in an optional configuration files and set global variables.
        #
        config_obj = config.ConfigClass()
        config_obj.GetCfgGlobals(options,
                                 False)  # Don't need to require 4 variables

        # Error check input to make sure all required options are on the
        # command line.
        #
        required = (
            (options.bbv_file,
             'Basic block vector file must be defined with option: --bbv_file FILE'),
            (options.data_dir,
             'Simpoint data directory must be defined with option: --data_dir DIR'),
            (options.simpoint_file,
             'Simpoint output must be defined with option: --simpoint_file FILE'),
        )
        for value, err_str in required:
            if value == '':
                msg.PrintAndExit(err_str)

        # The data_dir should exist and contain the BBV file.
        #
        if not os.path.isdir(options.data_dir):
            msg.PrintAndExit('Data directory does not exist: ' +
                             options.data_dir)
        if not os.path.isfile(os.path.join(options.data_dir,
                                           options.bbv_file)):
            msg.PrintAndExit('Basic block vector file does not exist: ' +
                             options.bbv_file)

        # Do some 'special' things on native Windows.
        #
        util.WindowsNativeCheck(options)

        return (options, args)
Exemple #25
0
    def Replay(self, param, dirname, filename):
        """
        Replay a single pinball given the command line options and the name of
        the pinball to replay. It formats the appropriate command line options,
        saves global variables in a pickle file & calls the replayer script.

        @param param    Dictionary containing all parameters that need to be
                        passed into the method.  Need a dictionary because this
                        method is sometimes called by walk_callback() which
                        only allows one parameter in the functions it calls.
        @param dirname  Directory where pinball is located
        @param filename Pinball base file name

        @return Exit code from the replayer script.
        """

        # 'options' must always be supplied by the caller via 'param'.
        #
        if 'options' in param:
            options = param['options']
        else:
            msg.PrintAndExit(
                'method replay_dir.Replay() failed to get param \'options\'')
        if options.verbose:
            msg.PrintMsg('Start of Replay() in replay_dir.py')
        # import pdb ; pdb.set_trace()
        basename_file = os.path.join(dirname, filename)
        command = self.replayer_cmd + ' --replay_file ' + basename_file

        if options.verbose:
            msg.PrintMsg("-> Replaying pinball \"" + basename_file + "\"")
        if options.replay_options:
            command += ' --replay_options "' + options.replay_options + '"'

        # Check to see if need to add options for BB vector generation.  Set
        # 'log_opt' to any options the user may have put on the command line.
        #
        log_opt = options.log_options
        if options.bb_add_filename:
            file_name = os.path.basename(basename_file)

            # If there is a focus thread, then need to remove the TID from the
            # file name.
            #
            # NOTE: This code may need to be fixed when a method of running
            # Simpoints on all threads of cooperative pinballs is implemented.
            #
            file_name = util.RemoveTID(file_name)

            # Write BB vector files to the newly created *.Data directory.
            #
            data_dir = file_name + '.Data'
            if not os.path.isdir(data_dir):
                os.mkdir(data_dir)
            log_opt += ' -o ' + os.path.join(data_dir, file_name)

        # Check to see if need to add options when running a simulator.
        #
        # import pdb ; pdb.set_trace()
        if options.sim_add_filename:

            # Need to instantiate a kit of the type simulator being used.
            # This is required in order to get some kit specific information.
            #
            # NOTE: If you are adding a kit for a new simulator, then you need
            # to modify this code.
            #
            if config.sim_kit_type == config.BRPRED:
                import brpred_kit
                sim_kit = brpred_kit.BrPredKit()
            elif config.sim_kit_type == config.SINUCA:
                import sinuca_kit
                sim_kit = sinuca_kit.Sinuca_TracerKit()
            elif config.sim_kit_type == config.CMPSIM:
                import sde_cmpsim_kit
                sim_kit = sde_cmpsim_kit.CMPsimKit()
            else:
                msg.PrintAndExit('Undefined kit type in method replay_dir.Replay(): ' + \
                    str(config.sim_kit_type))

            # Add the simulator knob to specify the file for the output from
            # the simulator.
            #
            # NOTE(review): the line below is disabled, so 'sim_kit' is
            # currently instantiated but unused here — confirm whether it
            # should be re-enabled.
            # ~ log_opt += ' ' + sim_kit.GetSimOutputFile(basename_file)

        # When 'log_opt' is added to the command line below, it will
        # put double quotes (") around all the options. Therefore, need to
        # remove any existing double quotes in the current value for the
        # string 'log_opt'.
        #
        log_opt = log_opt.replace('"', '')

        # If relogging WP pinballs, need to add the -log:basename knob with
        # the relogged pinball path/name.
        #
        # import pdb ; pdb.set_trace()
        if options.wp_relog_dir:

            ft = util.GetFocusThreadPB(basename_file)
            if ft > -1 and not options.relog_focus:

                # If WP pinballs were relogged with a focus thread, then the
                # resulting pinballs were 'per thread', not 'cooperative'.  If
                # relogging with a different filter (i.e. options.relog_focus ==
                # False) then need to remove TID from base file name given to
                # the knob -log:basename.
                #
                file_name = os.path.basename(util.RemoveTID(basename_file))
            else:
                file_name = os.path.basename(basename_file)

            log_opt += ' -log:basename ' + os.path.join(options.wp_relog_dir,
                                                        file_name)
            if not options.list:
                msg.PrintMsgDate('Relog whole program pinball: ' + file_name)

        if log_opt:
            command += ' --log_options "' + log_opt + '"'
        if options.playout:
            command += ' --playout '

        # if not options.list:
        #     msg.PrintMsg(command)

        # If not just listing the command, then dump the global
        # variables so the next Python script can have access to them.
        # Then run the script.
        #
        result = 0
        if not options.list:

            # Dump the global data to a unique file name.  Need to add the
            # option --global_file with this unique file name to options when
            # calling a script.
            #
            gv = config.GlobalVar()
            command += util.AddGlobalFile(gv.DumpGlobalVars(), options)
            command += util.AddCfgFile(options)

            result = util.RunCmd(command, options, filename,
                                 concurrent=True)  # Run concurrent jobs here

        else:
            # If the option 'list' is defined, then just list out the
            # commands to be executed, but don't execute them.
            #
            msg.PrintMsg(command)

        return result
# $Id: correct_size_matrix.py,v 1.1 2014/05/27 22:28:26 tmstall Exp tmstall $

import datetime
import glob
import math
import optparse
import os
import random
import re
import sys

import cmd_options
import msg
import util

def err_msg(string):
    """Exit the script with an error naming the invalid item 'string'."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    msg.PrintAndExit('This is not a valid ' + string + \
        '\nUse -h for help.')

def OpenFile(fl, type_str):
    """
    Check to make sure a file exists and open it.

    @param fl       Name of the file to open (may be compressed)
    @param type_str Short description of the file type, used in error messages

    @return file pointer
    """

    # import pdb;  pdb.set_trace()
    if not os.path.isfile(fl):
        msg.PrintAndExit('File does not exist: %s' % fl)
    fp = util.OpenCompressFile(fl)
    if fp is None:
        err_msg(type_str + fl)

    # Bug fix: the file pointer was never returned, so callers always got
    # None despite the documented contract.
    return fp
# For each LIT directory, create a link to each file which is formatted properly
# to be submitted to the GTR.
#
dirs = util.GetLitDir()
dirs.sort()
for lit_dir in dirs:

    # Make sure the LIT directory exists, then go there.
    #
    # NOTE(review): 'old_dir' is saved here but not restored within this
    # loop body — presumably a later os.chdir(old_dir) exists; confirm.
    #
    # import pdb;  pdb.set_trace()
    old_dir = os.getcwd()
    if os.path.isdir(lit_dir):
        os.chdir(lit_dir)
        msg.PrintMsg('Dir: ' + os.path.basename(os.getcwd()))
    else:
        msg.PrintAndExit('ERROR: The LIT directory does not exist:\n   ' + \
                         lit_dir)

    # Get a list of the simulator files for each trace. Don't include any of the
    # PinPlay files.  Each pattern is globbed in the current (LIT) directory.
    #
    files = ['*lit*']
    files += ['*arch*']
    files += ['*ami*']
    files += ['*cmd*']
    files += ['*cpuid*']
    files += ['*procinfo*']
    files += ['*tzcat*']
    lit_files = []
    for f in files:
        lit_files += glob.glob(f)
def GetOptions():
    """
    Get users command line options/args and check to make sure they are correct.

    @return tuple (options, fp_bbv, fp_ldv, fp_simp, fp_weight); the file
            pointers are opened only for the files required by the chosen
            action, the rest are None
    """

    # Derive the printable version string from the RCS 'Revision' keyword.
    #
    version = '$Revision: 1.1 $'
    version = version.replace('$Revision: ', '')
    ver = version.replace(' $', '')
    us = '%prog [options] action file_name [file_name]'
    desc = 'Implements several different actions to process FV (Frequency Vector) files.  ' \
           'An action must be defined in order for the script to run.  '\
           'All actions require at least one file name be given using an option. '\
           '                                                            '\
           '--------------------------------------------'\
           '                                                            '\
           'There are two types of frequency vector files:  '\
           '                                                            '\
           'BBV = Basic Block Vector, '\
           'LDV = LRU stack Distance Vector'


    parser = optparse.OptionParser(usage=us, version=ver, description=desc)

    cmd_options.focus_thread(parser, '')

    # Options which define the actions the script to execute
    #
    action_group = cmd_options.ActionGroup(parser)

    cmd_options.combine(parser, action_group)
    cmd_options.csv_region(parser, action_group)
    cmd_options.project_bbv(parser, action_group)
    cmd_options.weight_ldv(parser, action_group)

    parser.add_option_group(action_group)

    # Options which list the files the script can process
    #
    file_group = cmd_options.FileGroup(parser)

    cmd_options.bbv_file(parser, file_group)
    cmd_options.ldv_file(parser, file_group)
    cmd_options.normal_bbv(parser, file_group)
    cmd_options.normal_ldv(parser, file_group)
    cmd_options.region_file(parser, file_group)
    # cmd_options.vector_file(parser, file_group)
    cmd_options.weight_file(parser, file_group)

    parser.add_option_group(file_group)

    # Parse command line options and get any arguments.
    #
    (options, args) = parser.parse_args()

    # Bug fix: removed leftover debug code that called ReadVectorFile()/
    # PrintVectorFile() and then sys.exit(0) here.  It made every check below
    # unreachable and prevented the documented return value from ever being
    # produced.

    def TrueXor(*args):
        """Return True when exactly one of the boolean arguments is True."""
        return sum(args) == 1

    # Must have one, and only one, action on command line.
    #
    if not TrueXor(options.csv_region, options.project_bbv, options.weight_ldv,
                   options.combine is not None):
        msg.PrintAndExit('Must give one, and only one, action for script to execute.\n'
                         'Use -h to get help.')

    # Check to see if options required for the various actions are given.
    #
    def file_error(file_opt, action):
        """Exit: the file option required by 'action' was not given."""
        msg.PrintAndExit("Must use option '" + file_opt + \
            "' to define the file to use with '" + action + "'.   \nUse -h for help.")

    # Open only the files required by the selected action.
    #
    fp_bbv = fp_ldv = fp_simp = fp_weight = None
    if options.combine:
        # First check to make sure the scaling factor is a valid FP number
        # between 0.0 and 1.0
        #
        if not util.IsFloat(options.combine):
            msg.PrintAndExit('Illegal value for scaling factor: ' + str(options.combine) + \
                '\nScaling factor must be a float between 0.0 and 1.0.')
        else:
            value = float(options.combine)
            if value < 0.0 or value > 1.0:
                msg.PrintAndExit('Scaling factor given (%f) must be between 0.0 and 1.0' % value)
            options.combine = value

        # Then check to make sure required files are given.
        #
        if not options.normal_bbv:
            file_error('--normal_bbv', '--combine')
        if not options.normal_ldv:
            file_error('--normal_ldv', '--combine')
        fp_bbv = OpenNormalFVFile(options.normal_bbv,
                                  'projected, normalized BBV file: ')
        # Bug fix: this description previously said 'BBV' (copy/paste error).
        fp_ldv = OpenNormalFVFile(options.normal_ldv,
                                  'projected, normalized LDV file: ')

    if options.csv_region:
        if not options.bbv_file:
            file_error('--bbv_file', '--csv_region')
        if not options.region_file:
            file_error('--region_file', '--csv_region')
        if not options.weight_file:
            file_error('--weight_file', '--csv_region')
        fp_bbv = OpenFVFile(options.bbv_file, 'Basic Block Vector (bbv) file: ')
        fp_simp = OpenSimpointFile(options.region_file, 'simpoints file: ')
        fp_weight = OpenWeightsFile(options.weight_file, 'weights file: ')

    if options.project_bbv:
        if not options.bbv_file:
            file_error('--bbv_file', '--project_bbv')
        fp_bbv = OpenFVFile(options.bbv_file, 'Basic Block Vector (bbv) file: ')

    if options.weight_ldv:
        if not options.ldv_file:
            file_error('--ldv_file', '--weight_ldv')
        fp_ldv = util.OpenCompressFile(options.ldv_file)

    return (options, fp_bbv, fp_ldv, fp_simp, fp_weight)
Exemple #29
0
def FinalizeGDB(kit_script_path, options):
    """
    Execute the final section of the program.  If the user has not explicitly
    given the port to use for communications between Pin and GDB, look in the
    GDB command output file for the string 'target remote'.  Timeout if not
    found in a reasonable amount of time.  Then add some more commands to the
    GDB command file and run GDB.

    @param kit_script_path Explicit path to location in kit where scripts are located
    @param options Options given on cmd line

    @return exit code from running GDB
    """

    # Format the string 'target_str' to use the appropriate port
    #
    if options.debug_port:
        # Use a specific port if the user defines one
        #
        # import pdb;  pdb.set_trace()
        target_str = 'target remote :%s' % options.debug_port
        gdb_file = open(config.gdb_cmd_file, 'w')
    else:
        # Use the port chose by Pin.  Pin will write it out to the
        # GDB command file.   Exit with an error if it's not there
        # within 30 seconds.
        #
        target_str = 'target remote'
        timeout = 30
        count = 0
        found = False
        while count < timeout:
            with open(config.gdb_cmd_file, 'r') as gdb_file:
                if target_str in gdb_file.read():
                    found = True
                    break
            time.sleep(1)
            if hasattr(options, 'verbose') and options.verbose:
                msg.PrintMsg('Waiting for "target remote"')
            count += 1
        found = True
        if not found:
            msg.PrintAndExit('Unable to find GDB string \'%s\' in file %s' %
                             (target_str, config.gdb_cmd_file))
        time.sleep(1)
        if hasattr(options, 'verbose') and options.verbose:
            with open(config.gdb_cmd_file, 'r') as gdb_file:
                msg.PrintMsg('Target cmd:  ' + gdb_file.read())

        # Get the port selected by Pin
        #
        # import pdb;  pdb.set_trace()
        with open(config.gdb_cmd_file, 'r') as gdb_file:
            target_str = gdb_file.read()
        if hasattr(options, 'verbose') and options.verbose:
            msg.PrintMsg('Target cmd:  ' + target_str)
        gdb_file = open(config.gdb_cmd_file, 'w')

    # Write some control info and the command to load Pin Python file to the
    # GDB command file. Set PYTHONPATH to the location of the scripts.
    #
    # import pdb;  pdb.set_trace()
    time.sleep(1)
    gdb_file.write('set remotetimeout 30000\n')
    pin_python = os.path.join(kit_script_path, 'pin.py')
    gdb_file.write('source %s\n' % (pin_python))
    gdb_file.write('%s\n' % (target_str))
    gdb_file.close()
    time.sleep(1)
    os.environ["PYTHONPATH"] = kit_script_path
    if options.verbose:
        print "\nGDB cmd file:"
        os.system('cat ' + config.gdb_cmd_file)
        print

    # Format command and run GDB
    #
    cmd = gdb_path
    cmd += ' --command=%s' % config.gdb_cmd_file
    if hasattr(options, "gdb_options") and options.gdb_options:
        cmd += ' %s' % options.gdb_options
    cmd += ' %s' % options.command
    if hasattr(options, 'verbose') and options.verbose and \
       not options.debug:
        msg.PrintMsg(cmd)
    result = util.RunCmd(cmd,
                         options,
                         '',
                         concurrent=False,
                         print_time=False,
                         print_cmd=False)
    return result
Exemple #30
0
    def RunSimPoint(self, kit_path, script_path, options, bin_options):
        """
        Run Simpoint in the CBSP Data directory and generate weight files for
        each binary in the respective binary Data directory.

        @param kit_path Path to kit
        @param script_path Explicit path to location in kit where scripts are located
        @param options Options given on cmd line
        @param bin_options List of options for each CBSP binary

        @return exit code from Simpoint
        """

        # Get CBSP Data directory and WP pinball basenames.  For multi-process
        # need to expand list returned by util.GetWPPinballs() to include all
        # pinballs in each WP dir.
        #
        result = 0
        cbsp_data_dir = util.GetCBSPDataDir(options)
        wp_pinballs = [util.GetWPPinballs(bopt)[0] for bopt in bin_options]
        # Strip the leading 'whole_program*/' directory component to get the
        # bare pinball basename for each binary.
        wp_basenames = [
            os.path.join(re.sub('whole_program.*/', '', pb))
            for pb in wp_pinballs
        ]

        # Go to CBSP Data directory to run Simpoint
        #
        orig_dir = os.getcwd()
        if os.path.isdir(cbsp_data_dir):
            os.chdir(cbsp_data_dir)
        else:
            msg.PrintMsg('ERROR: Unable to change to CBSP Data directory: ' +
                         cbsp_data_dir)
            return -1

        # Format the command to run simpoints.
        # Use 'bin_options[0]' because parameter should be the same for all
        # binaries.
        #
        sim_out_file = 'run_simpoint_out.txt'
        if not options.list:
            msg.PrintMsgDate('Running Simpoints for: %s' % options.cbsp_name)
            msg.PrintMsgPlus('Simpoint output file (including errors): %s\n' %
                             os.path.join(cbsp_data_dir, sim_out_file))
        if bin_options[0].simpoint_options:
            # User-supplied Simpoint options completely replace the defaults.
            msg.PrintMsgPlus(
                'NOTE: Default options for Simpoint not used, only user defined options.'
            )
            cmd = 'simpoint ' + bin_options[0].simpoint_options
        else:
            # Default invocation: search cluster counts from 2 up to 'maxk'
            # (or 25 when maxk is not given).
            if bin_options[0].maxk:
                cmd = 'simpoint -k 2:%d -dim 100 -numInitSeeds 25 -fixedLength off -iters 500' % bin_options[
                    0].maxk
            else:
                cmd = 'simpoint -k 2:25 -dim 100 -numInitSeeds 25 -fixedLength off -iters 500'
            cmd += ' -saveLabels labels.txt -saveSimpoints simpoints.txt -inputVectorsGzipped -loadFVFile matching-vec-profile.gz'
        if options.list or options.debug:
            msg.PrintMsg(cmd)
        else:
            fp_out = open(sim_out_file, 'w')
            result = util.RunCmd(cmd,
                                 options,
                                 '',
                                 f_stdout=fp_out,
                                 f_stderr=fp_out)
            fp_out.close()
            if result != 0:
                msg.PrintMsg('\nError found while running Simpoint in dir:\n'
                             '   %s' % os.getcwd())
                return result

        # Generate the binary Data directories
        #
        bin_data_dir = []
        for basename in wp_basenames:
            name = os.path.join('..', '%s.Data' % basename)
            bin_data_dir.append(name)
            if not os.path.isdir(name):
                try:
                    os.mkdir(name)
                except OSError:
                    msg.PrintAndExit(
                        'method RunSimPoint(), Unable to make directory: ' +
                        name)

        # Run command to generate weight files for the binaries
        #
        weight_out_file = 'generate_weights_out.txt'
        cmd = os.path.join('make_simpoint_weights.py --weight_file_list ')
        for data_dir in bin_data_dir:
            cmd += ' %s/weights.txt' % data_dir
        if not options.list:
            msg.PrintMsgPlus(
                'make_simpoint_weights.py output file (including errors): %s\n'
                % os.path.join(cbsp_data_dir, weight_out_file))
        if options.list or options.debug:
            msg.PrintMsg(cmd)
        else:
            fp_out = open(weight_out_file, 'w')
            result = util.RunCmd(cmd,
                                 options,
                                 '',
                                 f_stdout=fp_out,
                                 f_stderr=fp_out)
            fp_out.close()
            if result != 0:
                msg.PrintMsg(
                    '\nError found while running make_simpoint_weights.py in dir:\n'
                    '   %s' % os.getcwd())
                return result

        # Copy the simpoints and labels files to binary Data directories
        #
        #
        def copy_file(f, d):
            """Copy file 'f' to directory 'd'; return 0 on success, -1 on IOError."""
            try:
                shutil.copy(f, d)
                return 0
            except IOError:
                msg.PrintMsg('\nError found in dir:\n'
                             '    %s\nUnable to copy file:\n    %s to %s' %
                             (os.getcwd(), f, d))
                return -1

        for data_dir in bin_data_dir:
            result = copy_file('simpoints.txt', data_dir)
            result = result | copy_file('labels.txt', data_dir)
            if result != 0:
                return result

        # Generate the CSV files in each binary Data directory
        #
        for data_dir, basename in zip(bin_data_dir, wp_basenames):
            # Go to the binary Data directory
            #
            old_dir = os.getcwd()
            os.chdir(data_dir)

            # Run the script to generate CSV files for this binary
            #
            bb_prof = os.path.join('..', cbsp_data_dir,
                                   '%s.bb-profile.bz2' % basename)
            csv_file = '%s.pinpoints.csv' % basename
            if not options.list:
                data_dir_rel_path = os.getcwd().replace(
                    os.path.join(orig_dir, ''), '')
                msg.PrintMsgPlus(
                    'Any errors from running \'regions.py\' are in: %s' %
                    os.path.join(data_dir_rel_path, csv_file))
            cmd = os.path.join('regions.py --csv_region --bbv_file %s' %
                               bb_prof)
            cmd += ' --region_file=simpoints.txt --weight_file=weights.txt'
            cmd += ' > %s 2>&1' % csv_file
            msg.PrintMsg('')
            if options.list or options.debug:
                msg.PrintMsg(cmd)
            else:
                result = util.RunCmd(cmd, options, '')
                if result != 0:
                    msg.PrintMsg(
                        '\nError found while generating CSV files in:\n   %s' %
                        os.getcwd())
                    msg.PrintMsg('Error msgs in file: %s ' % csv_file)
                    return result

            # Return to the CBSP Data directory
            #
            os.chdir(old_dir)

        # Return to original directory
        #
        os.chdir(orig_dir)
        if not options.list:
            msg.PrintMsgDate('Finished running Simpoint for: ' +
                             options.cbsp_name)

        return result