Ejemplo n.º 1
0
def GetBlockIDs(fp):
    """
    Collect the per-basic-block information stored at the end of BBV
    frequency files.

    Extract the 'block id' and 'static instructions' fields from each
    block record.  Example record:

      Block id: 2233 0x69297ff1:0x69297ff5 static instructions: 2 block count: 1 block size: 5

    'static instructions' is the number of instructions in the basic block;
    'block count' is the number of times the block is executed.

    @return dict mapping block_id -> static instruction count of the block
    """

    blocks = {}

    # Advance to the first 'Block id:' record, stopping at EOF.
    line = ensure_string(fp.readline())
    while line and not line.startswith('Block id:'):
        line = ensure_string(fp.readline())

    # Consume the run of consecutive 'Block id:' records.
    while line.startswith('Block id:'):
        bbid = int(line.split('Block id:')[1].split()[0])
        icount = int(line.split('static instructions:')[1].split()[0])
        blocks[bbid] = icount
        line = ensure_string(fp.readline())

    return blocks
Ejemplo n.º 2
0
def ProcessBlockIdSummary(fp):
    """
    Process records starting with 'Block id:'.
     e.g. 'Block id: 2348 0x2aaac42a4306:0x2aaac42a430d'
     e.g. 'Block id: <bbid> <firstPC>:<lastPC>'
        The same 'firstPC' may start multiple 'bbid's.
    @return a dictionary mapping 'firstPC' to the list of (bbid, sticount)
       pairs it starts
    """

    first_pc_map = {}

    # Advance to the first 'Block id:' record, stopping at EOF.
    line = ensure_string(fp.readline())
    while line and not line.startswith('Block id:'):
        line = ensure_string(fp.readline())

    # Record layout (space-separated):
    #   'Block id: <bbid> <firstPC>:<lastPC> static instructions: <sticount> ...'
    while line.startswith('Block id:'):
        fields = line.split(' ')
        bbid = int(fields[2])
        first_pc = fields[3].split(":")[0]
        sticount = int(fields[6])
        first_pc_map.setdefault(first_pc, []).append((bbid, sticount))
        line = ensure_string(fp.readline())

    return first_pc_map
Ejemplo n.º 3
0
def OpenNormalFVFile(fv_file, type_str):
    """
    Open a normalized frequency vector file and check to make sure it's valid.

    The first line of a valid normalized FV file contains the number of vectors
    in the file followed by the char ':' and an optional char 'w'.

    Subsequent lines contain an optional weight (if 'w' is present on first line) followed
    by the number of fields in the vector, the char ':' and the values for the vector:  For example:

    80:w
    0.01250000000000000069 3:  0.00528070484084160793 0.00575272877173275011 0.00262986399034366479
    0.01250000000000000069 2:  0.00528070484084160793 0.00575272877173275011

    @return file pointer (rewound to the start of the file)
    """

    # Read in the 1st line of the file and check for errors.
    #
    # NOTE: the caller's 'type_str' value is deliberately overwritten here;
    # kept as-is for backward compatibility with existing callers.
    type_str = 'normalized frequency vector file: '
    fp = OpenFile(fv_file, type_str)
    line = ensure_string(fp.readline())
    field = line.split(':')
    num_vect = field[0]
    if not util.IsInt(num_vect):
        err_msg(type_str + fv_file)
    # BUG FIX: 'weights' was unbound when the header had no ':w' flag,
    # raising UnboundLocalError at the 'if weights:' test below.
    weights = False
    if len(field) == 2:
        if not 'w' in field[1]:
            err_msg(type_str + fv_file)
        else:
            weights = True

    # Read the 2nd line: an optional weight, the number of values in the vector and the vector itself.
    #
    line = ensure_string(fp.readline())
    if line == '':
        err_msg(type_str + fv_file)
    field = line.split()
    if weights:
        if not util.IsFloat(field[0]):
            err_msg(type_str + fv_file)
        field = field[1:]
    if len(field) < 2:
        err_msg(type_str + fv_file)
    # BUG FIX: validate the token as an integer *before* converting it;
    # the old code called int() first, so a malformed token raised
    # ValueError instead of reporting a file error.
    num_field_str = field[0].split(':')[0]
    if not util.IsInt(num_field_str):
        err_msg(type_str + fv_file)
    num_field = int(num_field_str)
    field = field[1:]
    if len(field) != num_field:
        err_msg(type_str + fv_file)
    for f in field:
        if not util.IsFloat(f):
            err_msg(type_str + fv_file)
    fp.seek(0, 0)

    return fp
Ejemplo n.º 4
0
    def RunCmd(cmd):
        """
        Run a command in a shell and capture its output.

        @return tuple (stdout, stderr) decoded to strings
        """

        proc = subprocess.Popen(cmd,
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()

        return ensure_string(out), ensure_string(err)
Ejemplo n.º 5
0
def GetFirstPcinfo(fp):
    """
    Get information about the first block executed.

    @return dict with keys 'pc', 'count', 'imagename', 'offset',
       'sourceinfo' on success; {'pc': 0, 'count': 0} if no marker
       record ('S:' or 'M:') is found before EOF
    """

    # Advance to the first marker record ('S:' or 'M:'), or EOF.
    line = ensure_string(fp.readline())
    while line and not (line.startswith('S:') or line.startswith('M:')):
        line = ensure_string(fp.readline())
    if not line:
        return {'pc': 0, 'count': 0}

    tokens = line.split()
    firstpc = tokens[1]

    # Fallback used for 'M:' records and for 'S:' records that are
    # missing the image/source fields.
    marker = {
        'pc': firstpc,
        'count': 1,
        'imagename': "no_image",
        'offset': '0x0',
        'sourceinfo': "Unknown:0"
    }

    if line.startswith('S:'):
        try:
            # Keep only the basename of the image path and source info.
            imagebasename = tokens[3].split('/')[-1]
            sourceinfo = tokens[8].split('/')[-1]
            marker = {
                'pc': firstpc,
                'count': 1,
                'imagename': imagebasename,
                'offset': tokens[6],
                'sourceinfo': sourceinfo
            }
        except IndexError:
            # Record too short: keep the fallback marker.
            pass

    return marker
Ejemplo n.º 6
0
def GetSlice(fp):
    """
    Get the frequency vector for one slice (i.e. line in the FV file).

    All the frequency vector data for a slice is contained in one line.  It
    starts with the char 'T'.  After the 'T', there should be a sequence
    containing one, or more, of the following sets of tokens:
       ':'  integer  ':' integer
    where the first integer is the dimension index and the second integer is
    the count for that dimension. Ignore any whitespace.

    @return list of the frequency vectors for the slice; element = (dimension, count)
    """

    line = ensure_string(fp.readline())
    while line and not line.startswith('T'):
        # Don't skip past the basic-block info section at the end of BBV
        # files.  If 'Block id:' is found, rewind the file pointer to just
        # before it and stop.
        if line.startswith('Block id:'):
            fp.seek(0 - len(line), os.SEEK_CUR)
            return []
        line = ensure_string(fp.readline())
    if not line:
        return []

    # A line containing only 'T' is a slice with no data.
    if line == 'T\n':
        return [(0, 0)]

    # Each entry is ':' <dimension> ':' <count>, whitespace ignored.
    return [(int(dim), int(cnt))
            for dim, cnt in re.findall(':\s*(\d+)\s*:\s*(\d+)\s*', line)]
Ejemplo n.º 7
0
def OpenFVFile(fv_file, type_str):
    """
    Open a frequency vector file and check to make sure it's valid.  A valid
    FV file must contain at least one line which starts with the string 'T:'.

    @return file pointer (rewound to the start of the file)
    """

    fp = OpenFile(fv_file, type_str)

    # Scan until a 'T:' line is found or EOF is reached.
    line = ensure_string(fp.readline())
    while line and not line.startswith('T:'):
        line = ensure_string(fp.readline())

    # Reaching EOF without a 'T:' line means the file is invalid.
    if not line.startswith('T:'):
        err_msg(type_str + fv_file)

    fp.seek(0, 0)
    return fp
Ejemplo n.º 8
0
def ProcessLabelFile(fp_lbl):
    """
    Process records in a t.labels file.

    Each line has the format:
        cluster distance_from_centroid
    The slice number is implicit: sliceNumber = (lineNumber - 1).

    @return a list mapping slice N (the index) to the cluster it belongs to
    """

    sliceCluster = []
    line = ensure_string(fp_lbl.readline())
    while line:
        # Only the first token (the cluster id) is needed; the distance
        # from the centroid is ignored.  (Removed the unused 'sliceNum'
        # counter the old code maintained but never read.)
        sliceCluster.append(int(line.split(' ')[0]))
        line = ensure_string(fp_lbl.readline())

    return sliceCluster
Ejemplo n.º 9
0
def GetWeights(fp):
    """
    Get the regions and weights from a weights file.

    @return dict mapping region -> weight
    """

    # Each line of the weights file has three components:
    #
    #   1) a floating point number, in fixed point or scientific notation,
    #      allowing an optional leading '-' and stray spaces after the sign
    #      or around the exponent (capture group 1)
    #   2) white space  (\s)
    #   3) a decimal region number  (\d+, capture group 2)
    #
    pattern = '(-?\ *[0-9]+\.?[0-9]*(?:[Ee]\ *-?\ *[0-9]+)?)\s(\d+)'

    weight_dict = {}
    for line in ensure_string(fp.readlines()):
        match = re.match(pattern, line)

        # Special case: the weight may be a single digit with no decimal
        # point (this should be a weight of '1').
        if not match:
            match = re.match('(\d)\s(\d)', line)

        if match:
            weight_dict[int(match.group(2))] = float(match.group(1))

    return weight_dict
Ejemplo n.º 10
0
def OpenSimpointFile(sp_file, type_str):
    """
    Open a simpoint file and check to make sure it's valid.  A valid
    simpoint file must start with an integer.

    @return file pointer (rewound to the start of the file)
    """

    fp = OpenFile(sp_file, type_str)
    line = ensure_string(fp.readline())
    field = line.split()
    # BUG FIX: guard against an empty file/first line, which previously
    # raised IndexError on field[0] instead of reporting a file error.
    if not field or not util.IsInt(field[0]):
        err_msg(type_str + sp_file)
    fp.seek(0, 0)

    return fp
Ejemplo n.º 11
0
def OpenLabelFile(lbl_file, type_str):
    """
    Open a labels file and check to make sure it's valid.  A valid
    labels file must start with an integer.

    @return file pointer (rewound to the start of the file)
    """

    fp = OpenFile(lbl_file, type_str)
    line = ensure_string(fp.readline())
    field = line.split()
    # BUG FIX: guard against an empty file/first line, which previously
    # raised IndexError on field[0] instead of reporting an invalid file.
    if not field or not IsInt(field[0]):
        PrintAndExit("Invalid " + type_str + lbl_file)
    fp.seek(0, 0)

    return fp
Ejemplo n.º 12
0
def GetWeights(fp):
    """
    Get the regions and weights from a weights file.

    Each line is expected to look like '0.123456 42' (weight, then region).

    @return dict mapping region -> weight
    """

    weight_dict = {}
    for line in ensure_string(fp.readlines()):
        # BUG FIX: the old pattern '(0\.\d+).*(\d+)' let the greedy '.*'
        # swallow all but the LAST digit of the region number, so a line
        # like '0.5 123' parsed region 3 instead of 123.  Match the region
        # as the whitespace-separated field after the weight instead.
        field = re.match('(0\.\d+)\s+(\d+)', line)
        if field:
            weight = float(field.group(1))
            region = int(field.group(2))
            weight_dict[region] = weight

    return weight_dict
Ejemplo n.º 13
0
def OpenWeightsFile(wt_file, type_str):
    """
    Open a weights file and check to make sure it's valid.  A valid
    weights file must start with a floating point number.

    @return file pointer (rewound to the start of the file)
    """

    fp = OpenFile(wt_file, type_str)
    line = ensure_string(fp.readline())
    field = line.split()
    # BUG FIX: the error path referenced an undefined name 'weight_str'
    # (NameError); use the 'type_str' parameter like the sibling Open*File
    # functions.  Also guard against an empty first line (IndexError).
    if not field or not util.IsFloat(field[0]):
        err_msg(type_str + wt_file)
    fp.seek(0, 0)

    return fp
Ejemplo n.º 14
0
def GetWarmuppoints(fp, wfactor):
    """
    Get the warmup start slice for each region from the Simpoint file.

    @return dict mapping region -> (slice number - wfactor)
    """

    warmup_map = {}
    for line in ensure_string(fp.readlines()):
        # Lines look like '<slice_num> <region>'.
        match = re.match('(\d+)\s(\d+)', line)
        if not match:
            continue
        slice_num = int(match.group(1))
        region = int(match.group(2))
        warmup_map[region] = slice_num - int(wfactor)

    return warmup_map
Ejemplo n.º 15
0
def GetSimpoints(fp):
    """
    Get the regions and slices from the Simpoint file.

    @return dict mapping region -> slice number
    """

    region_map = {}
    for line in ensure_string(fp.readlines()):
        # Lines look like '<slice_num> <region>'.
        m = re.match('(\d+)\s(\d+)', line)
        if m:
            region_map[int(m.group(2))] = int(m.group(1))

    return region_map
Ejemplo n.º 16
0
def GetSimpoints(fp):
    """
    Get the regions and slices from the Simpoint file.

    @return tuple (dict mapping region -> slice number,
                   highest region number seen)
    """

    region_to_slice = {}
    max_region = 0
    for line in ensure_string(fp.readlines()):
        # Lines look like '<slice_num> <region>'.
        m = re.match('(\d+)\s(\d+)', line)
        if not m:
            continue
        slice_num = int(m.group(1))
        region = int(m.group(2))
        max_region = max(max_region, region)
        region_to_slice[region] = slice_num

    return region_to_slice, max_region
Ejemplo n.º 17
0
    def GetLastMetric(self, sim_file, tid, options):
        """
        Get the last metric in a CMPSim output file.  This is the value
        for running the entire pinball.

        Seek until we are close to the end of the file before starting to
        look for data. This saves lots of time when processing very large
        files.

        @param sim_file File with simulator results to process
        @param tid TID of results to be processed
        @param options Options given on cmd line

        @return metric (-1 if an error occurs)
        """

        import struct

        # Get the size of the uncompressed simulator data file from the last 4 bytes
        # of the compressed file.  This value is the file size modulo 2^32.
        #
        try:
            fo = open(sim_file, 'rb')
        except IOError:
            msg.PrintMsg('ERROR: Unable to open CMPSim file for whole program pinball:\n   ' + \
                sim_file)
            return -1.0
        try:
            fo.seek(-4, 2)
            r = fo.read()
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed; also close the file on this error path
            # (previously leaked).
            fo.close()
            msg.PrintMsg('ERROR: There was a problem accessing data for the WP CMPSim file:\n   ' + \
                sim_file)
            return -1.0
        fo.close()
        size = struct.unpack('<I', r)[0]

        # Get a file pointer to the simulator file.
        #
        f = util.OpenCompressFile(sim_file)
        if f is None:
            return -1.0

        four_GB = 4294967296
        seek_past = 100
        num_chunk = 0

        # First seek to the point in the file given by the 'size'.
        #
        msg.PrintMsgPlus('Determining size of file: ' + sim_file)
        f.seek(size, 1)
        current = f.tell()

        # For files > 4GB, the value for 'size' is the true file size modulo
        # 2^32.  If this is the case, seek in 4GB chunks until the true file
        # size is found.
        #
        while current - (num_chunk * four_GB) >= size:

            # First see if we can seek a few bytes past the current file
            # pointer location.  If we don't advance the FP, then it's at the
            # end of the file. Otherwise, there is a 4GB chunk of the file to
            # be bypassed.
            #
            prev_pos = current
            f.seek(seek_past, 1)
            current = f.tell()
            if current == prev_pos:
                break
            else:
                msg.PrintMsg('Skipping 4GB in CMPSim file')
                f.seek(four_GB - seek_past, 1)
                num_chunk += 1
                current = f.tell()

            # Check to see if the last seek reached 'size' modulo 2^32
            # bytes. If so, then we are at the end of the file.
            #
            if current - (num_chunk * four_GB) < size:
                break

        size = num_chunk * four_GB + size

        # Skip to 100k bytes before the end of the file. Then start looking for the last set of
        # data in the file. This saves a large amount of time, especially for huge files.
        #
        msg.PrintMsgPlus('Skipping ' + locale.format('%d', size, True) +
                         ' bytes in file: ' + sim_file)
        f.seek(0)
        # BUG FIX: clamp to 0 so files smaller than 100k bytes don't
        # trigger a seek to a negative offset.
        f.seek(max(size - 100000, 0))

        # This is the code which needs to be modified in order to use a
        # different metric of interest for a new CMPSim.  The existing code
        # uses the metric CPI.
        #
        # Get the number of instructions and cycles for this thread from the
        # last matching line of the output.
        #
        # BUG FIX: 'last' previously still held a file offset (an int) from
        # the seek loop when no line matched, so last.split() crashed.
        # Track the matching line in its own variable and treat "no match"
        # as "no instructions".
        last = ''
        for line in ensure_string(f.readlines()):
            pos = line.find('Thread: ' + str(tid) + ' Instructions:')
            if pos != -1:
                last = line
        instr = cycles = 0
        if last != '':
            lst = last.split()
            instr = int(lst[3])
            cycles = int(lst[5])

        # Check to make sure there really is valid data.  If not, then print
        # a warning.  No need to exit with an error, because it's possible
        # for MPI_MT_MODE applications to have a different number of threads
        # in each process.  This means some processes may have a thread
        # 'tid', while this process may not.
        #
        if instr > 1:
            metric = cycles / float(instr)
        else:
            msg.PrintMsgPlus('WARNING: There were no instructions in WP CMPSim output for thread ' + \
                                   str(tid) + ' in file:\n         ' + sim_file)
            msg.PrintMsg(
                'Prediction error will not be calculated for this process.')
            if options.mode == config.MPI_MT_MODE:
                msg.PrintMsg(
                    'Since tracing mode is \'mpi_mt\', this may be OK.')
            metric = -1.0

        return metric
Ejemplo n.º 18
0
def GetMarker(fp):
    """
    Get the marker ("S:") or ("M:") for one slice.

    Marker data format:
        "S:" marker count  < other info >
        "M:" marker count  < other info >
     e.g.
       "S: 0x7ffff7dde120 1 /lib64/ld-linux-x86-64.so.2 0x7ffff7ddd000 + 0x1120"
       "M: 0x7efdef57a301 1 no_image 0"

    NOTE(review): when the 'Block id:' section is reached this returns []
    (a list), while every other path returns a dict -- callers must handle
    both shapes.

    @return dict with keys 'pc', 'count', 'imagename', 'offset',
       'sourceinfo' (or [] when the basic-block info section is reached)
    """

    mr = []
    # Skip lines until an 'S:'/'M:' marker record or EOF.
    line = ensure_string(fp.readline())
    while not (line.startswith('S:') or line.startswith('M:')) and line:
        # print 'Skipping line: ' + line

        # Don't want to skip the part of BBV files at the end which give
        # information on the basic blocks in the file.  If 'Block id:' is
        # found, then back up the file pointer to before this string.
        #
        if line.startswith('Block id:'):
            fp.seek(0 - len(line), os.SEEK_CUR)
            return []
        line = ensure_string(fp.readline())
    if line == '':
        # EOF without a marker: return a placeholder record.
        return {
            'pc': 0,
            'count': 0,
            'imagename': "no_image",
            'offset': '0x0',
            'sourceinfo': "Unknown:0"
        }

    # If vector only contains the char 'S', then assume it's a slice which
    # contains no data.
    #
    if line == 'S\n': return {'pc': 0, 'count': 0}
    if line == 'M\n': return {'pc': 0, 'count': 0}
    mr = line.split()
    #import pdb;  pdb.set_trace()
    if mr[0] == 'S:':
        # 'S:' records carry image path and source info; keep only the
        # basename of each path.
        try:
            imagepathlist = mr[3].split('/')
            imagebasename = imagepathlist[len(imagepathlist) - 1]
            sourceinfolist = mr[8].split('/')
            sourceinfo = sourceinfolist[len(sourceinfolist) - 1]
            return {
                'pc': mr[1],
                'count': mr[2],
                'imagename': imagebasename,
                'offset': mr[6],
                'sourceinfo': sourceinfo
            }
        except IndexError:
            # Record too short to hold image/source fields: fall back to
            # placeholder values.
            return {
                'pc': mr[1],
                'count': mr[2],
                'imagename': "no_image",
                'offset': '0x0',
                'sourceinfo': "Unknown:0"
            }
    else:  #mr[0] == 'M:':
        # 'M:' records never carry image info.
        return {
            'pc': mr[1],
            'count': mr[2],
            'imagename': "no_image",
            'offset': '0x0',
            'sourceinfo': "Unknown:0"
        }
Ejemplo n.º 19
0
def ReadVectorFile(v_file):
    """
    Read in a matrix composed of a list of list of floating point values in the
    format required by simpoint.

    Format of 1st line:
        num_rows: w
            num_rows = number of rows in matrix
            'w' indicates there are weights for each vector

    Format of subsequent lines with vector information:
        weight num_dim: value, value ... value
            weight  = 1/num_rows (i.e. all vectors have an equal weight)
            num_dim = number of values per row
            value  = the matrix values

    Example input:
        162:w
        0.00617 15:  -0.07 0.00 0.33 -0.22 -0.30 0.32 -0.05 0.27 0.15 0.32 -0.24 0.30 0.12 0.25 0.17
        0.00617 15:  -0.00 0.30 0.63 -0.30 -0.22 0.83 -0.13 0.08 0.13 0.62 -0.34 0.67 0.10 0.31 0.36
        0.00617 15:  -0.00 0.30 0.63 -0.30 -0.22 0.83 -0.13 0.08 0.13 0.62 -0.34 0.67 0.10 0.31 0.36

    @return list of lists which is the matrix
    """

    matrix = []
    weights = False

    # Read in the header of the file and do some error checking.
    #
    fp = OpenFile(v_file, 'normalized frequency vector file: ')
    line = ensure_string(fp.readline())
    field = line.split(':')
    # BUG FIX: 'num_vect' was kept as a string, so the final row-count
    # check compared str to int and could never pass.
    try:
        num_vect = int(field[0])
    except ValueError:
        msg.PrintAndExit('Illegal vector count in header:\n' + line)
    if len(field) == 2:
        if not 'w' in field[1]:
            msg.PrintAndExit('Illegal char given as weight: ' + field[1])
        else:
            weights = True

    count = 0
    line = ensure_string(fp.readline())
    # BUG FIX: loop until EOF with 'break'-free structure instead of
    # returning from inside 'while True', which made the row-count check
    # below unreachable dead code.
    while line != '':
        # Read in an optional weight, the number of values in the vector and the vector itself.
        #
        field = line.split()
        if weights:
            field = field[1:]
        if len(field) < 2:
            msg.PrintAndExit('Corrupted vector format:\n' + line)
        num_float = int(field[0].split(':')[0])
        if len(field) - 1 != num_float:
            msg.PrintAndExit('Incorrect number of values in vector:\n' + line)
        matrix.append([float(value) for value in field[1:]])
        count += 1

        line = ensure_string(fp.readline())

    # Make sure 'num_vect' vectors were read from the file.
    #
    if count != num_vect:
        msg.PrintAndExit('Incorrect number of vectors read: expected %d, found %d' %
                         (num_vect, count))

    return matrix
Ejemplo n.º 20
0
    def GetRegionMetric(self, sim_file, warmup, tid, options):
        """
        Get the metric of interest for just the representative region, not including
        any warmup instructions.

        It is assumed the first set of CMPSim output data is for the warmup
        instructions, if they exist.  This is true because when the CMPSim was run
        it should have printed out data at 'warmup_len' intervals.

        The last set of data will be for both the representative region and
        warmup instructions, if any.

        Of course, if there's only one set of data, then it is for the region only,
        because there aren't any warmup instruction.

        @param sim_file File with simulator results to process
        @param warmup Number of instructions in warmup section
        @param tid TID of results to be processed
        @param options Options given on cmd line

        @return metric (-1 if an error occurs)
        """

        # Get a file pointer to the simulator data file.
        #
        f = util.OpenCompressFile(sim_file)
        if f is None:
            return -1.0

        # This is the code which needs to be modified in order to use a
        # different metric of interest for a new CMPSim.  The existing code
        # uses the metric CPI.
        #
        # Get the first and last lines in the output that have the
        # cycle/instruction counts.  Assume the 1st is always the info for the
        # warmup because the CMPSim data is dumped ever 'warmup_length'
        # instructions.  Assume last data point is for warmup + region.  If
        # there is only one line, then assume it's only for the region.
        #
        # Current code assume the default Branch Predictor CMPSim is used.
        #
        # Always use the data for thread 0 because we don't generate prediction
        # error for cooperative region pinballs.  Need to fix this when
        # this capability is added.
        #
        first = ''
        last = ''
        for line in ensure_string(f.readlines()):
            pos = line.find('Thread: ' + str(0) + ' Instructions:')
            if pos != -1:

                # If the first time, save it.
                #
                if first == '':
                    first = line
                last = line

        # BUG FIX: if no matching line was found, 'last' is still '' and
        # last.split() below produced an IndexError; report it and bail out
        # with the error value instead.
        if last == '':
            msg.PrintMsgPlus('WARNING: No CMPSim data found for thread 0 in file:\n         ' + \
                sim_file)
            return -1.0

        l_list = last.split()
        l_instr = int(l_list[3])
        l_cycles = int(l_list[5])

        if warmup == 0:
            # No warmup. Calc metric from the last set of data.
            #
            if l_instr > 0:
                metric = l_cycles / float(l_instr)
            else:
                msg.PrintAndExit('(1) Unable to calculate CPI because number of instructions is 0:\n' \
                    '            ' + sim_file)
        else:
            # Get number of instructions & cycles for first set of data. (from warmup)
            #
            f_list = first.split()
            f_instr = int(f_list[3])
            f_cycles = int(f_list[5])

            # Calculate region data by subtracting the last values from the
            # first values. This gives number of cycles and instructions for
            # just the region.
            #
            # Check to make sure there really is valid data.  If not, the print a
            # warning.  No need to exit with an error, because it's possible for
            # MPI_MT_MODE applications to have a different number of threads in
            # each process.  This means some processes may have a thread 'tid',
            # while this process may not.
            #
            if l_instr - f_instr > 0:
                metric = (l_cycles - f_cycles) / float(l_instr - f_instr)
            else:
                msg.PrintMsgPlus('WARNING: It looks like there were no warmup instructions in region CMPSim output for thread ' + \
                   str(tid) + ' in file:\n         ' + sim_file)
                msg.PrintMsg('First icount: %s    Last icount: %s' % (locale.format('%d', f_instr, True), \
                    locale.format('%d', l_instr, True)))
                if l_instr < config.instr_cmpsim_phase:
                    msg.PrintMsg(
                        'Slice size may be too small to calculate prediction error.'
                    )
                    msg.PrintMsg(
                        'It needs to be at least 1,000,000 for CMPSim to generate valid data.'
                    )
                msg.PrintMsg(
                    'Prediction error for this process may be suspect.')
                if hasattr(options,
                           'mode') and options.mode == config.MPI_MT_MODE:
                    msg.PrintMsg(
                        'Since tracing mode is \'mpi_mt\', this may be OK.')
                metric = -1.0

        return metric