示例#1
0
    def ValidDriver(self, path):
        """
        For SDE, make sure the 'simpoint' binary is in the kit instead of
        verifying the drivers are valid.

        No need to verify drivers for SDE, as the SDE tool doesn't use a
        pinplay driver any more.  (There is a unified controller.)

        @param path Path to kit to be validated

        @return True if Simpoint found, otherwise exit with an error msg
        """

        # Pick the Simpoint binary name appropriate for this platform.
        #
        if util.Platform() in (config.WIN_NATIVE, config.WIN_CYGWIN):
            binary = 'simpoint.exe'
        else:
            binary = 'simpoint'
        simpoint = os.path.join(path, self.simpoint_path, binary)
        if os.path.isfile(simpoint):
            return True

        # Simpoint binary is missing; report the problem and stop.
        #
        msg.PrintMsg(
            'ERROR: The required binary \'simpoint\' was not found.')
        msg.PrintMsg('   ' + simpoint)
        msg.PrintMsg('Perhaps the SDE kit installation was incomplete. Check to make sure\n' \
            'there weren\'t any errors during the install.')
        sys.exit(1)
示例#2
0
 def setUp(self):
     """Create the fixture data, mocked translator/platform, and settings."""
     self.data = [
         ('title', 'Bonanza'),
         ('subtitle', 'The Shootout'),
         ('description', 'Yee haw!'),
         ('starttime', '2008-10-04 18:00:00'),
         ('endtime', '2008-10-04 19:00:00'),
         ('channum', '23'),
     ]
     self.translator = Mock()
     self.platform = util.Platform()
     self.settings = mythtv.MythSettings(self.platform, self.translator)
示例#3
0
 def setUp(self):
     """Build MythSettings from the private config and open the database."""
     self.translator = mockito.Mock()
     self.platform = util.Platform()
     self.settings = mythtv.MythSettings(self.platform, self.translator)
     private_config = util.OnDemandConfig()
     # Copy the connection settings needed for live-database tests.
     for key in ('mysql_host', 'mysql_password', 'mythtv_host'):
         self.settings.put(key, private_config.get(key))
     self.db = mythdb.MythDatabase(self.settings, self.translator)
def FmtPrintCmd(file_name):
    """
    Format the appropriate command to 'cat' a file for a given architecture.

    @return cmd line
    """

    # Native Windows prints a file with 'type'; everything else uses 'cat'.
    #
    viewer = 'type ' if util.Platform() == config.WIN_NATIVE else 'cat '

    return viewer + file_name
示例#5
0
    def WindowsNativeCheck(self, options):
        """
        If running on native Windows environment, the scripts currently only
        run on one core.  Force 'num_cores' to be 1 on this platform.

        Restriction to be removed once the scripts work on more cores.

        @return No return value
        """

        # Only clamp the core count when running on native Windows and the
        # 'num_cores' option actually exists on this options object.
        #
        if util.Platform() == config.WIN_NATIVE and hasattr(options, 'num_cores'):
            options.num_cores = config.num_cores = 1

        return
def PrintMsgNoCR(msg):
    """
    Prints a message to stdout, but don't add CR.

    Use the method subprocess.Popen() in order to
    ensure the strings printed are in order.

    @param msg String to print

    @return No return value
    """

    # Import util.py here instead of at the top level to keep from having
    # recursive module includes, as util imports msg.
    #
    import util, config

    platform = util.Platform()

    # Remove any non-printing char.  Use ''.join() instead of filter() so the
    # result is a string on both Python 2 and Python 3; on Python 3 filter()
    # returns a lazy iterator, which would break the string operations below.
    #
    msg = ''.join(c for c in msg if c in string.printable)

    # Escape the char '`' so the shell won't interpret it.
    #
    if '`' in msg:
        msg = msg.replace('`', '\`')
    if '"' in msg:
        # String 'msg' has embedded double quotes.  Need to use
        # single quotes to bracket 'msg'.
        #
        if platform == config.WIN_NATIVE:
            cmd = "echo " + msg
        else:
            cmd = "echo '" + msg + "'"
    else:
        # String 'msg' does not have embedded double quotes. Thus
        # can use double quotes to bracket 'msg'.
        #
        if platform == config.WIN_NATIVE:
            if msg == '':
                # 'echo[' prints an empty line on native Windows.
                cmd = 'echo['
            else:
                # 'echo|set /p=' prints without a trailing newline on Windows.
                cmd = 'echo|set /p=' + msg
        else:
            cmd = 'echo -n "' + msg + '"'
    p = subprocess.Popen(cmd, shell=True)
    p.wait()
示例#7
0
文件: kit.py 项目: mazalves/OrCS
    def GetNullapp(self, basename):
        """
        Get the path to the nullapp for the required platform and architecture.

        @param basename Basename (file name w/o extension) of pinball to process

        @return Explicit path to nullapp
        """

        # Build the explicit path to the nullapp matching the pinball's arch.
        #
        arch = util.FindArchitecture(basename)
        nullapp_path = os.path.join(self.ArchSpecificDir(arch), self.nullapp)

        if util.Platform() == config.WIN_CYGWIN:
            # SDE is a native Windows app and requires a Windows style path.
            # The path computed above is in Cygwin format, so translate it
            # with 'cygpath'.
            #
            try:
                nullapp_path = subprocess.check_output(
                    ['cygpath', '-w', nullapp_path])
            except (subprocess.CalledProcessError, OSError):
                msg.PrintAndExit(
                    'Could not get a valid Windows path from the Cygwin path to nullapp'
                )

            # Turn backslash separators into forward slashes (acceptable in a
            # Windows path) so they are not treated as escape characters, and
            # strip the trailing newline produced by 'cygpath'.
            #
            nullapp_path = nullapp_path.replace('\\', '/').rstrip()

        # Final check to ensure it's a valid nullapp binary.
        #
        if not os.path.isfile(nullapp_path):
            msg.PrintAndExit('Unable to find valid nullapp')

        return nullapp_path
示例#8
0
    def Run(self):
        """
        Get all the user options and run the logger.

        @return Exit code from the logger pintool
        """

        options = self.ParseCommandLine()

        # Get the kit to be used for logging.
        #
        kit = self.GetKit()

        # Set the binary type in the kit.  Assume the first string in the
        # command line is the binary.  Only do this if user hasn't given 'pid'
        # or 'pintool_help'.
        #
        if not (hasattr(options, 'pid') and options.pid) and \
           not (hasattr(options, 'pintool_help') and options.pintool_help):
            binary = options.command.split()[0]
            kit.SetBinaryType(binary)
            if kit.binary_type == config.ARCH_INVALID:
                msg.PrintMsg(
                    '\nWARNING: Unable to determine binary file type.\n'
                    'Perhaps the string assumed to be the binary is incorrect.\n'
                    '   Command line:     ' + options.command + '\n'
                    '   Binary: (assumed) ' + binary + '\n'
                    'Setting binary type to \'Intel64\' as the default value.\n'
                )
        else:
            # If user has given 'pid' or wants 'pintool_help', then need to
            # explicitly set the architecture of the binary in the kit.
            #
            kit.binary_type = options.arch

        # Now that we know the type of the binary, set the user defined pintool,
        # if one exists.  Need to wait until now to set the tool because the
        # user may only have the tool in the architecture dependent directory
        # for this type of application.  Thus we need the binary type in order
        # to find it.
        #
        if hasattr(options, 'pintool') and options.pintool:
            kit.SetPinTool(options.pintool)

        # If user just wants 'pintool_help' go ahead and print it, then exit
        # the script.  Need to do this after we get the kit in order to print
        # the help for the correct kit.  Also needs to be after any user
        # defined pintools have been added to the kit. This ensures the
        # correct pintool help msg will be displayed.
        #
        if hasattr(options, 'pintool_help') and options.pintool_help:
            result = util.PintoolHelpKit(kit, options)

            return result

        # Get path to the kit pin binary, any user defined Pin knobs and
        # the pintool.
        #
        cmd = os.path.join(kit.path, kit.pin)
        if hasattr(options, 'pin_options') and options.pin_options:
            cmd += ' ' + options.pin_options
        if hasattr(options, 'pid') and options.pid:
            # PinPlay and SDE kits use different knobs to attach to a
            # running process.
            #
            if kit.kit_type == config.PINPLAY:
                cmd += ' -pid ' + str(options.pid)
            elif kit.kit_type == config.SDE:
                cmd += ' -attach-pid ' + str(options.pid)

        cmd += kit.GetPinToolKnob()

        # Pintool knobs required for logging.
        #
        if not (hasattr(options, 'no_log') and options.no_log):
            cmd += ' -log'
            cmd += ' -xyzzy '

            # Add any knobs required by user options or the type of binary
            #
            if hasattr(options, 'log_file') and options.log_file:
                cmd += self.AddOptionIfNotPresent(options, '-log:basename',
                                                  options.log_file)
            cmd += util.AddMt(options)
            cmd += util.AddCompressed(options)
            cmd += util.GetMsgFileOption(options.log_file)

            # Add any user logging options given by the user
            #
            if hasattr(options, 'log_options') and options.log_options:
                cmd += options.log_options

        # Need to add shared memory knobs and create memory pools for MPI/MP apps.
        #
        create_mem_pool = False
        if options.mode == config.MPI_MODE or options.mode == config.MPI_MT_MODE or \
           options.mode == config.MP_MODE or options.mode == config.MP_MT_MODE:

            # Need to add the MPI options to the existing MT options already in
            # 'pintool_options'.
            #
            if not (hasattr(options, 'no_log') and options.no_log):
                cmd += self.MpModeOptions(options)
                create_mem_pool = True
            else:
                create_mem_pool = False

        # Format the MPI command line, if required to run the command line.
        #
        if options.mode == config.MPI_MODE or options.mode == config.MPI_MT_MODE:
            cmd = util.MPICmdLine(options) + ' ' + cmd

        # If logging enabled for multi-threaded app, generate the mp_pool.
        #
        if create_mem_pool:
            log_key = self.RunMPCreate(kit, options)
            cmd += log_key

        # Add program and arguments.  When attaching to a running process via
        # 'pid' there is no application command line to append.  Guard with
        # hasattr() like every other 'pid' access in this method, since
        # 'options' is not guaranteed to define the attribute.
        #
        if not (hasattr(options, 'pid') and options.pid):
            cmd += ' -- ' + options.command

        # Print out command line used for pin and pintool
        #
        if not (hasattr(options, 'no_print_cmd')
                and options.no_print_cmd) or options.verbose:
            string = '\n' + cmd
            msg.PrintMsg(string)

        # Finally execute the command line and gather stdin and stdout.
        # Exit with the return code from executing the logger.
        #
        result = 0
        if not config.debug:
            platform = util.Platform()
            if not (hasattr(options, 'no_print_cmd') and options.no_print_cmd):
                # Prefix with 'time' (non-Windows only) so the run is timed.
                #
                if platform != config.WIN_NATIVE:
                    cmd = 'time ' + cmd
            p = subprocess.Popen(cmd, shell=True)
            p.communicate()
            result = p.returncode

        # If logging enabled for multi-threaded app, delete the mp_pool.
        #
        if create_mem_pool:
            self.RunMPDelete(kit, log_key, options)

        return result
示例#9
0
文件: kit.py 项目: mazalves/OrCS
class Kit(object):
    """
    Setup the path and pintools for the PinPlay kit.

    The attributes below are class-level defaults.  InitKit(), called from
    __init__(), locates the kit on disk and adds its script directories to
    the PATH environment variable.
    """

    # First, initialize all the variables in the kit to default values.
    #

    # Path to the top level directory of the kit.
    #
    path = ''

    # What type of a kit is this.
    #
    kit_type = config.PINPLAY

    # Choose the appropriate Pin binary/tool for this platform.
    # NOTE: this runs once, at class-definition time.
    #
    platform = util.Platform()
    if platform is None:
        msg.PrintAndExit(
            'Could not identify the OS of the system where scripts are being run.'
        )
    if platform == config.WIN_NATIVE or platform == config.WIN_CYGWIN:
        # Windows
        pinbin = 'pin.exe'
        pintool = 'pinplay-driver.dll'  # Need to verify this is the correct name
        nullapp = 'nullapp.exe'
    else:
        # Linux
        pinbin = 'pinbin'
        pintool = 'pinplay-driver.so'
        nullapp = 'nullapp'

    # Some definitions for the kit.
    #
    default_dir = 'pinplay'
    pin = 'pin'
    type = 'PinPlay'
    prefix = ''

    # In case there are any default knobs needed for this pintool.
    #
    default_knobs = ''

    # Path to the Pin binary itself for both architectures.
    #
    pin_dir = os.path.join('extras', 'pinplay', 'bin')

    # Paths to the Pin binary ('pinbin') for both architectures.
    #
    pinbin_intel64 = os.path.join('intel64', 'bin', pinbin)
    pinbin_ia32 = os.path.join('ia32', 'bin', pinbin)

    # Paths to the PinPlay driver pintool for both architectures.
    #
    driver_intel64 = os.path.join(pin_dir, 'intel64', 'pinplay-driver.so')
    driver_ia32 = os.path.join(pin_dir, 'ia32', 'pinplay-driver.so')

    # Path to the shell scripts in this kit
    #
    script_path = config.pin_script_path

    # Path to simpoint
    #
    simpoint_path = os.path.join('extras', 'pinplay', 'PinPoints', 'bin')

    # Knobs which have the same behavior in the various kits, but a different
    # name in each kit.
    #
    knob_length = '-log:length'
    knob_skip = '-log:skip'
    knob_regions_epilog = '-log:regions:epilog'
    knob_regions_in = '-log:regions:in'
    knob_regions_out = '-log:regions:out'
    knob_regions_prolog = '-log:regions:prolog'
    knob_regions_warmup = '-log:regions:warmup'
    knob_pcregions_in = '-log:pcregions:in'
    knob_pcregions_out = '-log:pcregions:out'
    knob_pcregions_merge_warmup = '-log:pcregions:merge_warmup'

    # Is the binary 32-bit or 64-bit?  Only needed for the logging phase.
    #
    binary_type = config.ARCH_INVALID

    def __init__(self):
        """
        Method called when object is instantiated to initialize object.

        @return No return value
        """

        self.InitKit(self.script_path)

    def SetBinaryType(self, binary):
        """
        Set the file type: either 32-bit or 64-bit.

        @param binary Binary used to determine type

        @return No return value
        """

        self.binary_type = util.FileType(binary)

    def ValidDriver(self, path):
        """
        Is this a path to a kit with a valid pinplay driver?

        @param path Path to kit to be validated

        @return True if valid drivers found, otherwise exit with an error msg
        """

        # See if the 64-bit driver exists
        #
        arch = 'intel64'
        if os.path.isfile(os.path.join(path, self.driver_intel64)):

            # See if the 32-bit driver exists
            #
            arch = 'ia32'
            if os.path.isfile(os.path.join(path, self.driver_ia32)):
                return True

        # There is a valid 'pinbin' binary, or this method wouldn't get called, but
        # there isn't a valid pinplay-driver.  'arch' names the architecture
        # whose driver was found to be missing.
        #
        msg.PrintMsg('ERROR: The required PinTool \'' + self.pintool +
                     '\' for arch \'' + arch + '\' was not found.')
        msg.PrintMsg('Perhaps the PinPlay kit installation was incomplete. Check to make sure\n' \
            'there weren\'t any errors during the install.')
        sys.exit(1)

    def ValidKit(self, path):
        """
        Is this a path to a valid kit?

        A valid kit must contain both the binary 'pinbin' and the
        PinPlay driver 'pintool' for both intel64 and ia32.

        @param path Path to kit to be validated

        @return False if kit not valid, else the return value from self.ValidDriver()
        """

        if os.path.isdir(path):

            # See if the 64-bit pinbin binary exists
            #
            if os.path.isfile(os.path.join(path, self.pinbin_intel64)):

                # See if the 32-bit pinbin binary exists
                #
                if os.path.isfile(os.path.join(path, self.pinbin_ia32)):
                    return self.ValidDriver(path)
        return False

    def GetHomeDir(self):
        """
        Get the location defined by the user's 'home' parameter/option.

        @return Kit home directory from the global configuration
        """

        return config.pinplayhome

    def GetKitLocation(self, script_path):
        """
        Look for a kit in several locations, including the 'home' directory, if it's defined.

        @param script_path Path to scripts directory in a kit

        @return Path to PinPlay kit
        """

        # Get path to the default version of the kit in users
        # home directory.
        #
        home = os.path.expanduser("~")
        path = os.path.join(home, self.default_dir)

        # If default dir name not found in home directory, then try the default
        # in the current directory.
        #
        if not os.path.exists(path):
            path = os.path.join(os.getcwd(), self.default_dir)

        # If default dir name is not found in the current directory, then check
        # to see if this Python script resides in a valid kit.  If so, then use
        # this as the kit location.  Assume if the scripts are in a valid kit,
        # they reside in the directory:  $PINPLAYHOME/script_path,  where
        # PINPLAYHOME is the root directory of the kit.
        #
        if not os.path.exists(path):
            script_dir = util.GetScriptDir()
            base_dir = script_dir.replace(script_path, '')
            # If the replace changed nothing, the scripts are not in a kit.
            if base_dir != script_dir:
                path = base_dir

        # If a 'home' directory is given for the kit, override any kit
        # locations just discovered and use the location given in this
        # parameter.
        #
        kit_home_dir = self.GetHomeDir()
        if kit_home_dir:
            if kit_home_dir[0] == os.sep:
                # Absolute path name, use as is.
                #
                path = kit_home_dir
            else:
                # Else assume it's a directory in the users home directory.
                #
                path = os.path.join(home, kit_home_dir)

        return path

    def InitKit(self, script_path):
        """
        Get the path to a valid kit, the appropriate tool name and add several paths
        to the environment variable PATH required to find script/utilities.

        @param script_path Path to scripts directory in a kit

        @return No return value
        """

        self.path = self.GetKitLocation(script_path)

        # Check to see if it's a valid kit. If not, exit with an error.
        #
        if not self.ValidKit(self.path):
            msg.PrintMsg('ERROR: Path to the ' + self.type +
                         ' kit was not found.')
            msg.PrintMsg('Default kit location is: ' + \
                os.path.realpath(os.path.join(os.path.expanduser("~"), self.default_dir)))
            sys.exit(1)

        # Add several directories in the PinPlay kit to the environment variable PATH.
        #
        os.environ["PATH"] += os.pathsep + os.path.join(
            self.path, self.script_path)
        if self.simpoint_path != self.script_path:
            os.environ["PATH"] += os.pathsep + os.path.join(
                self.path, self.simpoint_path)

    def ArchSpecificDir(self, arch):
        """
        Get the architecture dependent directory where the pintools are located.

        @param arch Architecture of the binary/pinball kit is using

        @return Explicit path to directory
        """

        pintool_path = os.path.join(self.path, self.pin_dir)
        if arch == config.ARCH_IA32:
            pintool_path = os.path.join(pintool_path, 'ia32')
        elif arch == config.ARCH_INTEL64:
            pintool_path = os.path.join(pintool_path, 'intel64')
        else:
            msg.PrintAndExit('Could not identify the architecture of the pintools to run.\n' \
                'Perhaps you forgot to set the binary type using the parameter \'mode\'.')

        return pintool_path

    def SetPinTool(self, user_pintool, pinball=''):
        """
        Set the pintool to the users tool instead of the default for this kit.

        User can give either an explicit path to the tool or put the tool in
        the architecture dependent directory.  In either case, check to make
        sure the pintool exists.

        @param user_pintool User defined pintool to use in this kit
        @param pinball Optional - pinball kit is processing

        @return No return value; exits with an error if the tool is not found
        """

        if os.path.isfile(os.path.realpath(user_pintool)):
            self.pintool = user_pintool
        else:
            # If pinball is given, use it to find the architecture specific directory,
            # otherwise just use the parameter 'binary_type'.
            #
            if pinball:
                arch = util.FindArchitecture(pinball)
                tool = os.path.join(self.ArchSpecificDir(arch), user_pintool)
            else:
                tool = os.path.join(self.ArchSpecificDir(self.binary_type),
                                    user_pintool)
            if not os.path.isfile(os.path.realpath(tool)):
                msg.PrintAndExit('Could not find user defined pintool: ' +
                                 user_pintool)
            self.pintool = user_pintool

    def GetPinTool(self, pinball):
        """
        Get the path to the pintool for the required architecture.

        If a pinball is given to the method, figures out the correct
        architecture for the pintool from the pinball.

        @param pinball Pinball kit is processing

        @return Path to the pintool for this kit
        """

        if os.path.isfile(os.path.realpath(self.pintool)):
            # If the pintool already has an explicit path, possible if the user has defined the pintool,
            # then just use it as is.
            #
            pintool_path = self.pintool
        else:
            # Otherwise, assume the tool is in the architecture dependent pintool directory.
            #
            if pinball:
                arch = util.FindArchitecture(pinball)
            else:
                arch = self.binary_type
            pintool_path = os.path.join(self.ArchSpecificDir(arch),
                                        self.pintool)

        return pintool_path

    def GetPinToolKnob(self, pinball=''):
        """
        Get the knob required to add the pintool for this kit to the Pin command line.

        Some kits don't required a pintool knob.  If that the case, just return an empty string.
        Pin based kits require a pintool knob, so return it.

        @param pinball Optional - pinball kit is processing

        @return String, including '-t', which defines explict path to pintool
        """

        return ' -t ' + self.GetPinTool(pinball)

    def GetNullapp(self, basename):
        """
        Get the path to the nullapp for the required platform and architecture.

        @param basename Basename (file name w/o extension) of pinball to process

        @return Explicit path to nullapp
        """

        # Get explicit path to the correct nullapp for this arch.
        #
        arch = util.FindArchitecture(basename)
        nullapp_path = os.path.join(self.ArchSpecificDir(arch), self.nullapp)

        platform = util.Platform()
        if platform == config.WIN_CYGWIN:
            # Need to get path to nullapp using Windows format.  This is required
            # because SDE is a native Windows app and requires the path to be
            # in Windows format.  However, the path set above is in Cygwin format,
            # hence it must be converted.
            #
            try:
                nullapp_path = subprocess.check_output(
                    ['cygpath', '-w', nullapp_path])
            except (subprocess.CalledProcessError, OSError):
                msg.PrintAndExit(
                    'Could not get a valid Windows path from the Cygwin path to nullapp'
                )

            # Use forward slashes for the directory separator in the Windows path
            # (which is acceptable) because backslashes are treated as the escape character.
            # Also strip the trailing newline emitted by 'cygpath'.
            #
            nullapp_path = nullapp_path.replace('\\', '/')
            nullapp_path = nullapp_path.rstrip()

        # Final check to ensure it's a valid nullapp binary
        #
        if not os.path.isfile(nullapp_path):
            msg.PrintAndExit('Unable to find valid nullapp')

        return nullapp_path
#
util.RunCmd(FmtPrintCmd(blank_foot), options, '',
            concurrent=False,
            print_time=False,
            print_cmd=False)
msg.PrintMsg('<![CDATA[')
msg.PrintMsg('CHANGEME: Give the information of your test machine:')
msg.PrintMsg(
    'CHANGEME: brand/model (e.g. "IBM Intellistation", "HP xxx", "Intel prototype"')
msg.PrintMsg('CHANGEME: Memory size')
msg.PrintMsg('CHANGEME: Cache sizes: I-cache, D-cache, L1, L2...')
msg.PrintMsg('')

# Don't try to run these commands on native Windows.
#
platform = util.Platform()
if platform != config.WIN_NATIVE:
    # NOTE: fixed misspelling 'follwing' -> 'following' in the two
    # user-visible messages below.
    #
    msg.PrintMsg('The following is automatically generated using \'uname -a\'')
    result = util.RunCmd('uname -a', options, '',
                         concurrent=False,
                         print_time=False,
                         print_cmd=False)
    msg.PrintMsg(
        'The following is automatically generated using \'cat /proc/cpuinfo \'')
    result = util.RunCmd('cat /proc/cpuinfo', options, '',
                         concurrent=False,
                         print_time=False,
                         print_cmd=False)

# Final set of default info to print
#
示例#11
0
def run(args, shargs):
    """
    Run one test workload from the CI plan and publish its results.

    Extracts the built package, creates a conda environment from the plan's
    spec, runs the workload's runner command, analyzes the results against
    the efficiency baseline, and writes a JSON report.

    @param args   Parsed command line arguments (suite, workload, platform, ...)
    @param shargs Optional shard arguments; element 3 is the shard number

    @return No return value; exits non-zero if the run or analysis failed
    """

    # When sharded, the workload's result name carries the shard number so
    # each shard writes to (and is reported from) its own directory.
    #
    if shargs:
        print('running shard: ', shargs[3])
        workload_name = str(args.workload) + '-' + shargs[3]
    else:
        workload_name = args.workload

    root = pathlib.Path('.').resolve() / 'tmp'
    # Renamed from 'input' to avoid shadowing the builtin.
    input_dir = root / 'input'
    output_root = root / 'output'
    output = output_root / args.suite / workload_name / args.platform / 'BATCH_SIZE={}'.format(
        args.batch_size)

    with open('ci/plan.yml') as fp:
        plan = yaml.safe_load(fp)

    platform = plan['PLATFORMS'][args.platform]
    variant_name = platform['variant']
    variant = plan['VARIANTS'][variant_name]
    arch = variant['arch']

    suites = plan['SUITES']
    suite = suites.get(args.suite)
    if suite is None:
        sys.exit('Invalid suite. Available suites: {}'.format(list(suites)))
    platform_cfg = suite['platforms'][args.platform]

    workload = suite['workloads'].get(args.workload)
    if workload is None:
        sys.exit('Invalid workload. Available workloads: {}'.format(
            list(suite['workloads'])))

    popt = util.PlanOption(suite, workload, args.platform)

    # Locate the package under test: a local build or an archived artifact.
    #
    shutil.rmtree(input_dir, ignore_errors=True)
    archive_dir = pathlib.Path(args.root) / args.pipeline / args.build_id
    if args.local:
        pkg_path = pathlib.Path('bazel-bin/pkg.tar.gz')
        outdir = root / 'nas'
        version = '0.0.0.dev0'
    else:
        pkg_path = archive_dir / 'build' / variant_name / 'pkg.tar.gz'
        outdir = archive_dir
        version = args.version

    util.printf('--- Extracting {} -> {}'.format(pkg_path, input_dir))
    # NOTE(review): tarfile.extractall() trusts archive member paths; this is
    # acceptable only because the archive comes from our own build pipeline.
    with tarfile.open(str(pkg_path), 'r') as tar:
        tar.extractall(input_dir)

    shutil.rmtree(output_root, ignore_errors=True)
    output.mkdir(parents=True)

    cwd = popt.get('cwd', '.')
    spec = pathlib.Path(popt.get('conda_env'))

    # Build (or reuse) a cached base conda env keyed by the spec file's hash,
    # then clone it so wheel installs don't pollute the shared cache.
    #
    util.printf('--- :snake: Creating conda env from {}'.format(spec))
    instance_name = os.getenv('BUILDKITE_AGENT_NAME', 'harness')
    sig = hashlib.md5()
    sig.update(spec.read_bytes())
    base_path = pathlib.Path('~', '.t2', instance_name,
                             sig.hexdigest()).expanduser()

    base_env = util.CondaEnv(base_path)
    base_env.create(spec)
    conda_env = base_env.clone(root / pathlib.Path('cenv'))
    env = os.environ.copy()
    env.update(conda_env.env())

    for whl in popt.get('wheels', []):
        whl_filename = whl.format(arch=arch, version=version)
        whl_path = input_dir / whl_filename
        conda_env.install(whl_path)

    # Per-platform environment tweaks driven by Buildkite metadata.
    #
    if 'stripe' in args.platform:
        env['USE_STRIPE'] = '1'
    if 'cuda' in args.platform:
        env['CUDA_VISIBLE_DEVICES'] = buildkite_metadata(
            'CUDA_VISIBLE_DEVICES', '0')
    env['PLAIDML_DEVICE_IDS'] = buildkite_metadata('PLAIDML_DEVICE_IDS')
    env['PLAIDML_EXPERIMENTAL'] = buildkite_metadata('PLAIDML_EXPERIMENTAL',
                                                     '0')

    util.printf(
        '--- :bazel: Running test {suite}/{workload} on {platform}'.format(
            suite=args.suite,
            workload=args.workload,
            platform=args.platform,
        ))

    # Assemble the runner's argument list: platform args bracket the plan
    # option args in prepend/args/append order.
    #
    cmd_args = platform_cfg.get('prepend_args', []) + popt.get(
        'prepend_args', [])
    cmd_args += platform_cfg.get('args', []) + popt.get('args', [])
    cmd_args += platform_cfg.get('append_args', []) + popt.get(
        'append_args', [])

    if shargs:
        cmd_args += shargs

    ctx = dict(
        results=output,
        batch_size=args.batch_size,
        workload=args.workload,
    )
    cmd_args = [str(x).format(**ctx) for x in cmd_args]
    if 'stripe' in args.platform:
        # Stripe runners do not accept this flag; drop it when present.
        try:
            cmd_args.remove('--no-kernel-timing')
        except ValueError:
            pass

    cmd = [popt.get('runner')] + cmd_args
    retcode = util.call(cmd, cwd=cwd, env=env)

    build_url = os.getenv('BUILDKITE_BUILD_URL')
    if build_url:
        build_url = '{}#{}'.format(build_url, os.getenv('BUILDKITE_JOB_ID'))
    else:
        build_url = DEFAULT_BUILD_URL

    gpu_flops = plan['CONST']['gpu_flops']
    baseline_name = plan['CONST']['efficiency_baseline']

    # Compare this run against the efficiency baseline platform.
    #
    test_info = util.TestInfo(
        (args.suite, suite),
        (workload_name, workload),
        (args.platform, util.Platform(args.platform, gpu_flops)),
        args.batch_size,
    )

    golden_info = util.TestInfo(
        (args.suite, suite),
        (args.workload, workload),
        (baseline_name, util.Platform(baseline_name, gpu_flops)),
        args.batch_size,
    )

    result = analysis.Result(output_root, test_info, golden_info)
    report = {
        'build_url': build_url,
        'compare': result.test_result.compare,
        'efficiency': result.efficiency,
        'errors': result.test_result.errors,
        'failures': result.test_result.failures,
        'ratio': result.ratio,
        'reason': result.test_result.reason(),
        'status': result.test_result.status(),
        'compile_duration': result.cur.compile_duration,
        'cur.execution_duration': result.cur.execution_duration,
        'ref.execution_duration': result.ref.execution_duration,
    }

    report_fn = 'report.json'
    with (output / report_fn).open('w') as fp:
        json.dump(report, fp)

    # Publish the whole output tree next to the archived build.
    #
    src = output_root
    dst = outdir / 'test'
    copy_tree(str(src), str(dst))

    if retcode:
        sys.exit(retcode)
    if not result.test_result.is_ok():
        sys.exit(1)
示例#12
0
class SimPoint(object):
    """
    Run Simpoint on a basic block vector (BBV) file -- optionally combined
    with an LRU stack distance vector (LDV) file -- and generate a regions
    CSV file from the resulting clusters.
    """

    # Chose the appropriate Simpoint binary for this platform.
    #
    platform = util.Platform()
    if platform == config.WIN_NATIVE or platform == config.WIN_CYGWIN:
        simpoint_bin = 'simpoint.exe'
    else:
        simpoint_bin = 'simpoint'
    # Script used both to pre-process vectors and to create the CSV file.
    csv_bin = 'regions.py'
    # Generic file names Simpoint works on; input files are copied to these.
    generic_bbv_name = 't.bb'
    generic_ldv_name = 't.ldv'
    proj_bbv_file = 'projected_t.bb'
    weight_ldv_file = 'weighted_t.ldv'
    freq_vect_file = 't.fv'

    def ParseCommandLine(self):
        """
        Get the options from the command line and check for errors.

        @return tuple with parsed options and unparsed args
        """

        # Define and get command line options.
        #
        version = '$Revision: 1.33 $'
        version = version.replace('$Revision: ', '')
        ver = version.replace(' $', '')
        us = '%prog --bbv_file FILE --data_dir DIR FILE --simpoint_file FILE [options]'
        desc = 'Runs Simpoint and then generates the region CSV file.  ' \
               'Input to Simpoint can be just an BBV file or a combination of BBV/LDV files. \n\n' \
                'Required options: --bbv_file, --data_dir, --simpoint_file'

        util.CheckNonPrintChar(sys.argv)
        parser = optparse.OptionParser(
            usage=us,
            version=ver,
            description=desc,
            formatter=cmd_options.BlankLinesIndentedHelpFormatter())

        cmd_options.debug(parser)
        cmd_options.global_file(parser)
        cmd_options.list(parser, '')
        cmd_options.bbv_file(parser, '')
        cmd_options.data_dir(parser)
        cmd_options.simpoint_file(parser)
        cmd_options.ldv(parser, '')
        cmd_options.combine(parser, '')
        cmd_options.cutoff(parser, '')
        cmd_options.focus_thread(parser, '')
        cmd_options.maxk(parser, '')
        cmd_options.num_cores(parser, '')
        cmd_options.simpoint_options(parser, '')

        (options, args) = parser.parse_args()

        # Added method cbsp() to 'options' to check if running CBSP.
        #
        util.AddMethodcbsp(options)

        # Must have option '--ldv', even if using option '--combine', in order to
        # process BBV/LDV both.  Let user know if '--combine' used w/o '--ldv'.
        #
        if not options.ldv and options.combine != -1.0:
            msg.PrintMsgPlus('WARNING: Option \'--combine\' detected without \'--ldv\'.  Only using BBV for ' \
                'Simpoint.  \n              Must explicitly specify \'--ldv\' in order to use both BBV/LDV.\n')
        if options.ldv:
            msg.PrintMsgPlus(
                'Using both BBV/LDV files when running Simpoint\n')

        # If option combine is not set, then set it to the default value.
        # Check to make sure combine an acceptable value.
        #
        util.SetCombineDefault(options)
        util.CheckCombine(options)

        # Read in an optional configuration files and set global variables.
        #
        config_obj = config.ConfigClass()
        config_obj.GetCfgGlobals(options,
                                 False)  # Don't need to require 4 variables

        # Error check input to make sure all required options are on the command line.
        #
        if options.bbv_file == '':
            msg.PrintAndExit(
                'Basic block vector file must be defined with option: --bbv_file FILE'
            )
        if options.data_dir == '':
            msg.PrintAndExit(
                'Simpoint data directory must be defined with option: --data_dir DIR'
            )
        if options.simpoint_file == '':
            msg.PrintAndExit(
                'Simpoint output must be defined with option: --simpoint_file FILE'
            )

        # The data_dir should exist and contain the BBV file.
        #
        if not os.path.isdir(options.data_dir):
            msg.PrintAndExit('Data directory does not exist: ' +
                             options.data_dir)
        if not os.path.isfile(os.path.join(options.data_dir,
                                           options.bbv_file)):
            msg.PrintAndExit('Basic block vector file does not exist: ' +
                             options.bbv_file)

        # Do some 'special' things on native Windows.
        #
        util.WindowsNativeCheck(options)

        return (options, args)

    def NormProjectBBV(self, options):
        """
        Normalize and project the basic block vector file instead of doing this in Simpoint.

        This is required so we can combine BBV and LDV files as frequency vector file given
        to Simpoint.

        @param options Options given on cmd line

        @return result of command to generate projected file
        """

        # Use options for the Python script to generate the CSV file.
        # stdout goes to the projected BBV file, stderr to a log file.
        #
        output_file = 'normalize_project.out'
        try:
            fp_error = open(output_file, 'w')
        except IOError:
            msg.PrintMsg(
                'ERROR: Failed to open normalize/project error file:\n'
                '   ' + output_file)
            return -1
        try:
            fp_out = open(self.proj_bbv_file, 'w')
        except IOError:
            msg.PrintMsg(
                'ERROR: Failed to open normalize/project output file:\n'
                '   ' + self.proj_bbv_file)
            fp_error.close()
            return -1

        cmd = self.csv_bin
        cmd += ' --project_bbv'
        cmd += ' --bbv_file'
        cmd += ' ' + self.generic_bbv_name
        cmd += ' --dimensions 16'

        result = util.RunCmd(cmd, options, '', concurrent=False, f_stdout=fp_out, \
                             f_stderr=fp_error)
        msg.PrintMsg('   Output file: %s' % (self.proj_bbv_file))
        msg.PrintMsg('   Stderr file: %s\n' % (output_file))
        fp_out.close()
        fp_error.close()

        return result

    def NormWeightLDV(self, options):
        """
        Normalize and apply weights to LRU stack distance vector files.

        @param options Options given on cmd line

        @return result of command to generate file
        """

        # Ensure there's an LDV file to process.
        #
        if not os.path.isfile(self.generic_ldv_name):
            msg.PrintMsg('ERROR: Can\'t open LDV vector file.\n '
                         '   ' + self.generic_ldv_name)
            return -1

        # Use options for the Python script to process the LDV file.
        # stdout goes to the weighted LDV file, stderr to a log file.
        #
        output_file = 'normalize_weight.out'
        try:
            fp_error = open(output_file, 'w')
        except IOError:
            msg.PrintMsg(
                'ERROR: Failed to open normalize weights error file:\n'
                '   ' + output_file)
            return -1
        try:
            fp_out = open(self.weight_ldv_file, 'w')
        except IOError:
            msg.PrintMsg(
                'ERROR: Failed to open normalize weights output file:\n'
                '   ' + self.weight_ldv_file)
            fp_error.close()
            return -1

        cmd = self.csv_bin
        cmd += ' --weight_ldv '
        cmd += ' --ldv_file ' + self.generic_ldv_name
        cmd += ' --dimensions 16'

        result = util.RunCmd(cmd, options, '', concurrent=False, f_stdout=fp_out, \
                             f_stderr=fp_error)
        msg.PrintMsg('   Output file: %s' % (self.weight_ldv_file))
        msg.PrintMsg('   Stderr file: %s\n' % (output_file))
        fp_out.close()
        fp_error.close()

        return result

    def CombineFreqVectFiles(self, options):
        """
        Combine the BBV and LDV files, applying a scaling factor to allow
        different contributions from each file.

        @param options Options given on cmd line

        @return result of command to generate file
        """

        # Use options for the Python script to generate the CSV file.
        # stdout goes to the combined frequency vector file, stderr to a log.
        #
        output_file = 'scaled_combined.out'
        try:
            fp_error = open(output_file, 'w')
        except IOError:
            msg.PrintMsg('ERROR: Failed to open combined scale error file:\n'
                         '   ' + output_file)
            return -1
        try:
            fp_out = open(self.freq_vect_file, 'w')
        except IOError:
            msg.PrintMsg('ERROR: Failed to open combined scale output file:\n'
                         '   ' + self.freq_vect_file)
            fp_error.close()
            return -1

        cmd = self.csv_bin
        cmd += ' --combine ' + str(options.combine)
        # BBV gets weight 'combine', LDV gets the remainder '1 - combine'.
        string = 'Combining BBV and LDV files with scaling factors: BBV: %.3f, LDV: %.3f\n' % \
            (options.combine, 1 - options.combine)
        msg.PrintMsgPlus(string)
        fp_error.write(string)
        cmd += ' --normal_bbv ' + self.proj_bbv_file
        cmd += ' --normal_ldv ' + self.weight_ldv_file
        result = util.RunCmd(cmd, options, '', concurrent=False, f_stdout=fp_out, \
                             f_stderr=fp_error)
        msg.PrintMsg('   Output file: %s' % (self.freq_vect_file))
        msg.PrintMsg('   Stderr file: %s\n' % (output_file))
        fp_out.close()
        fp_error.close()

        return result

    def RunSimpoint(self, options):
        """
        Format and execute the command to run Simpoint.

        @param options Options given on cmd line

        @return result of running Simpoint
        """

        import subprocess

        # Output file for simpoint
        #
        output_file = 'simpoint.out'
        try:
            fp_out = open(output_file, 'w')
        except IOError:
            msg.PrintMsg('ERROR: Failed to open simpoint error file:\n'
                         '   ' + output_file)
            return -1

        # Format the Simpoint command and run it.
        #
        cmd = self.simpoint_bin
        if options.ldv:
            cmd += ' -fixedLength on -loadVectorsTxtFmt ' + self.freq_vect_file
        else:
            cmd += ' -loadFVFile ' + self.generic_bbv_name

        # Add either the default options used to configure Simpoints or the
        # Simpoint options from the user.
        #
        if options.simpoint_options == '':
            cmd += ' -coveragePct ' + str(options.cutoff)
            cmd += ' -maxK ' + str(options.maxk)
        else:
            cmd += ' ' + options.simpoint_options

        cmd += ' -saveSimpoints ./t.simpoints -saveSimpointWeights ./t.weights -saveLabels t.labels'
        result = util.RunCmd(cmd,
                             options,
                             '',
                             concurrent=False,
                             f_stdout=fp_out,
                             f_stderr=subprocess.STDOUT)
        msg.PrintMsg('   Output file: %s' % (output_file))
        msg.PrintMsg('   Stderr file: %s' % (output_file))
        fp_out.close()

        return result

    def GenRegionCSVFile(self, options):
        """
        Format and execute the command to generate the regions CSV file.

        @param options Options given on cmd line

        @return result of command to generating CSV file
        """
        # Setup some stuff for generating regions CSV files.
        #
        cutoff_suffix = ''
        if options.cutoff < 1.0:
            cutoff_suffix = '.lpt' + str(options.cutoff)
        pos = options.data_dir.find('.Data')

        # Output and error files
        #
        regions_csv_file = options.data_dir[:pos] + '.pinpoints.csv'
        try:
            fp_out = open(regions_csv_file, 'w')
        except IOError:
            msg.PrintMsg('ERROR: Failed to open CSV output file:\n'
                         '   ' + regions_csv_file)
            return -1

        output_file = 'create_region_file.out'
        try:
            fp_error = open(output_file, 'w')
        except IOError:
            msg.PrintMsg('ERROR: Failed to open CSV error file:\n'
                         '   ' + output_file)
            fp_out.close()
            return -1

        # Format the command to generate the region CSV file and run it.
        # A negative focus thread means 'not set'; default to thread 0.
        #
        if options.focus_thread < 0:
            tid = 0
        else:
            tid = options.focus_thread

        # use_orig = True  # Use Chuck's original Perl script
        use_orig = False  # Use regions.py script
        if use_orig:
            # Use Chuck's Perl script to generate the CSV file.
            #
            cmd = 'create_region_file.pl'
            cmd += ' ' + self.generic_bbv_name
            cmd += ' -seq_region_ids -tid ' + str(tid)
            cmd += ' -region_file t.simpoints' + cutoff_suffix
            cmd += ' -weight_file t.weights' + cutoff_suffix
        else:
            # Use the new Python script to generate the CSV file.
            #
            cmd = self.csv_bin
            cmd += ' -f ' + str(tid)
            cmd += ' --csv_region '
            cmd += ' --bbv_file ' + self.generic_bbv_name + cutoff_suffix
            cmd += ' --region_file t.simpoints' + cutoff_suffix
            cmd += ' --weight_file t.weights' + cutoff_suffix
        msg.PrintMsg('')
        result = util.RunCmd(cmd, options, '', concurrent=False, print_time=False, f_stdout=fp_out, \
                             f_stderr=fp_error)
        msg.PrintMsg(
            '   NOTE: For this script, problems can be in either the output or stderr files.  Check them both!'
        )
        msg.PrintMsg('      Output file: %s' % (regions_csv_file))
        msg.PrintMsg('      Stderr file: %s\n' % (output_file))
        fp_out.close()
        fp_error.close()

        return result, regions_csv_file

    def Run(self):
        """Run the scripts required to run simpoint and generate a region CSV file with the results."""

        msg.PrintMsg('')

        # Get the command line options
        #
        (options, args) = self.ParseCommandLine()

        # Make sure required utilities exist and are executable.
        #
        if util.Which(self.simpoint_bin) is None:
            msg.PrintAndExit('simpoint binary not in path.\n'
                             'Add directory where it exists to your path.')
        if util.Which(self.csv_bin) is None:
            msg.PrintAndExit(
                'script to generate the region CSV file not in path.\n'
                'Add directory where it exists to your path.')

        # Go to the data directory. Both utilities should be run in this directory.
        #
        os.chdir(options.data_dir)

        # Always copy the specific BBV to the generic name used by simpoint.
        # If LDV file exists, then copy it to generic name.
        #
        if os.path.isfile(self.generic_bbv_name):
            os.remove(self.generic_bbv_name)
        shutil.copy(options.bbv_file, self.generic_bbv_name)
        ldv_file = options.bbv_file.replace('.bb', '.ldv')
        if os.path.isfile(ldv_file):
            if os.path.isfile(self.generic_ldv_name):
                os.remove(self.generic_ldv_name)
            shutil.copy(options.bbv_file.replace('.bb', '.ldv'),
                        self.generic_ldv_name)

        # Get the instruction count and slice_size from the BBV file.
        #
        try:
            f = open(self.generic_bbv_name)
        except IOError:
            msg.PrintAndExit('problem opening BBV file: ' +
                             self.generic_bbv_name)
        instr_count = 0
        slice_size = 0
        # The values are the last token on their respective lines.  They must
        # be converted to int before comparing; comparing the raw strings to
        # ints is a TypeError on Python 3 and gives wrong results on Python 2.
        for line in f.readlines():
            if line.find('Dynamic') != -1:
                tmp = line.split()
                count = int(tmp[-1])
                if count > instr_count:
                    instr_count = count
            if line.find('SliceSize') != -1:
                tmp = line.split()
                size = int(tmp[-1])
                if size > slice_size:
                    slice_size = size
        f.close()

        # Check to make sure instruction count > slice_size.
        #
        if slice_size > instr_count:
            import locale
            locale.setlocale(locale.LC_ALL, "")
            msg.PrintAndExit('Slice size is greater than the number of instructions.  Reduce parameter \'slice_size\'.' + \
                '\nInstruction count: ' + locale.format_string('%14d', instr_count, grouping=True) + \
                '\nSlice size:        ' + locale.format_string('%14d', slice_size, grouping=True))

        if options.ldv:
            # Run to generate regions CSV file using both BBV and LDV files.
            #
            result = self.NormProjectBBV(options)
            util.CheckResult(
                result, options,
                'normalizing and projecting BBV with: ' + self.csv_bin)
            result = self.NormWeightLDV(options)
            util.CheckResult(
                result, options,
                'normalizing and applying weights to LDV with: ' +
                self.csv_bin)
            result = self.CombineFreqVectFiles(options)
            util.CheckResult(
                result, options,
                'scaling and combining BBV and LDV with: ' + self.csv_bin)
            result = self.RunSimpoint(options)
            util.CheckResult(
                result, options,
                'generating clusters (regions) with: ' + self.simpoint_bin)
            result, regions_csv_file = self.GenRegionCSVFile(options)
            util.CheckResult(result, options,
                             'creating regions CSV file with: ' + self.csv_bin)
            msg.PrintMsg('\nRegions CSV file: ' +
                         os.path.join(options.data_dir, regions_csv_file))
        else:
            # Run scripts to generate regions CSV file using the new method with just the BBV file.
            #
            result = self.NormProjectBBV(options)
            util.CheckResult(
                result, options,
                'normalizing and projecting BBV with: ' + self.csv_bin)
            result = self.RunSimpoint(options)
            util.CheckResult(
                result, options,
                'generating clusters (regions) with: ' + self.simpoint_bin)
            result, regions_csv_file = self.GenRegionCSVFile(options)
            util.CheckResult(result, options,
                             'creating regions CSV file with: ' + self.csv_bin)
            msg.PrintMsg('\nRegions CSV file: ' +
                         os.path.join(options.data_dir, regions_csv_file))

        return result
示例#13
0
    def ParseCommandLine(self):
        """
        Process command line arguments, get Kit, tool options, and their paths.

        @return List containing: pin_options, pintool_options, options.replay_file, kit_obj
        """

        # Strip the RCS keyword wrapper so only the bare revision number is
        # shown.  (Replacing ' ' with '' here would leave '$Revision:1.63$'.)
        #
        version = '$Revision: 1.63 $'
        version = version.replace('$Revision: ', '')
        ver = version.replace(' $', '')
        us = '%prog [options] pinball_basename \nVersion: ' + ver
        desc = 'Replays one pinball. Use \'--replay_options\' or ' \
               '\'--log_options\' to modify the pintool behavior during replay.'
        util.CheckNonPrintChar(sys.argv)
        parser = optparse.OptionParser(usage=us, version=ver, description=desc)

        # Define the command line options which control the behavior of the
        # script.
        #
        # Some of these methods take a 2nd argument which is the empty string
        # ''.  If this script used option groups, then the 2nd parameter would
        # be the group.  However, this script does not use option groups, so
        # the argument is empty.
        #
        cmd_options.arch(parser, '')
        cmd_options.config_file(parser)
        cmd_options.cross_os(parser, '')
        cmd_options.debug(parser)
        cmd_options.global_file(parser)
        cmd_options.log_options(parser)
        cmd_options.msgfile_ext(parser)
        cmd_options.no_print_cmd(parser)
        cmd_options.pintool(parser)
        cmd_options.pintool_help(parser)
        cmd_options.pin_options(parser)
        cmd_options.pinplayhome(parser, '')
        cmd_options.playout(parser)
        cmd_options.replay_file(parser)
        cmd_options.replay_options(parser)
        cmd_options.save_global(parser)
        cmd_options.sdehome(parser, '')
        cmd_options.verbose(parser)

        (options, args) = parser.parse_args()

        if options.verbose:
            msg.PrintMsg('Started replayer.py')
        # Check to make sure the pinball basename has been given as an argument or
        # command line option.
        #
        if options.replay_file == '' and \
           not (hasattr(options, 'pintool_help') and options.pintool_help):
            if len(sys.argv) == 1 or len(args) == 0:
                msg.PrintMsg(
                    "ERROR: Must have a trace basename on the command line.\n"
                    "Usage: %s [options] pinball_basename" %
                    os.path.basename(sys.argv[0]))
                util.CheckResult(-1, options, 'Checking command line options')
            options.replay_file = args[0]

        # Read in an optional configuration files and set global variables.
        #
        config_obj = config.ConfigClass()
        config_obj.GetCfgGlobals(options,
                                 False)  # Don't need to require 4 variables

        # Once the tracing configuration parameters are read, get the kit in
        # case pinplayhome was set on the command line.
        #
        kit_obj = self.GetKit()

        # If user just wants 'pintool_help' go ahead and print it, then exit
        # the script.
        #
        if hasattr(options, 'pintool_help') and options.pintool_help:
            result = util.PintoolHelpKit(kit_obj, options)
            sys.exit(result)

        # Translate the 'arch' value given by the user into
        # the internal arch type used by the scripts.
        #
        if hasattr(options, 'arch') and options.arch:
            if 'intel64' in options.arch:
                options.arch = config.ARCH_INTEL64
            elif 'ia32' in options.arch:
                options.arch = config.ARCH_IA32
            else:
                options.arch = config.ARCH_INVALID

        # Now that we know the type of the binary, set the user defined pintool,
        # if one exists.  Need to wait until now to set the tool because the
        # user may only have the tool in the architecture dependent directory
        # for this type of application.  Thus we need the binary type in order
        # to find it.
        #
        kit_obj.binary_type = options.arch

        pin_options = ''
        pintool_options = ''

        # Check to see if there is a pinball to replay.
        #
        if options.replay_file == "":
            msg.PrintHelpAndExit('Replay file not specified!')

        # If the user specified a pintool, replace the default pintool in the kit with
        # it.
        #
        if hasattr(options, "pintool") and options.pintool:
            kit_obj.SetPinTool(options.pintool, options.replay_file)

        platform = util.Platform()
        if platform == config.LINUX:
            pin_options = ' ' + kit_obj.prefix + ' -xyzzy '

            # If using NOT using Linux tools to work with whole program pinballs generated on Windows,
            # then need a set of  knobs for the pin binary itself.
            #
            if not options.cross_os:
                pin_options += kit_obj.prefix + ' -reserve_memory '
                pin_options += kit_obj.prefix + ' ' + options.replay_file + '.address '

        pintool_options += ' -replay:basename ' + options.replay_file
        if options.playout or '-replay:playout 1' in options.replay_options:
            # If present, need to remove the knob '-replay:playout 1' from
            # options.replay_options because it can only be given once on the
            # command line.
            #
            pintool_options += ' -replay:playout 1 '
            options.replay_options = options.replay_options.replace(
                '-replay:playout 1', '')
        else:
            pintool_options += ' -replay:playout 0 '

        # If running Windows WP pinballs on Linux, then need this knob for the replayer pintool.
        #
        if options.cross_os:
            pintool_options += ' -replay:addr_trans'

        # Add knobs for Pin and replay/logging user gave on the command line.
        #
        pin_options += ' ' + options.pin_options
        pintool_options += ' ' + options.replay_options + ' ' + options.log_options

        # If user has log options, then may need to at multi-thread knob.
        #
        if options.log_options:
            pintool_options += util.AddMt(options, options.replay_file)

        return pin_options, pintool_options, options.replay_file, kit_obj, options
示例#14
0
class SDEKit(kit.Kit):
    """
    Kit object describing the location, binaries and knobs of an SDE kit.
    """

    # Top level directory of the kit; filled in when the kit is initialized.
    #
    path = ''

    # Kit flavor identifier used by the scripts.
    #
    kit_type = config.SDE

    # Select the Pin binary, pintool, nullapp and Simpoint script location
    # appropriate for the OS we are running on.
    #
    platform = util.Platform()
    if platform is None:
        msg.PrintAndExit(
            'Could not identify the OS of the system where scripts are being run.')
    if platform in (config.WIN_NATIVE, config.WIN_CYGWIN):
        # Windows/Cygwin
        pinbin = 'pin.exe'
        pintool = 'sde-pinplay-driver.dll'
        nullapp = 'nullapp.exe'
        simpoint_path = os.path.join('pinplay-scripts', 'PinPointsHome',
                                     'Windows', 'bin')
    else:
        # Linux.  No pintool is needed for SDE; the unified controller
        # provides a default tool ('sde-mix-mt.so').
        pinbin = 'pinbin'
        pintool = ''
        nullapp = 'nullapp'
        simpoint_path = os.path.join('pinplay-scripts', 'PinPointsHome',
                                     'Linux', 'bin')

    # General identification strings for the kit.
    #
    default_dir = 'SDE'
    pin = 'sde'
    type = 'SDE'
    prefix = '-p'

    # Extra knobs this kit always needs; none for SDE.
    #
    default_knobs = ''

    # Per-architecture paths to the Pin binary itself.
    #
    pin_dir = ''
    pinbin_intel64 = os.path.join('intel64', pinbin)
    pinbin_ia32 = os.path.join('ia32', pinbin)

    # Location of the shell scripts shipped with this kit.
    #
    script_path = config.sde_script_path

    # SDE names for knobs which behave the same in the various kits but are
    # named differently in each.  (The PinPlay controller equivalents were
    # the same knob names prefixed with '-log:', e.g. '-log:length',
    # '-log:regions:in', etc.)
    #
    knob_length = '-length'
    knob_skip = '-skip'
    knob_regions_epilog = '-regions:epilog'
    knob_regions_in = '-regions:in'
    knob_regions_out = '-regions:out'
    knob_regions_prolog = '-regions:prolog'
    knob_regions_warmup = '-regions:warmup'
    knob_pcregions_in = '-pcregions:in'
    knob_pcregions_out = '-pcregions:out'
    knob_pcregions_merge_warmup = '-pcregions:merge_warmup'

    def __init__(self):

        self.InitKit(self.script_path)

    def ValidDriver(self, path):
        """
        For SDE, validate the kit by checking that the 'simpoint' binary is
        present rather than checking for pinplay drivers.  (SDE's unified
        controller means no pinplay driver exists to verify.)

        @param path Path to kit to be validated

        @return True if Simpoint found, otherwise exit with an error msg
        """

        # Pick the Simpoint binary name for this platform, then build the
        # full path to it inside the kit.
        #
        os_type = util.Platform()
        if os_type in (config.WIN_NATIVE, config.WIN_CYGWIN):
            binary = 'simpoint.exe'
        else:
            binary = 'simpoint'
        path = os.path.join(path, self.simpoint_path, binary)

        if os.path.isfile(path):
            return True

        msg.PrintMsg(
            'ERROR: The required binary \'simpoint\' was not found.')
        msg.PrintMsg('   ' + path)
        msg.PrintMsg('Perhaps the SDE kit installation was incomplete. Check to make sure\n' \
            'there weren\'t any errors during the install.')
        sys.exit(1)

    def GetHomeDir(self):
        """
        Get the location defined by the user's 'home' parameter/option.
        """

        return config.sdehome

    def GetPinTool(self, pinball):
        """
        Return the pintool for this kit.  SDE no longer requires one (it has
        a unified controller), but the accessor is kept in case it's needed.

        @param pinball Pinball kit is processing

        @return Path to the pintool for this kit
        """

        return self.pintool

    def GetPinToolKnob(self, pinball=''):
        """
        Return the knob which selects a user defined pintool, or an empty
        string when SDE's default tool should be used.

        @param pinball Optional - pinball kit is processing

        @return Either empty string or knob with user defined pintool
        """

        return ' -t ' + self.pintool if self.pintool else ''

    def GetTraceinfoBlank(self):
        """
        Get the location of the traceinfo 'blank' XML file fragments.

        The fragments live two directory levels above self.simpoint_path
        within the kit.

        @return Path to blank file fragments
        """

        frag_dir = os.path.dirname(os.path.dirname(self.simpoint_path))
        return os.path.join(self.path, frag_dir)
示例#15
0
def run(args, remainder):
    """
    Run one test workload from the CI plan and record its results.

    Reads 'ci/plan.yml' to resolve the suite/workload/platform
    configuration, unpacks the build package, creates/clones a conda
    environment, executes the configured test runner, compares results
    against the efficiency baseline and writes 'report.json' to the
    test output directory.

    @param args      Parsed command line arguments (suite, workload,
                     platform, batch_size, shard, shard_count, local,
                     version, ...)
    @param remainder Unparsed remainder of the command line (currently
                     unused; kept for interface compatibility)

    Exits the process with a non-zero status for an invalid suite or
    workload, a failing runner command, or a result comparison that is
    not OK.
    """

    util.verbose = True
    util.printf('args:', args)
    if args.shard_count:
        util.printf('running shard:', args.shard)

    # The CI plan describes constants, platforms, variants and suites.
    with open('ci/plan.yml') as fp:
        plan = yaml.safe_load(fp)

    gpu_flops = plan['CONST']['gpu_flops']
    baseline_name = plan['CONST']['efficiency_baseline']
    platform = plan['PLATFORMS'][args.platform]
    variant_name = platform['variant']
    variant = plan['VARIANTS'][variant_name]
    arch = variant['arch']

    suites = plan['SUITES']
    suite = suites.get(args.suite)
    if suite is None:
        sys.exit('Invalid suite. Available suites: {}'.format(list(suites)))
    platform_cfg = suite['platforms'][args.platform]

    workload = suite['workloads'].get(args.workload)
    if workload is None:
        sys.exit('Invalid workload. Available workloads: {}'.format(
            list(suite['workloads'])))

    popt = util.PlanOption(suite, workload, args.platform)
    test_info = util.TestInfo(
        suite=(args.suite, suite),
        workload=(args.workload, workload),
        platform=(args.platform, util.Platform(args.platform, gpu_flops)),
        batch_size=args.batch_size,
        variant=variant,
        popt=popt,
        shard_id=args.shard,
        shards=args.shard_count,
    )

    root = pathlib.Path('tmp').resolve()
    # Renamed from 'input' to avoid shadowing the builtin of that name.
    input_dir = root / 'input'
    output_root = root / 'test'
    output = test_info.path(output_root)

    shutil.rmtree(input_dir, ignore_errors=True)
    if args.local:
        pkg_path = pathlib.Path('bazel-bin/pkg.tar.gz')
        version = '0.0.0.dev0'
    else:
        archive_path = os.path.join('tmp', 'build', variant_name, 'pkg.tar.gz')
        util.buildkite_download(archive_path, '.')
        pkg_path = root / 'build' / variant_name / 'pkg.tar.gz'
        version = args.version

    util.printf('--- Extracting {} -> {}'.format(pkg_path, input_dir))
    # NOTE(review): extractall() trusts the archive's member paths; the
    # package comes from our own build, but add a path check if that
    # ever changes.
    with tarfile.open(str(pkg_path), 'r') as tar:
        tar.extractall(input_dir)

    shutil.rmtree(output_root, ignore_errors=True)
    output.mkdir(parents=True)

    cwd = popt.get('cwd', '.')
    spec = pathlib.Path(popt.get('conda_env'))

    util.printf('--- :snake: Creating conda env from {}'.format(spec))
    instance_name = os.getenv('BUILDKITE_AGENT_NAME', 'harness')
    # Key the cached base env on the spec file's contents so that a
    # changed spec forces a fresh environment.
    sig = hashlib.md5()
    sig.update(spec.read_bytes())
    base_path = pathlib.Path('~', '.t2', instance_name,
                             sig.hexdigest()).expanduser()

    base_env = util.CondaEnv(base_path)
    base_env.create(spec)
    conda_env = base_env.clone(root / pathlib.Path('cenv'))
    env = os.environ.copy()
    env.update(conda_env.env())

    # Install any wheels the workload requires into the cloned env.
    for whl in popt.get('wheels', []):
        whl_filename = whl.format(arch=arch, version=version)
        whl_path = input_dir / whl_filename
        conda_env.install(whl_path)

    if 'stripe' in args.platform:
        env['PLAIDML_USE_STRIPE'] = '1'
    else:
        env['PLAIDML_USE_STRIPE'] = '0'
    if 'cuda' in args.platform:
        env['CUDA_DEVICE_ORDER'] = buildkite_metadata('CUDA_CUDA_DEVICE_ORDER',
                                                      'PCI_BUS_ID')
        env['CUDA_VISIBLE_DEVICES'] = buildkite_metadata(
            'CUDA_VISIBLE_DEVICES', '0')
    # NOTE(review): buildkite_metadata() with no explicit default may
    # return None, which is not a valid environment value — verify it
    # returns '' (or add a default) before this reaches util.call().
    env['PLAIDML_DEVICE_IDS'] = buildkite_metadata('PLAIDML_DEVICE_IDS')
    env['PLAIDML_EXPERIMENTAL'] = buildkite_metadata('PLAIDML_EXPERIMENTAL',
                                                     '0')
    device = buildkite_metadata('PLAIDML_DEVICE')
    target = buildkite_metadata('PLAIDML_TARGET')
    # Identity comparison with None per PEP 8 (was '!= None').
    if device is not None:
        env['PLAIDML_DEVICE'] = device
    if target is not None:
        env['PLAIDML_TARGET'] = target

    util.printf(
        '--- :bazel: Running test {suite}/{workload} on {platform}'.format(
            suite=args.suite,
            workload=args.workload,
            platform=args.platform,
        ))

    # Assemble the runner command line: platform-level args wrap the
    # workload-specific args, in prepend/args/append order.
    cmd_args = platform_cfg.get('prepend_args', []) + popt.get(
        'prepend_args', [])
    cmd_args += platform_cfg.get('args', []) + popt.get('args', [])
    cmd_args += platform_cfg.get('append_args', []) + popt.get(
        'append_args', [])

    if args.shard_count:
        cmd_args += ['--shard', str(args.shard)]
        cmd_args += ['--shard-count', str(args.shard_count)]

    # Expand {results}/{batch_size}/{workload} placeholders in the args.
    ctx = dict(
        results=output,
        batch_size=args.batch_size,
        workload=args.workload,
    )
    cmd_args = [str(x).format(**ctx) for x in cmd_args]
    if 'stripe' in args.platform:
        # Stripe runs don't take the kernel-timing knob; drop it if present.
        try:
            cmd_args.remove('--no-kernel-timing')
        except ValueError:
            pass

    runner = shutil.which(popt.get('runner'), path=env['PATH'])
    cmd = [runner] + cmd_args
    retcode = util.call(cmd, cwd=cwd, env=env)

    build_url = os.getenv('BUILDKITE_BUILD_URL')
    if build_url:
        build_url = '{}#{}'.format(build_url, os.getenv('BUILDKITE_JOB_ID'))
    else:
        build_url = DEFAULT_BUILD_URL

    # Baseline ("golden") results to compare the current run against.
    golden_info = util.TestInfo(
        suite=(args.suite, suite),
        workload=(args.workload, workload),
        platform=(baseline_name, util.Platform(baseline_name, gpu_flops)),
        batch_size=args.batch_size,
        variant=variant,
        popt=popt,
    )

    result = analysis.Result(output_root, test_info, golden_info)
    report = {
        'build_url': build_url,
        'compare': result.test_result.compare,
        'efficiency': result.efficiency,
        'errors': result.test_result.errors,
        'failures': result.test_result.failures,
        'ratio': result.ratio,
        'reason': result.test_result.reason(),
        'status': result.test_result.status(),
        'compile_duration': result.cur.compile_duration,
        'cur.execution_duration': result.cur.execution_duration,
        'ref.execution_duration': result.ref.execution_duration,
    }

    with (output / 'report.json').open('w') as fp:
        json.dump(report, fp)

    # Propagate the runner's exit code first, then the comparison verdict.
    if retcode:
        sys.exit(retcode)
    if not result.test_result.is_ok():
        sys.exit(1)
示例#16
0
    def Run(self):
        """
        Get all the user options, assemble the logger command and run it.

        @return Exit code from the logger pintool
        """

        opts = self.ParseCommandLine()

        # Assemble the command which invokes the logging script.
        #
        parts = ["./" + self.log_cmd]

        # Forward optional knobs the user may have supplied.
        #
        if getattr(opts, 'compressed', None):
            parts.append(' --compressed=%s ' % opts.compressed)
        if getattr(opts, 'log_file', None):
            parts.append(' --log_file ' + opts.log_file)

        # Always pass symbol-info and PID logging options; splice in any
        # extra user-supplied log options inside the same quoted string.
        #
        if getattr(opts, 'log_options', None):
            parts.append(' --log_options "-log:syminfo -log:pid %s" ' %
                         opts.log_options)
        else:
            parts.append(' --log_options "-log:syminfo -log:pid "')
        if getattr(opts, 'no_log', None):
            parts.append(' --no_log ')
        have_pid = bool(getattr(opts, 'pid', None))
        if have_pid:
            parts.append(' --pid ' + str(opts.pid))

        parts.append(util.AddGlobalFile(self.gv.DumpGlobalVars(), opts))
        parts.append(util.AddCfgFile(opts))

        # When not attaching to a PID, append the program and arguments.
        #
        # Quote the command unless it already contains a double quote
        # (assume a single quote implies a matching second one).  Quoting
        # covers commands that redirect I/O or whose options contain '-'.
        #
        if not have_pid:
            if '"' in opts.command:
                parts.append(' -- ' + opts.command)
            else:
                parts.append(' -- "' + opts.command + '" ')

        cmd = ''.join(parts)

        # Echo the pin/pintool command line for the user.
        #
        msg.PrintMsg('\n' + cmd)

        # Execute the command line and return the logger's exit code.
        # Skipped (returning 0) in debug or list mode.
        #
        result = 0
        if not config.debug and not opts.list:
            if util.Platform() != config.WIN_NATIVE:
                # Prefix with 'time' to report run time on non-Windows hosts.
                cmd = 'time ' + cmd
            process = subprocess.Popen(cmd, shell=True)
            process.communicate()
            result = process.returncode

        return result