Example #1
def benchCheckpoints_sync(options, maxreltick, cptdir):

    import socket
    quantum,port,host = options.sync.split(',')
    sync_quantum = long(quantum)
    TCP_IP = host
    TCP_PORT = int(port)
    BUFFER_SIZE = 1
    pre_tick = m5.curTick()
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((TCP_IP, TCP_PORT))

        exit_event = m5.simulate(sync_quantum)
        exit_cause = exit_event.getCause()
        total_tick_simulated = sync_quantum
        num_checkpoints = 0
        max_checkpoints = options.max_checkpoints
        s.send("R")
        data = s.recv(BUFFER_SIZE)
        while total_tick_simulated <= maxreltick and\
              (exit_cause == "simulate() limit reached" or\
               exit_cause == "checkpoint"):
            if exit_cause == "checkpoint":
                m5.simulate(pre_tick + sync_quantum - m5.curTick())
                # send "C" to barrier to notify that we should take a
                # checkpoint at the begining of next quantum
                s.send("C")
            else:
                s.send("R")
            data = s.recv(BUFFER_SIZE)
            pre_tick = m5.curTick()
            exit_event = m5.simulate(sync_quantum)
            total_tick_simulated += sync_quantum
            # if we receive a "C" from barrier, start to dump a checkpoint
            if data == "C" :
                m5.checkpoint(joinpath(cptdir, "cpt.%d"))
                num_checkpoints += 1
                if num_checkpoints == max_checkpoints:
                    exit_cause = "maximum %d checkpoints dropped"\
                                 % max_checkpoints
                    break
            exit_cause = exit_event.getCause()
    except KeyboardInterrupt:
        sys.exit()
    except Exception as ex:
        print "exception %s occured." %(ex.args[1])
        sys.exit()
    return exit_event
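
Note: the "R"/"C" handshake above assumes an external barrier process listening on host:port that receives one byte from each participating simulator per quantum and answers with the agreed action. A minimal, single-client sketch of such a peer (hypothetical; a real barrier would aggregate the votes of all simulators before replying) could look like this:

# Hypothetical single-client peer for the "R"/"C" barrier protocol above.
import socket

def barrier_server(host="127.0.0.1", port=5000):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()
    while True:
        msg = conn.recv(1)   # "R" = run next quantum, "C" = checkpoint requested
        if not msg:
            break            # simulator closed the connection
        conn.send(msg)       # with a single client, simply echo the decision back
    conn.close()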
Example #2
def run_test(root, switcher=None, freq=1000):
    """Test runner for CPU switcheroo tests.

    The switcheroo test runner is used to switch CPUs in a system that
    has been prepared for CPU switching. Such systems should have
    multiple CPUs when they are instantiated, but only one should be
    switched in. Such configurations can be created using the
    base_config.BaseFSSwitcheroo class.

    A CPU switcher object is used to control switching. The default
    switcher sequentially switches between all CPUs in a system,
    starting with the CPU that is currently switched in.

    Unlike most other test runners, this one automatically configures
    the memory mode of the system based on the first CPU the switcher
    reports.

    Keyword Arguments:
      switcher -- CPU switcher implementation. See Sequential for
                  an example implementation.
      freq -- Switching frequency in Hz.
    """

    if switcher == None:
        switcher = Sequential(root.system.cpu)

    current_cpu = switcher.first()
    system = root.system
    system.mem_mode = type(current_cpu).memory_mode()

    # instantiate configuration
    m5.instantiate()

    # Determine the switching period; this has to be done after
    # instantiating the system since the time base must be fixed.
    period = m5.ticks.fromSeconds(1.0 / freq)
    while True:
        exit_event = m5.simulate(period)
        exit_cause = exit_event.getCause()

        if exit_cause == "simulate() limit reached":
            next_cpu = switcher.next()

            print "Switching CPUs..."
            print "Next CPU: %s" % type(next_cpu)
            m5.drain(system)
            if current_cpu != next_cpu:
                m5.switchCpus(system, [ (current_cpu, next_cpu) ],
                              do_drain=False)
            else:
                print "Source CPU and destination CPU are the same, skipping..."
            m5.resume(system)
            current_cpu = next_cpu
        elif exit_cause == "target called exit()" or \
                exit_cause == "m5_exit instruction encountered":

            sys.exit(0)
        else:
            print "Test failed: Unknown exit cause: %s" % exit_cause
            sys.exit(1)
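
Note: a caller that wants a non-default switching rate can supply its own switcher and frequency. A hypothetical usage, assuming a system prepared with base_config.BaseFSSwitcheroo as described in the docstring:

# Hypothetical usage of the switcheroo test runner above.
switcher = Sequential(root.system.cpu)        # cycle through all CPUs in order
run_test(root, switcher=switcher, freq=500)   # switch 500 times per simulated second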
Example #3
def _run_step(name, restore=None, interval=0.5):
    """
    Instantiate the system (optionally restoring from the checkpoint
    named by restore) and run for interval seconds of simulated
    time. At the end of the simulation interval, create a checkpoint
    and exit.

    As this function is intended to run in its own process using the
    multiprocessing framework, the exit is a true call to exit which
    terminates the process. Exit codes are used to pass information to
    the parent.
    """
    if restore is not None:
        m5.instantiate(restore)
    else:
        m5.instantiate()

    e = m5.simulate(m5.ticks.fromSeconds(interval))
    cause = e.getCause()
    if cause in _exit_limit:
        m5.checkpoint(name)
        sys.exit(_exitcode_checkpoint)
    elif cause in _exit_normal:
        sys.exit(_exitcode_done)
    else:
        print("Test failed: Unknown exit cause: %s" % cause)
        sys.exit(_exitcode_fail)
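
Note: _run_step refers to module-level constants that the snippet does not show. A plausible, purely illustrative set of definitions:

# Assumed values; the real definitions live elsewhere in the test module.
_exit_normal = ("target called exit()", "m5_exit instruction encountered")
_exit_limit = ("simulate() limit reached",)
_exitcode_done = 0          # child finished the workload normally
_exitcode_checkpoint = 42   # child wrote a checkpoint and expects to be restarted
_exitcode_fail = 1          # anything unexpected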
Example #4
def scriptCheckpoints(options, maxtick, cptdir):
    if options.at_instruction or options.simpoint:
        checkpoint_inst = int(options.take_checkpoints)

        # maintain correct offset if we restored from some instruction
        if options.checkpoint_restore != None:
            checkpoint_inst += options.checkpoint_restore

        print "Creating checkpoint at inst:%d" % (checkpoint_inst)
        exit_event = m5.simulate()
        exit_cause = exit_event.getCause()
        print "exit cause = %s" % exit_cause

        # skip checkpoint instructions should they exist
        while exit_cause == "checkpoint":
            exit_event = m5.simulate()
            exit_cause = exit_event.getCause()

        if exit_cause == "a thread reached checkpoint inst number" or \
            exit_cause == "sp simulation reached the interval size":
            m5.checkpoint(joinpath(cptdir, "cpt.%s.%d" % \
                    (options.bench, checkpoint_inst)))
            print "Checkpoint written."

    else:
        when, period = options.take_checkpoints.split(",", 1)
        when = int(when)
        period = int(period)
        num_checkpoints = 0

        exit_event = m5.simulate(when - m5.curTick())
        exit_cause = exit_event.getCause()
        while exit_cause == "checkpoint":
            exit_event = m5.simulate(when - m5.curTick())
            exit_cause = exit_event.getCause()

        if exit_cause == "simulate() limit reached":
            m5.checkpoint(joinpath(cptdir, "cpt.%d"))
            num_checkpoints += 1

        sim_ticks = when
        max_checkpoints = options.max_checkpoints

        while num_checkpoints < max_checkpoints and \
                exit_cause == "simulate() limit reached":
            if (sim_ticks + period) > maxtick:
                exit_event = m5.simulate(maxtick - sim_ticks)
                exit_cause = exit_event.getCause()
                break
            else:
                exit_event = m5.simulate(period)
                exit_cause = exit_event.getCause()
                sim_ticks += period
                while exit_event.getCause() == "checkpoint":
                    exit_event = m5.simulate(sim_ticks - m5.curTick())
                if exit_event.getCause() == "simulate() limit reached":
                    m5.checkpoint(joinpath(cptdir, "cpt.%d"))
                    num_checkpoints += 1

    return exit_event
Example #5
def run_test(root):
    """Default run_test implementations. Scripts can override it."""

    # instantiate configuration
    m5.instantiate()

    # simulate until program terminates
    exit_event = m5.simulate(maxtick)
    print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause())
Example #6
def benchCheckpoints(options, maxtick, cptdir):
    exit_event = m5.simulate(maxtick - m5.curTick())
    exit_cause = exit_event.getCause()

    num_checkpoints = 0
    max_checkpoints = options.max_checkpoints

    while exit_cause == "checkpoint":
        m5.checkpoint(joinpath(cptdir, "cpt.%d"))
        num_checkpoints += 1
        if num_checkpoints == max_checkpoints:
            exit_cause = "maximum %d checkpoints dropped" % max_checkpoints
            break

        exit_event = m5.simulate(maxtick - m5.curTick())
        exit_cause = exit_event.getCause()

    return exit_event
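
Note: the checkpoint helpers in Examples #4 and #6 rely on a few imports that the snippets omit; in gem5 configuration scripts they are typically brought in roughly as follows:

# Imports assumed by scriptCheckpoints and benchCheckpoints above.
import sys
import m5
from os.path import join as joinpath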
Example #7
def restoreSimpointCheckpoint():
    exit_event = m5.simulate()
    exit_cause = exit_event.getCause()

    if exit_cause == "simpoint starting point found":
        print "Warmed up! Dumping and resetting stats!"
        m5.stats.dump()
        m5.stats.reset()

        exit_event = m5.simulate()
        exit_cause = exit_event.getCause()

        if exit_cause == "simpoint starting point found":
            print "Done running SimPoint!"
            sys.exit(exit_event.getCode())

    print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_cause)
    sys.exit(exit_event.getCode())
Example #8
def repeatSwitch(testsys, repeat_switch_cpu_list, maxtick, switch_freq):
    print "starting switch loop"
    while True:
        exit_event = m5.simulate(switch_freq)
        exit_cause = exit_event.getCause()

        if exit_cause != "simulate() limit reached":
            return exit_event

        m5.switchCpus(testsys, repeat_switch_cpu_list)

        tmp_cpu_list = []
        for old_cpu, new_cpu in repeat_switch_cpu_list:
            tmp_cpu_list.append((new_cpu, old_cpu))
        repeat_switch_cpu_list = tmp_cpu_list

        if (maxtick - m5.curTick()) <= switch_freq:
            exit_event = m5.simulate(maxtick - m5.curTick())
            return exit_event
Example #9
def run_test(root):
    """gpu test requires a specialized run_test implementation to set up the
    mmio space."""

    # instantiate configuration
    m5.instantiate()

    # Now that the system has been constructed, setup the mmio space
    root.system.cpu[0].workload[0].map(0x10000000, 0x200000000, 4096)

    # simulate until program terminates
    exit_event = m5.simulate(maxtick)
    print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
Example #10
def takeSimpointCheckpoints(simpoints, interval_length, cptdir):
    num_checkpoints = 0
    index = 0
    last_chkpnt_inst_count = -1
    for simpoint in simpoints:
        interval, weight, starting_inst_count, actual_warmup_length = simpoint
        if starting_inst_count == last_chkpnt_inst_count:
            # checkpoint starting point same as last time
            # (when the warmup period is longer than the starting point)
            exit_cause = "simpoint starting point found"
            code = 0
        else:
            exit_event = m5.simulate()

            # skip checkpoint instructions should they exist
            while exit_event.getCause() == "checkpoint":
                print "Found 'checkpoint' exit event...ignoring..."
                exit_event = m5.simulate()

            exit_cause = exit_event.getCause()
            code = exit_event.getCode()

        if exit_cause == "simpoint starting point found":
            m5.checkpoint(joinpath(cptdir,
                "cpt.simpoint_%02d_inst_%d_weight_%f_interval_%d_warmup_%d"
                % (index, starting_inst_count, weight, interval_length,
                actual_warmup_length)))
            print "Checkpoint #%d written. start inst:%d weight:%f" % \
                (num_checkpoints, starting_inst_count, weight)
            num_checkpoints += 1
            last_chkpnt_inst_count = starting_inst_count
        else:
            break
        index += 1

    print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_cause)
    print "%d checkpoints taken" % num_checkpoints
    sys.exit(code)
Example #11
def run(checkpoint_dir=m5.options.outdir):
    # start simulation (and drop checkpoints when requested)
    while True:
        event = m5.simulate()
        exit_msg = event.getCause()
        if exit_msg == "checkpoint":
            print("Dropping checkpoint at tick %d" % m5.curTick())
            cpt_dir = os.path.join(checkpoint_dir, "cpt.%d" % m5.curTick())
            m5.checkpoint(cpt_dir)
            print("Checkpoint done.")
        else:
            print(exit_msg, " @ ", m5.curTick())
            break

    sys.exit(event.getCode())
Example #12
def main():
    parser = argparse.ArgumentParser(epilog=__doc__)

    parser.add_argument("commands_to_run", metavar="command(s)", nargs='*',
                        help="Command(s) to run")
    parser.add_argument("--cpu", type=str, choices=cpu_types.keys(),
                        default="atomic",
                        help="CPU model to use")
    parser.add_argument("--cpu-freq", type=str, default="4GHz")
    parser.add_argument("--num-cores", type=int, default=1,
                        help="Number of CPU cores")
    parser.add_argument("--mem-type", default="DDR3_1600_8x8",
                        choices=MemConfig.mem_names(),
                        help = "type of memory to use")
    parser.add_argument("--mem-channels", type=int, default=2,
                        help = "number of memory channels")
    parser.add_argument("--mem-ranks", type=int, default=None,
                        help = "number of memory ranks per channel")
    parser.add_argument("--mem-size", action="store", type=str,
                        default="2GB",
                        help="Specify the physical memory size")

    args = parser.parse_args()

    # Create a single root node for gem5's object hierarchy. There can
    # only exist one root node in the simulator at any given
    # time. Tell gem5 that we want to use syscall emulation mode
    # instead of full system mode.
    root = Root(full_system=False)

    # Populate the root node with a system. A system corresponds to a
    # single node with shared memory.
    root.system = create(args)

    # Instantiate the C++ object hierarchy. After this point,
    # SimObjects can't be instantiated anymore.
    m5.instantiate()

    # Start the simulator. This gives control to the C++ world and
    # starts the simulator. The returned event tells the simulation
    # script why the simulator exited.
    event = m5.simulate()

    # Print the reason for the simulation exit. Some exit codes are
    # requests for service (e.g., checkpoints) from the simulation
    # script. We'll just ignore them here and exit.
    print(event.getCause(), " @ ", m5.curTick())
    sys.exit(event.getCode())
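
Note: main() above assumes several imports and helpers that the snippet does not show (argparse, m5, the Root SimObject, a cpu_types mapping, MemConfig, and a create() function that builds the system). The import block would look roughly like this; cpu_types and create() are defined elsewhere in the same script and are not reproduced here:

# Imports assumed by main() above (sketch).
import argparse
import sys
import m5
from m5.objects import Root
from common import MemConfig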
Example #13
def run_test(root, interval=0.5, max_checkpoints=5):
    """
    Run the simulated system for a fixed amount of time and take a
    checkpoint, then restore from the same checkpoint and run until
    the system calls m5 exit.
    """

    cpt_name = os.path.join(m5.options.outdir, "test.cpt")
    restore = None

    for cpt_no in range(max_checkpoints):
        # Create a checkpoint from a separate child process. This enables
        # us to get back to a (mostly) pristine state and restart
        # simulation from the checkpoint.
        p = Process(target=_run_step,
                    args=(cpt_name, ),
                    kwargs={
                "restore" : restore,
                "interval" : interval,
                })
        p.start()

        # Wait for the child to return
        p.join()

        # Restore from the checkpoint next iteration
        restore = cpt_name

        if p.exitcode == _exitcode_done:
            print("Test done.", file=sys.stderr)
            sys.exit(0)
        elif p.exitcode == _exitcode_checkpoint:
            pass
        else:
            print("Test failed.", file=sys.stderr)
            sys.exit(1)

    # Maximum number of checkpoints reached. Just run full-speed from
    # now on.
    m5.instantiate()
    e = m5.simulate()
    cause = e.getCause()
    if cause in _exit_normal:
        sys.exit(0)
    else:
        print("Test failed: Unknown exit cause: %s" % cause)
        sys.exit(1)
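
Note: run_test above spawns each simulation step in a child process and relies on the _run_step helper from Example #3; the snippet omits the corresponding imports, which would be roughly:

# Imports assumed by the checkpoint/restore test runner above.
import os
import sys
from multiprocessing import Process
import m5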
Example #14
def main():
    parser = argparse.ArgumentParser(description="Simple system using HMC "
                                                 "as main memory")
    HMC.add_options(parser)
    add_options(parser)
    options = parser.parse_args()
    # build the system
    root = build_system(options)
    # instantiate all of the objects we've created so far
    m5.instantiate()
    print("Beginning simulation!")
    event = m5.simulate(10000000000)
    m5.stats.dump()
    print('Exiting @ tick %i because %s (exit code is %i)' % (m5.curTick(),
                                                              event.getCause(),
                                                              event.getCode()))
    print("Done")
Example #15
def run(args):
    cptdir = m5.options.outdir
    if args.checkpoint:
        print "Checkpoint directory: %s" % cptdir

    while True:
        event = m5.simulate()
        exit_msg = event.getCause()
        if exit_msg == "checkpoint":
            print "Dropping checkpoint at tick %d" % m5.curTick()
            cpt_dir = os.path.join(m5.options.outdir, "cpt.%d" % m5.curTick())
            m5.checkpoint(os.path.join(cpt_dir))
            print "Checkpoint done."
        else:
            print exit_msg, " @ ", m5.curTick()
            break

    sys.exit(event.getCode())
Example #16
# number of traffic generators
np = 4
# create a traffic generator, and point it to the file we just created
system.tgen = [TrafficGen(config_file=cfg_file_name) for i in xrange(np)]

# Config memory system with given HMC arch
MemConfig.config_mem(options, system)

if options.arch == "distributed":
    for i in xrange(np):
        system.tgen[i].port = system.membus.slave
    # connect the system port even if it is not used in this example
    system.system_port = system.membus.slave

if options.arch == "mixed":
    for i in xrange(int(np / 2)):
        system.tgen[i].port = system.membus.slave
    # connect the system port even if it is not used in this example
    system.system_port = system.membus.slave

# run Forrest, run!
root = Root(full_system=False, system=system)
root.system.mem_mode = 'timing'

m5.instantiate()
m5.simulate(10000000000)

m5.stats.dump()

print "Done!"
Example #17
system.system_port = system.membus.slave

# every period, dump and reset all stats
periodicStatDump(period)

# run Forrest, run!
root = Root(full_system=False, system=system)
root.system.mem_mode = 'timing'

m5.instantiate()


def trace():
    generator = dram_generators[options.mode](system.tgen)
    for bank in range(1, nbr_banks + 1):
        for stride_size in range(burst_size, max_stride + 1, burst_size):
            num_seq_pkts = int(math.ceil(float(stride_size) / burst_size))
            yield generator(period, 0, max_addr, burst_size, int(itt),
                            int(itt), options.rd_perc, 0, num_seq_pkts,
                            page_size, nbr_banks, bank, options.addr_map,
                            options.mem_ranks)
    yield system.tgen.createExit(0)


system.tgen.start(trace())

m5.simulate()

print("DRAM sweep with burst: %d, banks: %d, max stride: %d" %
      (burst_size, nbr_banks, max_stride))
Example #18
def run(options, root, testsys, cpu_class):
    if options.maxtick:
        maxtick = options.maxtick
    elif options.maxtime:
        simtime = m5.ticks.seconds(options.maxtime)
        print "simulating for: ", simtime
        maxtick = simtime
    else:
        maxtick = m5.MaxTick

    if options.checkpoint_dir:
        cptdir = options.checkpoint_dir
    elif m5.options.outdir:
        cptdir = m5.options.outdir
    else:
        cptdir = getcwd()

    if options.fast_forward and options.checkpoint_restore != None:
        fatal("Can't specify both --fast-forward and --checkpoint-restore")

    if options.standard_switch and not options.caches:
        fatal("Must specify --caches when using --standard-switch")

    if options.standard_switch and options.repeat_switch:
        fatal("Can't specify both --standard-switch and --repeat-switch")

    if options.repeat_switch and options.take_checkpoints:
        fatal("Can't specify both --repeat-switch and --take-checkpoints")

    np = options.num_cpus
    switch_cpus = None

    if options.prog_interval:
        for i in xrange(np):
            testsys.cpu[i].progress_interval = options.prog_interval

    if options.maxinsts:
        for i in xrange(np):
            testsys.cpu[i].max_insts_any_thread = options.maxinsts

    if cpu_class:
        switch_cpus = [cpu_class(switched_out=True, cpu_id=(i))
                       for i in xrange(np)]

        for i in xrange(np):
            if options.fast_forward:
                testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
            switch_cpus[i].system =  testsys
            switch_cpus[i].workload = testsys.cpu[i].workload
            switch_cpus[i].clock = testsys.cpu[i].clock
            # simulation period
            if options.maxinsts:
                switch_cpus[i].max_insts_any_thread = options.maxinsts
            # Add checker cpu if selected
            if options.checker:
                switch_cpus[i].addCheckerCpu()

        testsys.switch_cpus = switch_cpus
        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]

    if options.repeat_switch:
        switch_class = getCPUClass(options.cpu_type)[0]
        if switch_class.require_caches() and \
                not options.caches:
            print "%s: Must be used with caches" % str(switch_class)
            sys.exit(1)
        if not switch_class.support_take_over():
            print "%s: CPU switching not supported" % str(switch_class)
            sys.exit(1)

        repeat_switch_cpus = [switch_class(switched_out=True, \
                                               cpu_id=(i)) for i in xrange(np)]

        for i in xrange(np):
            repeat_switch_cpus[i].system = testsys
            repeat_switch_cpus[i].workload = testsys.cpu[i].workload
            repeat_switch_cpus[i].clock = testsys.cpu[i].clock

            if options.maxinsts:
                repeat_switch_cpus[i].max_insts_any_thread = options.maxinsts

            if options.checker:
                repeat_switch_cpus[i].addCheckerCpu()

        testsys.repeat_switch_cpus = repeat_switch_cpus

        if cpu_class:
            repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i])
                                      for i in xrange(np)]
        else:
            repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i])
                                      for i in xrange(np)]

    if options.standard_switch:
        switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i))
                       for i in xrange(np)]
        switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i))
                        for i in xrange(np)]

        for i in xrange(np):
            switch_cpus[i].system =  testsys
            switch_cpus_1[i].system =  testsys
            switch_cpus[i].workload = testsys.cpu[i].workload
            switch_cpus_1[i].workload = testsys.cpu[i].workload
            switch_cpus[i].clock = testsys.cpu[i].clock
            switch_cpus_1[i].clock = testsys.cpu[i].clock

            # if restoring, make atomic cpu simulate only a few instructions
            if options.checkpoint_restore != None:
                testsys.cpu[i].max_insts_any_thread = 1
            # Fast forward to specified location if we are not restoring
            elif options.fast_forward:
                testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
            # Fast forward to a simpoint (warning: time consuming)
            elif options.simpoint:
                if testsys.cpu[i].workload[0].simpoint == 0:
                    fatal('simpoint not found')
                testsys.cpu[i].max_insts_any_thread = \
                    testsys.cpu[i].workload[0].simpoint
            # No distance specified, just switch
            else:
                testsys.cpu[i].max_insts_any_thread = 1

            # warmup period
            if options.warmup_insts:
                switch_cpus[i].max_insts_any_thread =  options.warmup_insts

            # simulation period
            if options.maxinsts:
                switch_cpus_1[i].max_insts_any_thread = options.maxinsts

            # attach the checker cpu if selected
            if options.checker:
                switch_cpus[i].addCheckerCpu()
                switch_cpus_1[i].addCheckerCpu()

        testsys.switch_cpus = switch_cpus
        testsys.switch_cpus_1 = switch_cpus_1
        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
        switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)]

    # set the checkpoint in the cpu before m5.instantiate is called
    if options.take_checkpoints != None and \
           (options.simpoint or options.at_instruction):
        offset = int(options.take_checkpoints)
        # Set an instruction break point
        if options.simpoint:
            for i in xrange(np):
                if testsys.cpu[i].workload[0].simpoint == 0:
                    fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
                checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset
                testsys.cpu[i].max_insts_any_thread = checkpoint_inst
                # used for output below
                options.take_checkpoints = checkpoint_inst
        else:
            options.take_checkpoints = offset
            # Set all test cpus with the right number of instructions
            # for the upcoming simulation
            for i in xrange(np):
                testsys.cpu[i].max_insts_any_thread = offset

    checkpoint_dir = None
    if options.checkpoint_restore != None:
        maxtick, checkpoint_dir = findCptDir(options, maxtick, cptdir, testsys)
    m5.instantiate(checkpoint_dir)

    if options.standard_switch or cpu_class:
        if options.standard_switch:
            print "Switch at instruction count:%s" % \
                    str(testsys.cpu[0].max_insts_any_thread)
            exit_event = m5.simulate()
        elif cpu_class and options.fast_forward:
            print "Switch at instruction count:%s" % \
                    str(testsys.cpu[0].max_insts_any_thread)
            exit_event = m5.simulate()
        else:
            print "Switch at curTick count:%s" % str(10000)
            exit_event = m5.simulate(10000)
        print "Switched CPUS @ tick %s" % (m5.curTick())

        m5.switchCpus(testsys, switch_cpu_list)

        if options.standard_switch:
            print "Switch at instruction count:%d" % \
                    (testsys.switch_cpus[0].max_insts_any_thread)

            #warmup instruction count may have already been set
            if options.warmup_insts:
                exit_event = m5.simulate()
            else:
                exit_event = m5.simulate(options.standard_switch)
            print "Switching CPUS @ tick %s" % (m5.curTick())
            print "Simulation ends instruction count:%d" % \
                    (testsys.switch_cpus_1[0].max_insts_any_thread)
            m5.switchCpus(testsys, switch_cpu_list1)

    # If we're taking and restoring checkpoints, use checkpoint_dir
    # option only for finding the checkpoints to restore from.  This
    # lets us test checkpointing by restoring from one set of
    # checkpoints, generating a second set, and then comparing them.
    if options.take_checkpoints and options.checkpoint_restore:
        if m5.options.outdir:
            cptdir = m5.options.outdir
        else:
            cptdir = getcwd()

    if options.take_checkpoints != None :
        # Checkpoints being taken via the command line at <when> and at
        # subsequent periods of <period>.  Checkpoint instructions
        # received from the benchmark running are ignored and skipped in
        # favor of command line checkpoint instructions.
        exit_event = scriptCheckpoints(options, maxtick, cptdir)
    else:
        if options.fast_forward:
            m5.stats.reset()
        print "**** REAL SIMULATION ****"

        # If checkpoints are being taken, then the checkpoint instruction
        # will occur in the benchmark code itself.
        if options.repeat_switch and maxtick > options.repeat_switch:
            exit_event = repeatSwitch(testsys, repeat_switch_cpu_list,
                                      maxtick, options.repeat_switch)
        else:
            exit_event = benchCheckpoints(options, maxtick, cptdir)

    print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())
    if options.checkpoint_at_end:
        m5.checkpoint(joinpath(cptdir, "cpt.%d"))

    if not m5.options.interactive:
        sys.exit(exit_event.getCode())
Example #19
Use Cholesky, FFT, LUContig, LUNoncontig, Radix, Barnes, FMM, OceanContig,
OceanNoncontig, Raytrace, WaterNSquared, or WaterSpatial
""")

# --------------------
# Assign the workload to the cpus
# ====================

for cluster in clusters:
    for cpu in cluster.cpus:
        cpu.workload = root.workload

# ----------------------
# Run the simulation
# ----------------------

if options.timing or options.detailed:
    root.system.mem_mode = 'timing'

# instantiate configuration
m5.instantiate()

# simulate until program terminates
if options.maxtick:
    exit_event = m5.simulate(options.maxtick)
else:
    exit_event = m5.simulate(m5.MaxTick)

print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause())

Example #20
def scriptCheckpoints(options, maxtick, cptdir):
    if options.at_instruction or options.simpoint:
        checkpoint_inst = int(options.take_checkpoints)

        # maintain correct offset if we restored from some instruction
        if options.checkpoint_restore != None:
            checkpoint_inst += options.checkpoint_restore

        print "Creating checkpoint at inst:%d" % (checkpoint_inst)
        exit_event = m5.simulate()
        exit_cause = exit_event.getCause()
        print "exit cause = %s" % exit_cause

        # skip checkpoint instructions should they exist
        while exit_cause == "checkpoint":
            exit_event = m5.simulate()
            exit_cause = exit_event.getCause()

        if exit_cause == "a thread reached the max instruction count":
            m5.checkpoint(joinpath(cptdir, "cpt.%s.%d" % \
                    (options.bench, checkpoint_inst)))
            print "Checkpoint written."

    else:
        when, period = options.take_checkpoints.split(",", 1)
        when = int(when)
        period = int(period)
        num_checkpoints = 0

        exit_event = m5.simulate(when - m5.curTick())
        exit_cause = exit_event.getCause()
        while exit_cause == "checkpoint":
            exit_event = m5.simulate(when - m5.curTick())
            exit_cause = exit_event.getCause()

        if exit_cause == "simulate() limit reached":
            # cuiwl: dump and reset stats.
            print "Checkpoint %d." % (num_checkpoints)
            m5.checkpoint(joinpath(cptdir, "cpt.%d"))
            num_checkpoints += 1

        sim_ticks = when
        max_checkpoints = options.max_checkpoints

        while num_checkpoints < max_checkpoints and \
                exit_cause == "simulate() limit reached":
            if (sim_ticks + period) > maxtick:
                exit_event = m5.simulate(maxtick - sim_ticks)
                exit_cause = exit_event.getCause()
                break
            else:
                exit_event = m5.simulate(period)
                exit_cause = exit_event.getCause()
                sim_ticks += period
                while exit_event.getCause() == "checkpoint":
                    exit_event = m5.simulate(sim_ticks - m5.curTick())
                if exit_event.getCause() == "simulate() limit reached":
                    # dump and reset stats.
                    print "Checkpoint %d." % (num_checkpoints)
                    m5.stats.dump()
                    m5.stats.reset()
                    m5.checkpoint(joinpath(cptdir, "cpt.%d"))
                    num_checkpoints += 1
        if options.stat_prefix:
            shutil.copyfile("/usr/local/google/home/cuiwl/gem5/m5out/stats.txt",
                            "/usr/local/google/home/cuiwl/gem5/m5out/stats/" +
                            options.stat_prefix + "_" +
                            time.strftime("%Y%m%d") + "_" +
                            time.strftime("%H:%M:%S") + ".txt")
        else:
            shutil.copyfile("/usr/local/google/home/cuiwl/gem5/m5out/stats.txt",
                            "/usr/local/google/home/cuiwl/gem5/m5out/stats/none" +
                            "_" + time.strftime("%Y%m%d") + "_" +
                            time.strftime("%H:%M:%S") + ".txt")
    return exit_event
Example #21
cfg_file.write("TRANSITION %d %d 1\n" % (nxt_state - 1, nxt_state - 1))

cfg_file.close()

# create a traffic generator, and point it to the file we just created
system.tgen = TrafficGen(config_file = cfg_file_name)

# add a communication monitor
system.monitor = CommMonitor()

# connect the traffic generator to the bus via a communication monitor
system.tgen.port = system.monitor.slave
system.monitor.master = system.membus.slave

# connect the system port even if it is not used in this example
system.system_port = system.membus.slave

# every period, dump and reset all stats
periodicStatDump(period)

# run Forrest, run!
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'

m5.instantiate()
m5.simulate(nxt_state * period)

print "DRAM sweep with burst: %d, banks: %d, max stride: %d" % \
    (burst_size, nbr_banks, max_stride)
Example #22
# Assign input trace files to the eTraceCPU:
system.cpu.instTraceFile = "system.cpu.traceListener.inst.gz"
system.cpu.dataTraceFile = "system.cpu.traceListener.data.gz"

# Setting up L1 BUS:
system.tol2bus = L2XBar()
system.l2cache = L2Cache(size="1MB")
system.physmem = SimpleMemory()  # This must be instantiated, even if not needed

# Create a external TLM port:
system.tlm = ExternalSlave()
system.tlm.addr_ranges = [AddrRange('4096MB')]
system.tlm.port_type = "tlm_slave"
system.tlm.port_data = "transactor1"

# Connect everything:
system.membus = SystemXBar()
system.system_port = system.membus.slave
system.cpu.icache.mem_side = system.tol2bus.slave
system.cpu.dcache.mem_side = system.tol2bus.slave
system.tol2bus.master = system.l2cache.cpu_side
system.l2cache.mem_side = system.membus.slave
system.membus.master = system.tlm.port

# Start the simulation:
root = Root(full_system=False, system=system)
root.system.mem_mode = 'timing'
m5.instantiate()
m5.simulate()  # Simulation time specified later on commandline
Example #23
        "or WaterSpatial",
        file=sys.stderr)
    sys.exit(1)

# --------------------
# Assign the workload to the cpus
# ====================

for cpu in cpus:
    cpu.workload = root.workload

system.workload = SEWorkload.init_compatible(root.workload.executable)

# ----------------------
# Run the simulation
# ----------------------

if options.timing or options.detailed:
    root.system.mem_mode = 'timing'

# instantiate configuration
m5.instantiate()

# simulate until program terminates
if options.maxtick:
    exit_event = m5.simulate(options.maxtick)
else:
    exit_event = m5.simulate(m5.MaxTick)

print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause())
Example #24
def run_system_with_cpu(
    process,
    options,
    output_dir,
    warmup_cpu_class=None,
    warmup_instructions=0,
    real_cpu_create_function=lambda cpu_id: DerivO3CPU(cpu_id=cpu_id)):
    # Override the -d / --outdir output directory option passed to gem5
    m5.options.outdir = output_dir
    m5.core.setOutputDir(m5.options.outdir)

    m5.stats.reset()

    max_tick = options.abs_max_tick
    if options.rel_max_tick:
        max_tick = options.rel_max_tick
    elif options.maxtime:
        max_tick = int(options.maxtime * 1000 * 1000 * 1000 * 1000)

    eprint("Simulating until tick=%s" % (max_tick))

    real_cpus = [real_cpu_create_function(0)]
    mem_mode = real_cpus[0].memory_mode()

    if warmup_cpu_class:
        warmup_cpus = [warmup_cpu_class(cpu_id=0)]
        warmup_cpus[0].max_insts_any_thread = warmup_instructions
    else:
        warmup_cpus = real_cpus

    system = System(cpu=warmup_cpus,
                    mem_mode=mem_mode,
                    mem_ranges=[AddrRange(options.mem_size)],
                    cache_line_size=options.cacheline_size)
    system.multi_thread = False
    system.voltage_domain = VoltageDomain(voltage=options.sys_voltage)
    system.clk_domain = SrcClockDomain(clock=options.sys_clock,
                                       voltage_domain=system.voltage_domain)
    system.cpu_voltage_domain = VoltageDomain()
    system.cpu_clk_domain = SrcClockDomain(
        clock=options.cpu_clock, voltage_domain=system.cpu_voltage_domain)
    system.cache_line_size = options.cacheline_size
    if warmup_cpu_class:
        for cpu in real_cpus:
            cpu.clk_domain = system.cpu_clk_domain
            cpu.workload = process
            cpu.system = system
            cpu.switched_out = True
            cpu.createThreads()
        system.switch_cpus = real_cpus

    for cpu in system.cpu:
        cpu.clk_domain = system.cpu_clk_domain
        cpu.workload = process
        if options.prog_interval:
            cpu.progress_interval = options.prog_interval
        cpu.createThreads()

    MemClass = Simulation.setMemClass(options)
    system.membus = SystemXBar()
    system.system_port = system.membus.slave
    system.cpu[0].connectAllPorts(system.membus)
    MemConfig.config_mem(options, system)
    root = Root(full_system=False, system=system)

    m5.options.outdir = output_dir
    m5.instantiate(None)  # None == no checkpoint
    if warmup_cpu_class:
        eprint("Running warmup with warmup CPU class (%d instrs.)" %
               (warmup_instructions))
    eprint("Starting simulation")
    exit_event = m5.simulate(max_tick)
    if warmup_cpu_class:
        max_tick -= m5.curTick()
        m5.stats.reset()
        debug_print("Finished warmup; running real simulation")
        m5.switchCpus(system, real_cpus)
        exit_event = m5.simulate(max_tick)
    eprint("Done simulation @ tick = %s: %s" %
           (m5.curTick(), exit_event.getCause()))
    m5.stats.dump()
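
Note: a caller of run_system_with_cpu above hands it an SE-mode workload plus parsed options. A hypothetical usage, with the process construction and option parsing simplified:

# Hypothetical usage of run_system_with_cpu above; `options` comes from the
# script's own argument parser and is not shown here.
process = Process(cmd=['tests/test-progs/hello/bin/x86/linux/hello'])
run_system_with_cpu(process, options, m5.options.outdir,
                    warmup_cpu_class=AtomicSimpleCPU,
                    warmup_instructions=1000000)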
Example #25
                                   options.requests,
                                   generator = generator)

Ruby.create_system(options, system)

assert (options.num_cpus == len(system.ruby._cpu_ruby_ports))

for ruby_port in system.ruby._cpu_ruby_ports:
    #
    # Tie the ruby tester ports to the ruby cpu ports
    #
    system.tester.cpuPort = ruby_port.slave

# -----------------------
# run simulation
# -----------------------

root = Root(full_system=False, system=system)
root.system.mem_mode = 'timing'

# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')

# instantiate configuration
m5.instantiate()

# simulate until program terminates
exit_event = m5.simulate(options.maxtick)

print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
Example #26
import argparse
import m5
import sys

from m5.objects import SystemC_Kernel, Root, SystemC_Printer, Gem5_Feeder

# pylint:disable=unused-variable

parser = argparse.ArgumentParser()
parser.add_argument('--word', action="append", default=[])
parser.add_argument('--delay', default='1ns')
parser.add_argument('--prefix', default='')

args = parser.parse_args()

printer = SystemC_Printer()
printer.prefix = args.prefix

feeder = Gem5_Feeder()
feeder.printer = printer
feeder.delay = args.delay
feeder.strings = args.word

kernel = SystemC_Kernel(feeder=feeder)
root = Root(full_system=True, systemc_kernel=kernel)

m5.instantiate(None)

cause = m5.simulate(m5.MaxTick).getCause()
print(cause)
Example #27
system.l2cache = L2Cache(size='512kB', writeback_clean=True)
system.l2cache.xbar = L2XBar()
system.l1cache.mem_side = system.l2cache.xbar.slave
system.l2cache.cpu_side = system.l2cache.xbar.master

# make the L3 mostly exclusive, and correspondingly ensure that the L2
# writes back also clean lines to the L3
system.l3cache = L3Cache(size='4MB', clusivity='mostly_excl')
system.l3cache.xbar = L2XBar()
system.l2cache.mem_side = system.l3cache.xbar.slave
system.l3cache.cpu_side = system.l3cache.xbar.master
system.l3cache.mem_side = system.membus.slave

# connect the system port even if it is not used in this example
system.system_port = system.membus.slave

# every period, dump and reset all stats
periodicStatDump(period)

# run Forrest, run!
root = Root(full_system=False, system=system)
root.system.mem_mode = 'timing'

m5.instantiate()
m5.simulate(nxt_state * period)

# print all we need to make sense of the stats output
print "lat_mem_rd with %d iterations, ranges:" % iterations
for r in ranges:
    print r
Example #28
system.cpu.createInterruptController()
system.cpu.interrupts[0].pio = system.membus.master
system.cpu.interrupts[0].int_master = system.membus.slave
system.cpu.interrupts[0].int_slave = system.membus.master

system.mem_ctrl = DDR3_1600_8x8()
system.mem_ctrl.range = system.mem_ranges[0]
system.mem_ctrl.port = system.membus.master

system.system_port = system.membus.slave

process = Process()

# thispath = os.path.dirname(os.path.realpath(__file__))
# binpath = os.path.join(thispath, '../../../',
#                        'tests/test-progs/hello/bin/x86/linux/hello')

# process.cmd = [binpath]
process.cmd = ['tests/test-progs/hello/bin/x86/linux/hello']

system.cpu.workload = process
system.cpu.createThreads()

root = Root(full_system=False, system=system)

m5.instantiate()

print("Beginning simulation!")
exit_event = m5.simulate()
print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause()))
Example #29
def run(options, root, testsys, cpu_class):
    if options.checkpoint_dir:
        cptdir = options.checkpoint_dir
    elif m5.options.outdir:
        cptdir = m5.options.outdir
    else:
        cptdir = getcwd()

    if options.fast_forward and options.checkpoint_restore != None:
        fatal("Can't specify both --fast-forward and --checkpoint-restore")

    if options.fast_forward_pseudo_inst and options.checkpoint_restore != None:
        fatal(
            "Can't specify both --fast-forward-pseudo-inst and --checkpoint-restore"
        )

    if options.standard_switch and not options.caches:
        fatal("Must specify --caches when using --standard-switch")

    if options.standard_switch and options.repeat_switch:
        fatal("Can't specify both --standard-switch and --repeat-switch")

    if options.repeat_switch and options.take_checkpoints:
        fatal("Can't specify both --repeat-switch and --take-checkpoints")

    np = options.num_cpus
    switch_cpus = None

    if options.prog_interval:
        for i in xrange(np):
            testsys.cpu[i].progress_interval = options.prog_interval

    if options.maxinsts:
        for i in xrange(np):
            testsys.cpu[i].max_insts_any_thread = options.maxinsts

    if cpu_class:
        switch_cpus = [
            cpu_class(switched_out=True, cpu_id=(i)) for i in xrange(np)
        ]

        # [SafeSpec] configure simulation scheme
        if cpu_class == DerivO3CPU:
            #fatal("Ruby can only be used with DerivO3CPU!")
            CpuConfig.config_scheme(cpu_class, switch_cpus, options)
        else:
            warn("restoring from a checkpoint, "
                 "but not simulate using DerivO3CPU.")

        for i in xrange(np):
            if options.fast_forward:
                testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
            switch_cpus[i].system = testsys
            switch_cpus[i].workload = testsys.cpu[i].workload
            switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
            switch_cpus[i].progress_interval = \
                testsys.cpu[i].progress_interval
            switch_cpus[i].isa = testsys.cpu[i].isa
            # simulation period
            if options.maxinsts:
                switch_cpus[i].max_insts_any_thread = options.maxinsts
            # Add checker cpu if selected
            if options.checker:
                switch_cpus[i].addCheckerCpu()

        # If elastic tracing is enabled attach the elastic trace probe
        # to the switch CPUs
        if options.elastic_trace_en:
            CpuConfig.config_etrace(cpu_class, switch_cpus, options)

        testsys.switch_cpus = switch_cpus
        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i])
                           for i in xrange(np)]

    if options.repeat_switch:
        switch_class = getCPUClass(options.cpu_type)[0]
        if switch_class.require_caches() and \
                not options.caches:
            print "%s: Must be used with caches" % str(switch_class)
            sys.exit(1)
        if not switch_class.support_take_over():
            print "%s: CPU switching not supported" % str(switch_class)
            sys.exit(1)

        repeat_switch_cpus = [switch_class(switched_out=True, \
                                               cpu_id=(i)) for i in xrange(np)]

        for i in xrange(np):
            repeat_switch_cpus[i].system = testsys
            repeat_switch_cpus[i].workload = testsys.cpu[i].workload
            repeat_switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
            repeat_switch_cpus[i].isa = testsys.cpu[i].isa

            if options.maxinsts:
                repeat_switch_cpus[i].max_insts_any_thread = options.maxinsts

            if options.checker:
                repeat_switch_cpus[i].addCheckerCpu()

        testsys.repeat_switch_cpus = repeat_switch_cpus

        if cpu_class:
            repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i])
                                      for i in xrange(np)]
        else:
            repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i])
                                      for i in xrange(np)]

    if options.standard_switch:
        switch_cpus = [
            TimingSimpleCPU(switched_out=True, cpu_id=(i)) for i in xrange(np)
        ]
        switch_cpus_1 = [
            DerivO3CPU(switched_out=True, cpu_id=(i)) for i in xrange(np)
        ]

        for i in xrange(np):
            switch_cpus[i].system = testsys
            switch_cpus_1[i].system = testsys
            switch_cpus[i].workload = testsys.cpu[i].workload
            switch_cpus_1[i].workload = testsys.cpu[i].workload
            switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
            switch_cpus_1[i].clk_domain = testsys.cpu[i].clk_domain
            switch_cpus[i].isa = testsys.cpu[i].isa
            switch_cpus_1[i].isa = testsys.cpu[i].isa

            # if restoring, make atomic cpu simulate only a few instructions
            if options.checkpoint_restore != None:
                testsys.cpu[i].max_insts_any_thread = 1
            # Fast forward to specified location if we are not restoring
            elif options.fast_forward:
                testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
            # Fast forward to a simpoint (warning: time consuming)
            elif options.simpoint:
                if testsys.cpu[i].workload[0].simpoint == 0:
                    fatal('simpoint not found')
                testsys.cpu[i].max_insts_any_thread = \
                    testsys.cpu[i].workload[0].simpoint
            # No distance specified, just switch
            # else:
            # testsys.cpu[i].max_insts_any_thread = 1

            # warmup period
            if options.warmup_insts:
                switch_cpus[i].max_insts_any_thread = options.warmup_insts

            # simulation period
            if options.maxinsts:
                switch_cpus_1[i].max_insts_any_thread = options.maxinsts

            # attach the checker cpu if selected
            if options.checker:
                switch_cpus[i].addCheckerCpu()
                switch_cpus_1[i].addCheckerCpu()

        testsys.switch_cpus = switch_cpus
        testsys.switch_cpus_1 = switch_cpus_1
        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i])
                           for i in xrange(np)]
        switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i])
                            for i in xrange(np)]

    # set the checkpoint in the cpu before m5.instantiate is called
    if options.take_checkpoints != None and \
           (options.simpoint or options.at_instruction):
        offset = int(options.take_checkpoints)
        # Set an instruction break point
        if options.simpoint:
            for i in xrange(np):
                if testsys.cpu[i].workload[0].simpoint == 0:
                    fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
                checkpoint_inst = int(
                    testsys.cpu[i].workload[0].simpoint) + offset
                testsys.cpu[i].max_insts_any_thread = checkpoint_inst
                # used for output below
                options.take_checkpoints = checkpoint_inst
        else:
            options.take_checkpoints = offset
            # Set all test cpus with the right number of instructions
            # for the upcoming simulation
            for i in xrange(np):
                testsys.cpu[i].max_insts_any_thread = offset

    if options.take_simpoint_checkpoints != None:
        simpoints, interval_length = parseSimpointAnalysisFile(
            options, testsys)

    checkpoint_dir = None
    if options.checkpoint_restore:
        cpt_starttick, checkpoint_dir = findCptDir(options, cptdir, testsys)
    m5.instantiate(checkpoint_dir)

    # Initialization is complete.  If we're not in control of simulation
    # (that is, if we're a slave simulator acting as a component in another
    #  'master' simulator) then we're done here.  The other simulator will
    # call simulate() directly. --initialize-only is used to indicate this.
    if options.initialize_only:
        return

    # Handle the max tick settings now that tick frequency was resolved
    # during system instantiation
    # NOTE: the maxtick variable here is in absolute ticks, so it must
    # include any simulated ticks before a checkpoint
    explicit_maxticks = 0
    maxtick_from_abs = m5.MaxTick
    maxtick_from_rel = m5.MaxTick
    maxtick_from_maxtime = m5.MaxTick
    if options.abs_max_tick:
        maxtick_from_abs = options.abs_max_tick
        explicit_maxticks += 1
    if options.rel_max_tick:
        maxtick_from_rel = options.rel_max_tick
        if options.checkpoint_restore:
            # NOTE: this may need to be updated if checkpoints ever store
            # the ticks per simulated second
            maxtick_from_rel += cpt_starttick
            if options.at_instruction or options.simpoint:
                warn("Relative max tick specified with --at-instruction or" \
                     " --simpoint\n      These options don't specify the " \
                     "checkpoint start tick, so assuming\n      you mean " \
                     "absolute max tick")
        explicit_maxticks += 1
    if options.maxtime:
        maxtick_from_maxtime = m5.ticks.fromSeconds(options.maxtime)
        explicit_maxticks += 1
    if explicit_maxticks > 1:
        warn("Specified multiple of --abs-max-tick, --rel-max-tick, --maxtime."\
             " Using least")
    maxtick = min([maxtick_from_abs, maxtick_from_rel, maxtick_from_maxtime])

    if options.checkpoint_restore != None and maxtick < cpt_starttick:
        fatal("Bad maxtick (%d) specified: " \
              "Checkpoint starts starts from tick: %d", maxtick, cpt_starttick)

    if options.standard_switch or cpu_class:
        if options.standard_switch:
            print "Switch at instruction count:%s" % \
                    str(testsys.cpu[0].max_insts_any_thread)
            exit_event = m5.simulate()
        elif cpu_class and options.fast_forward:
            print "Switch at instruction count:%s" % \
                    str(testsys.cpu[0].max_insts_any_thread)
            exit_event = m5.simulate()
        elif cpu_class and options.fast_forward_pseudo_inst:
            print "Switch at beginning of ROI"
            exit_event = m5.simulate()
        else:
            print "Switch at curTick count:%s" % str(10000)
            exit_event = m5.simulate(10000)

        if options.fast_forward_pseudo_inst:
            while exit_event.getCause() != 'switchcpu':
                print 'Exiting @ tick %i because %s' % (m5.curTick(),
                                                        exit_event.getCause())
                exit_event = m5.simulate()

        print "Switched CPUS @ tick %s" % (m5.curTick())

        m5.switchCpus(testsys, switch_cpu_list)

        if options.standard_switch:
            print "Switch at instruction count:%d" % \
                    (testsys.switch_cpus[0].max_insts_any_thread)

            #warmup instruction count may have already been set
            if options.warmup_insts:
                exit_event = m5.simulate()
            else:
                exit_event = m5.simulate(options.standard_switch)
            print "Switching CPUS @ tick %s" % (m5.curTick())
            print "Simulation ends instruction count:%d" % \
                    (testsys.switch_cpus_1[0].max_insts_any_thread)
            m5.switchCpus(testsys, switch_cpu_list1)

    # If we're taking and restoring checkpoints, use checkpoint_dir
    # option only for finding the checkpoints to restore from.  This
    # lets us test checkpointing by restoring from one set of
    # checkpoints, generating a second set, and then comparing them.
    if (options.take_checkpoints or options.take_simpoint_checkpoints) \
        and options.checkpoint_restore:

        if m5.options.outdir:
            cptdir = m5.options.outdir
        else:
            cptdir = getcwd()

    if options.take_checkpoints != None:
        # Checkpoints being taken via the command line at <when> and at
        # subsequent periods of <period>.  Checkpoint instructions
        # received from the benchmark running are ignored and skipped in
        # favor of command line checkpoint instructions.
        exit_event = scriptCheckpoints(options, maxtick, cptdir)

    # Take SimPoint checkpoints
    elif options.take_simpoint_checkpoints != None:
        takeSimpointCheckpoints(simpoints, interval_length, cptdir)

    # Restore from SimPoint checkpoints
    elif options.restore_simpoint_checkpoint != None:
        restoreSimpointCheckpoint()

    else:
        if options.fast_forward or options.fast_forward_pseudo_inst:
            m5.stats.reset()
        print "**** REAL SIMULATION ****"

        # If checkpoints are being taken, then the checkpoint instruction
        # will occur in the benchmark code itself.
        if options.repeat_switch and maxtick > options.repeat_switch:
            exit_event = repeatSwitch(testsys, repeat_switch_cpu_list, maxtick,
                                      options.repeat_switch)
        else:
            exit_event = benchCheckpoints(options, maxtick, cptdir)

    m5.stats.dump()
    print 'Exiting @ tick %i because %s' % (m5.curTick(),
                                            exit_event.getCause())
    if options.checkpoint_at_end:
        m5.checkpoint(joinpath(cptdir, "cpt.%d"))

    if not m5.options.interactive:
        sys.exit(exit_event.getCode())
Example #30
    if len(stream) == 6:
        task_id = int(stream[5])
    if len(stream) > 6:
        wbase, wrange = [ Addr(v) for v in stream[6:8] ]
    if len(stream) == 9:
        task_id = int(stream[8])
    system.atp.initStream(master, rootp, rbase, rrange, wbase, wrange, task_id)

# connect the ATP gem5 adaptor to the system bus (hence to the memory)
for i in range(options.master_ports):
    system.atp.port = system.membus.cpu_side_ports

# connect the system port even if it is not used in this example
system.system_port = system.membus.cpu_side_ports

root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'

m5.instantiate()

#take x stats snapshots
if options.abs_max_tick != m5.MaxTick:
    period = int(options.abs_max_tick/options.dump_stats)
    periodicStatDump(period)

exit_event = m5.simulate(options.abs_max_tick)

print ('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause()))

sys.exit(exit_event.getCode())
Beispiel #31
0
def run(options, root, testsys, cpu_class):
# NOTE: this function is called, for example, from configs/example/ruby_fs.py
# like this: "Simulation.run(options, root, system, FutureClass)",
# so "system" is "testsys" here.

    if options.maxtick:
        maxtick = options.maxtick
    elif options.maxtime:
        simtime = m5.ticks.seconds(options.maxtime)
        print "simulating for: ", simtime
        maxtick = simtime
    else:
        maxtick = m5.MaxTick

    if options.checkpoint_dir:
        cptdir = options.checkpoint_dir
    elif m5.options.outdir:
        cptdir = m5.options.outdir
    else:
        cptdir = getcwd()

    if options.fast_forward and options.checkpoint_restore != None:
        fatal("Can't specify both --fast-forward and --checkpoint-restore")

    if options.standard_switch and not options.caches:
        fatal("Must specify --caches when using --standard-switch")

    np = options.num_cpus
    max_checkpoints = options.max_checkpoints
    switch_cpus = None

    if options.prog_interval:
        for i in xrange(np):
            testsys.cpu[i].progress_interval = options.prog_interval

    if options.maxinsts:
        for i in xrange(np):
            testsys.cpu[i].max_insts_any_thread = options.maxinsts

    if cpu_class:
        switch_cpus = [cpu_class(defer_registration=True, cpu_id=(np+i))
                       for i in xrange(np)]

        for i in xrange(np):
            if options.fast_forward:
                testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
            switch_cpus[i].system =  testsys
            if not buildEnv['FULL_SYSTEM']:
                switch_cpus[i].workload = testsys.cpu[i].workload
            switch_cpus[i].clock = testsys.cpu[0].clock
            # simulation period
            if options.maxinsts:
                switch_cpus[i].max_insts_any_thread = options.maxinsts

        testsys.switch_cpus = switch_cpus
        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]

    if options.standard_switch:
        if not options.caches:
            # O3 CPU must have a cache to work.
            print "O3 CPU must be used with caches"
            sys.exit(1)

        switch_cpus = [TimingSimpleCPU(defer_registration=True, cpu_id=(np+i))
                       for i in xrange(np)]
        switch_cpus_1 = [DerivO3CPU(defer_registration=True, cpu_id=(2*np+i))
                        for i in xrange(np)]

        for i in xrange(np):
            switch_cpus[i].system =  testsys
            switch_cpus_1[i].system =  testsys
            if not buildEnv['FULL_SYSTEM']:
                switch_cpus[i].workload = testsys.cpu[i].workload
                switch_cpus_1[i].workload = testsys.cpu[i].workload
            switch_cpus[i].clock = testsys.cpu[0].clock
            switch_cpus_1[i].clock = testsys.cpu[0].clock

            # if restoring, make atomic cpu simulate only a few instructions
            if options.checkpoint_restore != None:
                testsys.cpu[i].max_insts_any_thread = 1
            # Fast forward to specified location if we are not restoring
            elif options.fast_forward:
                testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
            # Fast forward to a simpoint (warning: time consuming)
            elif options.simpoint:
                if testsys.cpu[i].workload[0].simpoint == 0:
                    fatal('simpoint not found')
                testsys.cpu[i].max_insts_any_thread = \
                    testsys.cpu[i].workload[0].simpoint
            # No distance specified, just switch
            else:
                testsys.cpu[i].max_insts_any_thread = 1

            # warmup period
            if options.warmup_insts:
                switch_cpus[i].max_insts_any_thread =  options.warmup_insts

            # simulation period
            if options.maxinsts:
                switch_cpus_1[i].max_insts_any_thread = options.maxinsts

        testsys.switch_cpus = switch_cpus
        testsys.switch_cpus_1 = switch_cpus_1
        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
        switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)]

    # set the checkpoint in the cpu before m5.instantiate is called
    if options.take_checkpoints != None and \
           (options.simpoint or options.at_instruction):
        offset = int(options.take_checkpoints)
        # Set an instruction break point
        if options.simpoint:
            for i in xrange(np):
                if testsys.cpu[i].workload[0].simpoint == 0:
                    fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
                checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset
                testsys.cpu[i].max_insts_any_thread = checkpoint_inst
                # used for output below
                options.take_checkpoints = checkpoint_inst
        else:
            options.take_checkpoints = offset
            # Set all test cpus with the right number of instructions
            # for the upcoming simulation
            for i in xrange(np):
                testsys.cpu[i].max_insts_any_thread = offset

    checkpoint_dir = None
    if options.checkpoint_restore != None:
        from os.path import isdir, exists
        from os import listdir
        import re

        if not isdir(cptdir):
            fatal("checkpoint dir %s does not exist!", cptdir)

        if options.at_instruction or options.simpoint:
            inst = options.checkpoint_restore
            if options.simpoint:
                # assume workload 0 has the simpoint
                if testsys.cpu[0].workload[0].simpoint == 0:
                    fatal('Unable to find simpoint')
                inst += int(testsys.cpu[0].workload[0].simpoint)

            checkpoint_dir = joinpath(cptdir,
                                      "cpt.%s.%s" % (options.bench, inst))
            if not exists(checkpoint_dir):
                fatal("Unable to find checkpoint directory %s", checkpoint_dir)
        else:
            dirs = listdir(cptdir)
            expr = re.compile('cpt\.([0-9]*)')
            cpts = []
            for dir in dirs:
                match = expr.match(dir)
                if match:
                    cpts.append(match.group(1))

            cpts.sort(lambda a,b: cmp(long(a), long(b)))
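            # (Python 2 idiom; under Python 3 the same numeric ordering would
            # be obtained with cpts.sort(key=int))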

            cpt_num = options.checkpoint_restore

            if cpt_num > len(cpts):
                fatal('Checkpoint %d not found', cpt_num)

            ## Adjust max tick based on our starting tick
            maxtick = maxtick - int(cpts[cpt_num - 1])
            checkpoint_dir = joinpath(cptdir, "cpt.%s" % cpts[cpt_num - 1])

    m5.instantiate(checkpoint_dir)

    if options.standard_switch or cpu_class:
        if options.standard_switch:
            print "Switch at instruction count:%s" % \
                    str(testsys.cpu[0].max_insts_any_thread)
            exit_event = m5.simulate()
        elif cpu_class and options.fast_forward:
            print "Switch at instruction count:%s" % \
                    str(testsys.cpu[0].max_insts_any_thread)
            exit_event = m5.simulate()
        else:
            print "Switch at curTick count:%s" % str(10000)
            exit_event = m5.simulate(10000)
        print "Switched CPUS @ tick %s" % (m5.curTick())

        # when you change to Timing (or Atomic), you halt the system
        # given as argument.  When you are finished with the system
        # changes (including switchCpus), you must resume the system
        # manually.  You DON'T need to resume after just switching
        # CPUs if you haven't changed anything on the system level.

        m5.changeToTiming(testsys)
        m5.switchCpus(switch_cpu_list)
        m5.resume(testsys)

        if options.standard_switch:
            print "Switch at instruction count:%d" % \
                    (testsys.switch_cpus[0].max_insts_any_thread)

            #warmup instruction count may have already been set
            if options.warmup_insts:
                exit_event = m5.simulate()
            else:
                exit_event = m5.simulate(options.warmup)
            print "Switching CPUS @ tick %s" % (m5.curTick())
            print "Simulation ends instruction count:%d" % \
                    (testsys.switch_cpus_1[0].max_insts_any_thread)
            m5.drain(testsys)
            m5.switchCpus(switch_cpu_list1)
            m5.resume(testsys)

    num_checkpoints = 0
    exit_cause = ''

    # If we're taking and restoring checkpoints, use checkpoint_dir
    # option only for finding the checkpoints to restore from.  This
    # lets us test checkpointing by restoring from one set of
    # checkpoints, generating a second set, and then comparing them.
    if options.take_checkpoints and options.checkpoint_restore:
        if m5.options.outdir:
            cptdir = m5.options.outdir
        else:
            cptdir = getcwd()

    # Checkpoints being taken via the command line at <when> and at
    # subsequent periods of <period>.  Checkpoint instructions
    # received from the benchmark running are ignored and skipped in
    # favor of command line checkpoint instructions.
    if options.take_checkpoints != None :
        if options.at_instruction or options.simpoint:
            checkpoint_inst = int(options.take_checkpoints)

            # maintain correct offset if we restored from some instruction
            if options.checkpoint_restore != None:
                checkpoint_inst += options.checkpoint_restore

            print "Creating checkpoint at inst:%d" % (checkpoint_inst)
            exit_event = m5.simulate()
            print "exit cause = %s" % (exit_event.getCause())

            # skip checkpoint instructions should they exist
            while exit_event.getCause() == "checkpoint":
                exit_event = m5.simulate()

            if exit_event.getCause() == \
                   "a thread reached the max instruction count":
                m5.checkpoint(joinpath(cptdir, "cpt.%s.%d" % \
                        (options.bench, checkpoint_inst)))
                print "Checkpoint written."
                num_checkpoints += 1

            if exit_event.getCause() == "user interrupt received":
                exit_cause = exit_event.getCause();
        else:
            when, period = options.take_checkpoints.split(",", 1)
            when = int(when)
            period = int(period)

            exit_event = m5.simulate(when)
            while exit_event.getCause() == "checkpoint":
                exit_event = m5.simulate(when - m5.curTick())

            if exit_event.getCause() == "simulate() limit reached":
                m5.checkpoint(joinpath(cptdir, "cpt.%d"))
                num_checkpoints += 1

            sim_ticks = when
            exit_cause = "maximum %d checkpoints dropped" % max_checkpoints
            while num_checkpoints < max_checkpoints and \
                    exit_event.getCause() == "simulate() limit reached":
                if (sim_ticks + period) > maxtick:
                    exit_event = m5.simulate(maxtick - sim_ticks)
                    exit_cause = exit_event.getCause()
                    break
                else:
                    exit_event = m5.simulate(period)
                    sim_ticks += period
                    while exit_event.getCause() == "checkpoint":
                        exit_event = m5.simulate(sim_ticks - m5.curTick())
                    if exit_event.getCause() == "simulate() limit reached":
                        m5.checkpoint(joinpath(cptdir, "cpt.%d"))
                        num_checkpoints += 1

            if exit_event.getCause() != "simulate() limit reached":
                exit_cause = exit_event.getCause();

    else: # no checkpoints being taken via this script
        if options.fast_forward:
            m5.stats.reset()
        print "**** REAL SIMULATION ****"
        #exit_event = m5.simulate(maxtick)


        # --Note1: Ruby is created in ruby_fs.py by
        # "Ruby.create_system(options, system, system.piobus, system._dma_devices)",
        # which assigns "stats_filename = options.ruby_stats";
        # "create_system" is defined in configs/ruby/Ruby.py and it
        # instantiates RubySystem: "system.ruby = RubySystem(...)";
        #print testsys.ruby._cpu_ruby_ports 
        #print testsys.ruby.network.ni_flit_size
        #print testsys.ruby.profiler.ruby_system
        # the ctor of RubySystem is defined in src/mem/ruby/system/RubySystem.py;
        # which sets some defaults:
        #print testsys.ruby.stats_filename # i.e., ruby.stats
        #print testsys.ruby.type
        #print testsys.ruby.random_seed
        #print testsys.ruby.clock
        #print testsys.ruby.block_size_bytes
        #print testsys.ruby.mem_size
        #print testsys.ruby.no_mem_vec

        # () cris: description of changes
        # --Note2: initially ruby.stats was overwritten at every dump point;
        # to fix that I changed "OutputDirectory::create(...)" in src/base/output.cc,
        # which is called by "RubyExitCallback::process()" from
        # src/mem/ruby/system/System.cc, the function called at the end of the
        # gem5 run as a callback to dump all ruby stats (the callback is
        # registered in the ctor RubySystem::RubySystem() in the same file);
        # --Note3: the use of doExitCleanup was inspired by src/python/m5/simulate.py
        # (inside which ini and json files are created; you need to rebuild each
        # time you change that Python file):
        #m5.internal.core.doExitCleanup( False) #clear callback queue?
        # --Note4: python/m5/internal/core.py describes "m5.internal.core";

        # cris: here I want to dump stats every delta ticks;
        # I need these to be able to generate reliability traces;
        
        NUM_OF_DUMPS = 100
        num_i = 0
        delta = maxtick/NUM_OF_DUMPS
        sim_ticks = m5.curTick()
        while (m5.curTick() < maxtick):
            sim_ticks += delta
            exit_event = m5.simulate(sim_ticks - m5.curTick())
            if exit_event.getCause() == "simulate() limit reached":
                #--Note5: "doExitCleanup()" is described in src/sim/core.cc;
                # I changed it to be able to call it multiple times;
                #--Note6: do not dump stats in ruby.stats for last iteration 
                # because it will be repeated once more via the exit callbacks
                # in src/python/m5/simulate.py...
                # Note6: next call of doExitCleanup does actually also reset/clear
                # the stats of ruby system via the RubyExitCallback::process() in
                # src/mem/ruby/system/System.cc
                if num_i < (NUM_OF_DUMPS-1):
                    print "Dumping also ruby stats at inst %d to file: ruby.stats" %(num_i)
                    m5.internal.core.doExitCleanup( False) #clear callback queue?
                print "Dumping gem5 stats at inst %d to file: stats.txt" %(num_i)
                # --Note7: dump() writes into stats.txt; dump() is defined in
                # src/python/m5/stats/__init__.py and does its work via the
                # functions implemented in base/stats/text.cc
                #atexit.register(stats.dump) <--- does not work (from simulate.py)
                m5.stats.dump()
                m5.stats.reset() # reset accumulated stats so each dump covers only the last interval
                num_i += 1
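
        # Note (not part of the original script): the manual dump/reset loop
        # above is essentially what the periodicStatDump(period) helper used
        # in other examples in this collection performs in a single call.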
                

        # alex's changes:
	#while exit_event.getCause() != "m5_exit instruction encountered":
	#    m5.stats.dump()
	#    m5.stats.reset()
	#    exit_event = m5.simulate(maxtick)

        #while exit_event.getCause() == "checkpoint":
        #    m5.checkpoint(joinpath(cptdir, "cpt.%d"))
        #    num_checkpoints += 1
        #    if num_checkpoints == max_checkpoints:
        #        exit_cause = "maximum %d checkpoints dropped" % max_checkpoints
        #        break
        #    exit_event = m5.simulate(maxtick - m5.curTick())
        #    exit_cause = exit_event.getCause()

    if exit_cause == '':
        exit_cause = exit_event.getCause()
    print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_cause)

    if options.checkpoint_at_end:
        m5.checkpoint(joinpath(cptdir, "cpt.%d"))
Beispiel #32
0
# obj1.master = obj2.slave  <==> obj2.slave = obj1.master
# But shouldn't the data flow between the cache and the membus be bidirectional?
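# For illustration (not part of the original script): in gem5's classic
# python config syntax the two sides of a port binding are interchangeable,
# so the following two lines (assuming a hypothetical dcache object) would
# create the same connection:
#
#     system.cpu.dcache.mem_side = system.membus.slave
#     system.membus.slave = system.cpu.dcache.mem_side
#
# The binding itself is symmetric; request and response traffic flows both
# ways over the single link once it is made.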


system.cpu.createInterruptController()
print type(system.cpu.interrupts.pio)
system.cpu.interrupts.pio = system.membus.master
system.cpu.interrupts.int_master = system.membus.slave
system.cpu.interrupts.int_slave = system.membus.master


system.system_port = system.membus.slave # x86 specific, allow system to r/w memory

system.mem_ctrl = DDR3_1600_x64() # memory controller
system.mem_ctrl.range = system.mem_ranges[0]
system.mem_ctrl.port = system.membus.master

process = LiveProcess()
process.cmd = ['tests/test-progs/hello/bin/x86/linux/hello']
system.cpu.workload = process
system.cpu.createThreads()

root = Root(full_system=False, system=system)
m5.instantiate()

print 'Beginning simulation!'
exit_event = m5.simulate()

print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())

Beispiel #33
0
def run_test(root, switcher=None, freq=1000, verbose=False):
    """Test runner for CPU switcheroo tests.

    The switcheroo test runner is used to switch CPUs in a system that
    has been prepared for CPU switching. Such systems should have
    multiple CPUs when they are instantiated, but only one should be
    switched in. Such configurations can be created using the
    base_config.BaseFSSwitcheroo class.

    A CPU switcher object is used to control switching. The default
    switcher sequentially switches between all CPUs in a system,
    starting with the CPU that is currently switched in.

    Unlike most other test runners, this one automatically configures
    the memory mode of the system based on the first CPU the switcher
    reports.

    Keyword Arguments:
      switcher -- CPU switcher implementation. See Sequential for
                  an example implementation.
      period -- Switching frequency in Hz.
      verbose -- Enable output at each switch (suppressed by default).
    """

    if switcher == None:
        switcher = Sequential(root.system.cpu)

    current_cpu = switcher.first()
    system = root.system
    system.mem_mode = type(current_cpu).memory_mode()

    # Suppress "Entering event queue" messages since we get tons of them.
    # Worse yet, they include the timestamp, which makes them highly
    # variable and unsuitable for comparing as test outputs.
    if not verbose:
        _m5.core.setLogLevel(_m5.core.LogLevel.WARN)

    # instantiate configuration
    m5.instantiate()

    # Determine the switching period, this has to be done after
    # instantiating the system since the time base must be fixed.
    period = m5.ticks.fromSeconds(1.0 / freq)
    while True:
        exit_event = m5.simulate(period)
        exit_cause = exit_event.getCause()

        if exit_cause == "simulate() limit reached":
            next_cpu = switcher.next()

            if verbose:
                print("Switching CPUs...")
                print("Next CPU: %s" % type(next_cpu))
            m5.drain()
            if current_cpu != next_cpu:
                m5.switchCpus(system, [ (current_cpu, next_cpu) ],
                              verbose=verbose)
            else:
                print("Source CPU and destination CPU are the same,"
                    " skipping...")
            current_cpu = next_cpu
        elif exit_cause == "target called exit()" or \
                exit_cause == "m5_exit instruction encountered":

            sys.exit(0)
        else:
            print("Test failed: Unknown exit cause: %s" % exit_cause)
            sys.exit(1)
Beispiel #34
0
# Checkpointing is not supported by APU model
if (options.checkpoint_dir != None or
    options.checkpoint_restore != None):
    fatal("Checkpointing not supported by apu model")

checkpoint_dir = None
m5.instantiate(checkpoint_dir)

# Map workload to this address space
host_cpu.workload[0].map(0x10000000, 0x200000000, 4096)

if options.fast_forward:
    print("Switch at instruction count: %d" % cpu_list[0].max_insts_any_thread)

exit_event = m5.simulate(maxtick)

if options.fast_forward:
    if exit_event.getCause() == "a thread reached the max instruction count":
        m5.switchCpus(system, switch_cpu_list)
        print("Switched CPUS @ tick %s" % (m5.curTick()))
        m5.stats.reset()
        exit_event = m5.simulate(maxtick - m5.curTick())
elif options.fast_forward_pseudo_op:
    while exit_event.getCause() == "switchcpu":
        # If we are switching *to* kvm, then the current stats are meaningful
        # Note that we don't do any warmup by default
        if type(switch_cpu_list[0][0]) == FutureCpuClass:
            print("Dumping stats...")
            m5.stats.dump()
        m5.switchCpus(system, switch_cpu_list)
Beispiel #35
0
    elif options.benchmark == 'LU_noncontig':
        system.cpu[i].workload = LU_noncontig()
    elif options.benchmark == 'Radix':
        system.cpu[i].workload = Radix()
    elif options.benchmark == 'Barnes':
        system.cpu[i].workload = Barnes()
    elif options.benchmark == 'FMM':
        system.cpu[i].workload = FMM()
    elif options.benchmark == 'OceanContig':
        system.cpu[i].workload = Ocean_contig()
    elif options.benchmark == 'OceanNoncontig':
        system.cpu[i].workload = Ocean_noncontig()
    elif options.benchmark == 'Raytrace':
        system.cpu[i].workload = Raytrace()
    elif options.benchmark == 'WaterNSquared':
        system.cpu[i].workload = Water_nsquared()
    elif options.benchmark == 'WaterSpatial':
        system.cpu[i].workload = Water_spatial()
    else:
        system.cpu[i].workload = process

root.system.mem_mode = 'timing'

m5.instantiate(root)

try:
    exit_event = m5.simulate(m5.MaxTick)
    print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
except:
    print 'Failed @ tick', m5.curTick()
Beispiel #36
0
system.monitor = CommMonitor()

# connect the traffic generator to the bus via a communication monitor
system.tgen.port = system.monitor.slave
system.monitor.master = system.membus.slave

# connect the system port even if it is not used in this example
system.system_port = system.membus.slave

# every period, dump and reset all stats
periodicStatDump(period)

root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'

m5.instantiate()

# Simulate for exactly as long as it takes to go through all the states
# (nxt_state periods plus the final idle period)
m5.simulate(nxt_state * period + idle_period)
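# For illustration only (hypothetical numbers, not taken from the config
# above): with 100 generator states, period = 250us and idle_period = 50us,
# the call above covers 100 * 250us + 50us of simulated time, with a stats
# dump and reset every 250us thanks to periodicStatDump(period).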
print "--- Done DRAM low power sweep ---"
print "Fixed params - "
print "\tburst: %d, banks: %d, max stride: %d, itt min: %s ns" %  \
  (burst_size, nbr_banks, max_stride, itt_min)
print "Swept params - "
print "\titt max multiples input:", itt_max_multiples
print "\titt max values", itt_max_values
print "\tbank utilization values", bank_util_values
print "\tstride values:", stride_values
print "Traffic gen config file:", cfg_file_name
Beispiel #37
0
        tester.cpuDataPort = ruby_port.slave
    elif ruby_port.support_inst_reqs:
        tester.cpuInstPort = ruby_port.slave

    # Do not automatically retry stalled Ruby requests
    ruby_port.no_retry_on_stall = True

    #
    # Tell each sequencer this is the ruby tester so that it
    # copies the subblock back to the checker
    #
    ruby_port.using_ruby_tester = True

# -----------------------
# run simulation
# -----------------------

root = Root( full_system = False, system = system )
root.system.mem_mode = 'timing'

# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')

# instantiate configuration
m5.instantiate()

# simulate until program terminates
exit_event = m5.simulate(options.abs_max_tick)

print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause())
Beispiel #38
0
# generate path to input file
def inputpath(app, file=None):
    # input file has same name as app unless specified otherwise
    if not file:
        file = app
    return joinpath(test_progs, app, 'input', file)

# build configuration
sys.path.append(joinpath(tests_root, 'configs'))
test_filename = config
# for ruby configurations, remove the protocol name from the test filename
if re.search('-ruby', test_filename):
    test_filename = test_filename.split('-ruby')[0]+'-ruby'
execfile(joinpath(tests_root, 'configs', test_filename + '.py'))

# set default maxtick... script can override
# -1 means run forever
maxtick = m5.MaxTick

# tweak configuration for specific test
sys.path.append(joinpath(tests_root, category, name))
execfile(joinpath(tests_root, category, name, 'test.py'))

# instantiate configuration
m5.instantiate(root)

# simulate until program terminates
exit_event = m5.simulate(maxtick)

print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
Beispiel #39
0
def main():
    parser = argparse.ArgumentParser(
        description="Generic ARM big.LITTLE configuration")

    parser.add_argument("--restore-from", type=str, default=None,
                        help="Restore from checkpoint")
    parser.add_argument("--dtb", type=str, default=default_dtb,
                        help="DTB file to load")
    parser.add_argument("--kernel", type=str, default=default_kernel,
                        help="Linux kernel")
    parser.add_argument("--disk", action="append", type=str, default=[],
                        help="Disks to instantiate")
    parser.add_argument("--bootscript", type=str, default=default_rcs,
                        help="Linux bootscript")
    parser.add_argument("--atomic", action="store_true", default=False,
                        help="Use atomic CPUs")
    parser.add_argument("--kernel-init", type=str, default="/sbin/init",
                        help="Override init")
    parser.add_argument("--big-cpus", type=int, default=1,
                        help="Number of big CPUs to instantiate")
    parser.add_argument("--little-cpus", type=int, default=1,
                        help="Number of little CPUs to instantiate")
    parser.add_argument("--caches", action="store_true", default=False,
                        help="Instantiate caches")
    parser.add_argument("--last-cache-level", type=int, default=2,
                        help="Last level of caches (e.g. 3 for L3)")
    parser.add_argument("--big-cpu-clock", type=str, default="2GHz",
                        help="Big CPU clock frequency")
    parser.add_argument("--little-cpu-clock", type=str, default="1GHz",
                        help="Little CPU clock frequency")

    m5.ticks.fixGlobalFrequency()

    options = parser.parse_args()

    if options.atomic:
        cpu_config = { 'cpu' : AtomicSimpleCPU }
        big_cpu_config, little_cpu_config = cpu_config, cpu_config
    else:
        big_cpu_config = { 'cpu' : CpuConfig.get("arm_detailed"),
                           'l1i' : devices.L1I,
                           'l1d' : devices.L1D,
                           'wcache' : devices.WalkCache,
                           'l2' : devices.L2 }
        little_cpu_config = { 'cpu' : MinorCPU,
                              'l1i' : devices.L1I,
                              'l1d' : devices.L1D,
                              'wcache' : devices.WalkCache,
                              'l2' : devices.L2 }

    big_cpu_class = big_cpu_config['cpu']
    little_cpu_class = little_cpu_config['cpu']

    kernel_cmd = [
        "earlyprintk=pl011,0x1c090000",
        "console=ttyAMA0",
        "lpj=19988480",
        "norandmaps",
        "loglevel=8",
        "mem=%s" % default_mem_size,
        "root=/dev/vda1",
        "rw",
        "init=%s" % options.kernel_init,
        "vmalloc=768MB",
    ]

    root = Root(full_system=True)

    assert big_cpu_class.memory_mode() == little_cpu_class.memory_mode()
    disks = default_disk if len(options.disk) == 0 else options.disk
    system = createSystem(options.kernel, big_cpu_class.memory_mode(),
                          options.bootscript, disks=disks)

    root.system = system
    system.boot_osflags = " ".join(kernel_cmd)

    # big cluster
    if options.big_cpus > 0:
        system.bigCluster = CpuCluster()
        system.bigCluster.addCPUs(big_cpu_config, options.big_cpus,
                                  options.big_cpu_clock)


    # LITTLE cluster
    if options.little_cpus > 0:
        system.littleCluster = CpuCluster()
        system.littleCluster.addCPUs(little_cpu_config, options.little_cpus,
                                     options.little_cpu_clock)

    # add caches
    if options.caches:
        cluster_mem_bus = addCaches(system, options.last_cache_level)
    else:
        if big_cpu_class.require_caches():
            m5.util.panic("CPU model %s requires caches" % str(big_cpu_class))
        if little_cpu_class.require_caches():
            m5.util.panic("CPU model %s requires caches" %
                          str(little_cpu_class))
        cluster_mem_bus = system.membus

    # connect each cluster to the memory hierarchy
    for cluster in system._clusters:
        cluster.connectMemSide(cluster_mem_bus)

    # Linux device tree
    system.dtb_filename = SysPaths.binary(options.dtb)

    # Get and load from the chkpt or simpoint checkpoint
    if options.restore_from is not None:
        m5.instantiate(options.restore_from)
    else:
        m5.instantiate()

    # start simulation (and drop checkpoints when requested)
    while True:
        event = m5.simulate()
        exit_msg = event.getCause()
        if exit_msg == "checkpoint":
            print "Dropping checkpoint at tick %d" % m5.curTick()
            cpt_dir = os.path.join(m5.options.outdir, "cpt.%d" % m5.curTick())
            m5.checkpoint(os.path.join(cpt_dir))
            print "Checkpoint done."
        else:
            print exit_msg, " @ ", m5.curTick()
            break

    sys.exit(event.getCode())
Beispiel #40
0
#                  |          ^
#              +---v---+      | TLM World
#              |  TLM  |      | (see sc_target.*)
#              +-------+      v
#

# Create a system with a Crossbar and a TrafficGenerator as CPU:
system = System()
system.membus = IOXBar(width = 16)
system.physmem = SimpleMemory() # This must be instantiated, even if not needed
system.cpu = TrafficGen(config_file = "tgen.cfg")
system.clk_domain = SrcClockDomain(clock = '1.5GHz',
    voltage_domain = VoltageDomain(voltage = '1V'))

# Create a external TLM port:
system.tlm = ExternalSlave()
system.tlm.addr_ranges = [AddrRange('512MB')]
system.tlm.port_type = "tlm_slave"
system.tlm.port_data = "transactor"

# Route the connections:
system.cpu.port = system.membus.slave
system.system_port = system.membus.slave
system.membus.master = system.tlm.port

# Start the simulation:
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
m5.instantiate()
m5.simulate() #Simulation time specified later on commandline
Beispiel #41
0
    system.llc = NoncoherentCache(size='16MB',
                                  assoc=16,
                                  tag_latency=10,
                                  data_latency=10,
                                  sequential_access=True,
                                  response_latency=20,
                                  tgts_per_mshr=8,
                                  mshrs=64)
    last_subsys.xbar.master = system.llc.cpu_side
    system.llc.mem_side = system.physmem.port
else:
    last_subsys.xbar.master = system.physmem.port

root = Root(full_system=False, system=system)
if args.atomic:
    root.system.mem_mode = 'atomic'
else:
    root.system.mem_mode = 'timing'

# The system port is never used in the tester so merely connect it
# to avoid problems
root.system.system_port = last_subsys.xbar.slave

# Instantiate configuration
m5.instantiate()

# Simulate until program terminates
exit_event = m5.simulate(args.maxtick)

print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause())
Beispiel #42
0
def bigLITTLESwitch(testsys, bigLITTLE_switch_cpu_list, maxtick, switch_freq,
                    num_cpu):
    print "starting big.LITTLE switch loop"

    #Define core types and frequencies for each core (Initialized to the big core and max freq.)
    core_type = ['big_core'] * num_cpu
    cur_freq = [2100] * num_cpu

    #Define statistics field for each core and each freq.
    # one independent [frequency, cycle-count] pair per core and per frequency
    big_freq_list = [[freq, 0] for _ in range(num_cpu)
                     for freq in range(800, 2200, 100)]
    Llittle_freq_list = [[freq, 0] for _ in range(num_cpu)
                         for freq in range(400, 1600, 100)]

    #Define max. and min. freq for each core type
    big_min_freq = 800
    big_max_freq = 2100
    little_min_freq = 400
    little_max_freq = 1500

    #Define Switching threshold
    freq_up_thresh = 0.95
    freq_down_thresh = 0.85

    #For estimating utilization in big core
    big_past_total_cycles = [0] * num_cpu
    big_past_idle_cycles = [0] * num_cpu

    #For estimating utilization in LITTLE core
    little_past_total_cycles = [0] * num_cpu
    little_past_idle_cycles = [0] * num_cpu

    #To get utilization value
    current_total_cycles = 0
    current_idle_cycles = 0

    m5.setCpuIndex(bigLITTLE_switch_cpu_list)

    while True:
        #Simulate target architecture during 'switch_freq'
        #print "Running big.LITTLE cluster"
        exit_event = m5.simulate(switch_freq)
        exit_cause = exit_event.getCause()

        #If the simulation ended for some other reason, print the per-frequency cycle statistics and exit the simulation loop
        if exit_cause != "simulate() limit reached":
            for cpu_idx in range(num_cpu):
                print "big Core %d DVFS Stat." % (cpu_idx)
                for idx in range(14):
                    print "%dMHz: %d" % (big_freq_list[idx + 14 * cpu_idx][0],
                                         big_freq_list[idx + 14 * cpu_idx][1])
                print "LITTLE Core %d DVFS Stat." % (cpu_idx)
                for idx in range(12):
                    print "%dMHz: %d" % (
                        Llittle_freq_list[idx + 12 * cpu_idx][0],
                        Llittle_freq_list[idx + 12 * cpu_idx][1])

            return exit_event

        #To access for loop index
        core_index = 0

        for old_cpu, new_cpu in bigLITTLE_switch_cpu_list:

            #DVFS Handling
            if core_type[core_index] == 'big_core':
                #Get busy and idle cycles during past quantum
                current_total_cycles = m5.getCurBusyCycles(0, core_index)
                current_idle_cycles = m5.getCurIdleCycles(0, core_index)

                #Calculate quantum cycles
                quantum_total_cycles = current_total_cycles - big_past_total_cycles[
                    core_index]
                quantum_idle_cycles = current_idle_cycles - big_past_idle_cycles[
                    core_index]

                big_past_total_cycles[core_index] = current_total_cycles
                big_past_idle_cycles[core_index] = current_idle_cycles

            else:
                current_total_cycles = m5.getCurBusyCycles(1, core_index)
                current_idle_cycles = m5.getCurBusyCycles(
                    1, core_index) - m5.getCurIdleCycles(1, core_index)

                #Calculate quantum cycles
                quantum_total_cycles = current_total_cycles - little_past_total_cycles[
                    core_index]
                quantum_idle_cycles = current_idle_cycles - little_past_idle_cycles[
                    core_index]

                little_past_total_cycles[core_index] = current_total_cycles
                little_past_idle_cycles[core_index] = current_idle_cycles

                #print  "%lf, %lf" & (old_cpu.idleCycles, old_cpu.tickCycles)

            #print "Core %d Type: %s, quantum_total_cycles: %lf, quantum_idle_cycles: %lf" % (core_index, core_type[core_index], quantum_total_cycles, quantum_idle_cycles)

            #Calculate core utilization
            if quantum_total_cycles != 0:
                core_utilization = float(quantum_total_cycles -
                                         quantum_idle_cycles) / quantum_total_cycles

            #Current Core is big core (change)
            if core_type[core_index] == 'big_core':
                #Cycle Statistics
                for idx in range(14):
                    if big_freq_list[14 * core_index +
                                     idx][0] == cur_freq[core_index]:
                        big_freq_list[14 * core_index +
                                      idx][1] += quantum_total_cycles
                #Frequency up-scaling
                if core_utilization >= freq_up_thresh and cur_freq[
                        core_index] != big_max_freq:
                    cur_freq[core_index] += 100
                #Frequency down-scaling
                elif core_utilization < freq_down_thresh and cur_freq[
                        core_index] != big_min_freq:
                    cur_freq[core_index] -= 100
                #Core switching
                elif core_utilization < freq_down_thresh and cur_freq[
                        core_index] == big_min_freq:
                    #print "Core %d Switch: big --> LITTLE" % (core_index)
                    switching_cpu_list = []
                    switching_cpu_index = 0
                    for old_cpu, new_cpu in bigLITTLE_switch_cpu_list:
                        if switching_cpu_index == core_index:
                            switching_cpu_list.append((old_cpu, new_cpu))

                            m5.switchCpus(testsys, switching_cpu_list)
                            core_type[core_index] = 'LITTLE_core'
                            cur_freq[core_index] = little_max_freq
                            bigLITTLE_switch_cpu_list[core_index] = (new_cpu,
                                                                     old_cpu)

                        switching_cpu_index += 1

            #Current Core is LITTLE core (change)
            if core_type[core_index] == 'LITTLE_core':
                #Cycle Statistics
                for idx in range(12):
                    if Llittle_freq_list[12 * core_index +
                                         idx][0] == cur_freq[core_index]:
                        Llittle_freq_list[12 * core_index +
                                          idx][1] += quantum_total_cycles
                #Frequency up-scaling
                if core_utilization >= freq_up_thresh and cur_freq[
                        core_index] != little_max_freq:
                    cur_freq[core_index] += 100
                #Frequency down-scaling
                elif core_utilization < freq_down_thresh and cur_freq[
                        core_index] != little_min_freq:
                    cur_freq[core_index] -= 100
                #Core switching
                elif core_utilization >= freq_up_thresh and cur_freq[
                        core_index] == little_max_freq:
                    #print "Core Switch: LITTLE --> big"
                    switching_cpu_list = []
                    switching_cpu_index = 0
                    for old_cpu, new_cpu in bigLITTLE_switch_cpu_list:
                        if switching_cpu_index == core_index:
                            switching_cpu_list.append((old_cpu, new_cpu))

                            m5.switchCpus(testsys, switching_cpu_list)
                            core_type[core_index] = 'big_core'
                            cur_freq[core_index] = big_min_freq
                            bigLITTLE_switch_cpu_list[core_index] = (new_cpu,
                                                                     old_cpu)

                        switching_cpu_index += 1

            #Update for loop index
            core_index += 1

        #Simulate last quantum and exit the simulation loop
        if (maxtick - m5.curTick()) <= switch_freq:
            exit_event = m5.simulate(maxtick - m5.curTick())
            for cpu_idx in range(num_cpu):
                print "big Core %d DVFS Stat." % (cpu_idx)
                for idx in range(14):
                    print "%dMHz: %d" % (big_freq_list[idx + 14 * cpu_idx][0],
                                         big_freq_list[idx + 14 * cpu_idx][1])
                print "LITTLE Core %d DVFS Stat." % (cpu_idx)
                for idx in range(12):
                    print "%dMHz: %d" % (
                        Llittle_freq_list[idx + 12 * cpu_idx][0],
                        Llittle_freq_list[idx + 12 * cpu_idx][1])

            return exit_event
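
# A minimal sketch (not part of the original script) of the per-core decision
# rule the loop above applies: scale the frequency in 100 MHz steps while the
# utilization thresholds allow it, and only migrate to the other cluster once
# the current core is pinned at its frequency limit.
def dvfs_decision(util, freq, core, up=0.95, down=0.85,
                  big=(800, 2100), little=(400, 1500)):
    lo, hi = big if core == 'big_core' else little
    if util >= up and freq < hi:
        return core, freq + 100          # frequency up-scaling
    if util < down and freq > lo:
        return core, freq - 100          # frequency down-scaling
    if core == 'big_core' and util < down and freq == lo:
        return 'LITTLE_core', little[1]  # big -> LITTLE at the LITTLE max freq
    if core == 'LITTLE_core' and util >= up and freq == hi:
        return 'big_core', big[0]        # LITTLE -> big at the big min freq
    return core, freq                    # keep the current operating point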
Beispiel #43
0
def run_test(root, switcher=None, freq=1000, verbose=False):
    """Test runner for CPU switcheroo tests.

    The switcheroo test runner is used to switch CPUs in a system that
    has been prepared for CPU switching. Such systems should have
    multiple CPUs when they are instantiated, but only one should be
    switched in. Such configurations can be created using the
    base_config.BaseFSSwitcheroo class.

    A CPU switcher object is used to control switching. The default
    switcher sequentially switches between all CPUs in a system,
    starting with the CPU that is currently switched in.

    Unlike most other test runners, this one automatically configures
    the memory mode of the system based on the first CPU the switcher
    reports.

    Keyword Arguments:
      switcher -- CPU switcher implementation. See Sequential for
                  an example implementation.
      period -- Switching frequency in Hz.
      verbose -- Enable output at each switch (suppressed by default).
    """

    if switcher == None:
        switcher = Sequential(root.system.cpu)

    current_cpu = switcher.first()
    system = root.system
    system.mem_mode = type(current_cpu).memory_mode()

    # Suppress "Entering event queue" messages since we get tons of them.
    # Worse yet, they include the timestamp, which makes them highly
    # variable and unsuitable for comparing as test outputs.
    if not verbose:
        _m5.core.setLogLevel(_m5.core.LogLevel.WARN)

    # instantiate configuration
    m5.instantiate()

    # Determine the switching period, this has to be done after
    # instantiating the system since the time base must be fixed.
    period = m5.ticks.fromSeconds(1.0 / freq)
    while True:
        exit_event = m5.simulate(period)
        exit_cause = exit_event.getCause()

        if exit_cause == "simulate() limit reached":
            next_cpu = switcher.next()

            if verbose:
                print("Switching CPUs...")
                print("Next CPU: %s" % type(next_cpu))
            m5.drain()
            if current_cpu != next_cpu:
                m5.switchCpus(system, [(current_cpu, next_cpu)],
                              verbose=verbose)
            else:
                print("Source CPU and destination CPU are the same,"
                      " skipping...")
            current_cpu = next_cpu
        elif exit_cause == "target called exit()" or \
                exit_cause == "m5_exit instruction encountered":

            sys.exit(0)
        else:
            print("Test failed: Unknown exit cause: %s" % exit_cause)
            sys.exit(1)
Beispiel #44
0
np = 4
# create a traffic generator, and point it to the file we just created
system.tgen = [ TrafficGen(config_file = cfg_file_name) for i in xrange(np)]

# Config memory system with given HMC arch
MemConfig.config_mem(options, system)

if options.arch == "distributed":
    for i in xrange(np):
        system.tgen[i].port = system.membus.slave
    # connect the system port even if it is not used in this example
    system.system_port = system.membus.slave

if options.arch == "mixed":
    for i in xrange(int(np/2)):
        system.tgen[i].port = system.membus.slave
    # connect the system port even if it is not used in this example
    system.system_port = system.membus.slave


# run Forrest, run!
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'

m5.instantiate()
m5.simulate(10000000000)

m5.stats.dump()

print "Done!"
Beispiel #45
0
def run(options, root, testsys, cpu_class):
    if options.checkpoint_dir:
        cptdir = options.checkpoint_dir
    elif m5.options.outdir:
        cptdir = m5.options.outdir
    else:
        cptdir = getcwd()

    if options.fast_forward and options.checkpoint_restore != None:
        fatal("Can't specify both --fast-forward and --checkpoint-restore")

    if options.standard_switch and not options.caches:
        fatal("Must specify --caches when using --standard-switch")

    if options.standard_switch and options.repeat_switch:
        fatal("Can't specify both --standard-switch and --repeat-switch")

    if options.repeat_switch and options.take_checkpoints:
        fatal("Can't specify both --repeat-switch and --take-checkpoints")

    np = options.num_cpus
    switch_cpus = None

    if options.prog_interval:
        for i in xrange(np):
            testsys.cpu[i].progress_interval = options.prog_interval

    if options.maxinsts:
        for i in xrange(np):
            testsys.cpu[i].max_insts_any_thread = options.maxinsts

    if cpu_class:
        switch_cpus = [cpu_class(switched_out=True, cpu_id=(i))
                       for i in xrange(np)]

        for i in xrange(np):
            if options.fast_forward:
                testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
            switch_cpus[i].system =  testsys
            switch_cpus[i].workload = testsys.cpu[i].workload
            switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
            switch_cpus[i].progress_interval = testsys.cpu[i].progress_interval
            # simulation period
            if options.maxinsts:
                switch_cpus[i].max_insts_any_thread = options.maxinsts
            # Add checker cpu if selected
            if options.checker:
                switch_cpus[i].addCheckerCpu()

        testsys.switch_cpus = switch_cpus
        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]

    if options.repeat_switch:
        switch_class = getCPUClass(options.cpu_type)[0]
        if switch_class.require_caches() and \
                not options.caches:
            print "%s: Must be used with caches" % str(switch_class)
            sys.exit(1)
        if not switch_class.support_take_over():
            print "%s: CPU switching not supported" % str(switch_class)
            sys.exit(1)

        repeat_switch_cpus = [switch_class(switched_out=True, \
                                               cpu_id=(i)) for i in xrange(np)]

        for i in xrange(np):
            repeat_switch_cpus[i].system = testsys
            repeat_switch_cpus[i].workload = testsys.cpu[i].workload
            repeat_switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain

            if options.maxinsts:
                repeat_switch_cpus[i].max_insts_any_thread = options.maxinsts

            if options.checker:
                repeat_switch_cpus[i].addCheckerCpu()

        testsys.repeat_switch_cpus = repeat_switch_cpus

        if cpu_class:
            repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i])
                                      for i in xrange(np)]
        else:
            repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i])
                                      for i in xrange(np)]

    if options.standard_switch:
        switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i))
                       for i in xrange(np)]
        switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i))
                        for i in xrange(np)]

        for i in xrange(np):
            switch_cpus[i].system =  testsys
            switch_cpus_1[i].system =  testsys
            switch_cpus[i].workload = testsys.cpu[i].workload
            switch_cpus_1[i].workload = testsys.cpu[i].workload
            switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
            switch_cpus_1[i].clk_domain = testsys.cpu[i].clk_domain

            # if restoring, make atomic cpu simulate only a few instructions
            if options.checkpoint_restore != None:
                testsys.cpu[i].max_insts_any_thread = 1
            # Fast forward to specified location if we are not restoring
            elif options.fast_forward:
                testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
            # Fast forward to a simpoint (warning: time consuming)
            elif options.simpoint:
                if testsys.cpu[i].workload[0].simpoint == 0:
                    fatal('simpoint not found')
                testsys.cpu[i].max_insts_any_thread = \
                    testsys.cpu[i].workload[0].simpoint
            # No distance specified, just switch
            else:
                testsys.cpu[i].max_insts_any_thread = 1

            # warmup period
            if options.warmup_insts:
                switch_cpus[i].max_insts_any_thread =  options.warmup_insts

            # simulation period
            if options.maxinsts:
                switch_cpus_1[i].max_insts_any_thread = options.maxinsts

            # attach the checker cpu if selected
            if options.checker:
                switch_cpus[i].addCheckerCpu()
                switch_cpus_1[i].addCheckerCpu()

        testsys.switch_cpus = switch_cpus
        testsys.switch_cpus_1 = switch_cpus_1
        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
        switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)]

    # set the checkpoint in the cpu before m5.instantiate is called
    if options.take_checkpoints != None and \
           (options.simpoint or options.at_instruction):
        offset = int(options.take_checkpoints)
        # Set an instruction break point
        if options.simpoint:
            for i in xrange(np):
                if testsys.cpu[i].workload[0].simpoint == 0:
                    fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
                checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset
                testsys.cpu[i].max_insts_any_thread = checkpoint_inst
                # used for output below
                options.take_checkpoints = checkpoint_inst
        else:
            options.take_checkpoints = offset
            # Set all test cpus with the right number of instructions
            # for the upcoming simulation
            for i in xrange(np):
                testsys.cpu[i].max_insts_any_thread = offset

    if options.take_simpoint_checkpoints != None:
        simpoints, interval_length = parseSimpointAnalysisFile(options, testsys)

    checkpoint_dir = None
    if options.checkpoint_restore:
        cpt_starttick, checkpoint_dir = findCptDir(options, cptdir, testsys)
    m5.instantiate(checkpoint_dir)

    # Initialization is complete.  If we're not in control of simulation
    # (that is, if we're a slave simulator acting as a component in another
    #  'master' simulator) then we're done here.  The other simulator will
    # call simulate() directly. --initialize-only is used to indicate this.
    if options.initialize_only:
        return

    # Handle the max tick settings now that tick frequency was resolved
    # during system instantiation
    # NOTE: the maxtick variable here is in absolute ticks, so it must
    # include any simulated ticks before a checkpoint
    explicit_maxticks = 0
    maxtick_from_abs = m5.MaxTick
    maxtick_from_rel = m5.MaxTick
    maxtick_from_maxtime = m5.MaxTick
    if options.abs_max_tick:
        maxtick_from_abs = options.abs_max_tick
        explicit_maxticks += 1
    if options.rel_max_tick:
        maxtick_from_rel = options.rel_max_tick
        if options.checkpoint_restore:
            # NOTE: this may need to be updated if checkpoints ever store
            # the ticks per simulated second
            maxtick_from_rel += cpt_starttick
            if options.at_instruction or options.simpoint:
                warn("Relative max tick specified with --at-instruction or" \
                     " --simpoint\n      These options don't specify the " \
                     "checkpoint start tick, so assuming\n      you mean " \
                     "absolute max tick")
        explicit_maxticks += 1
    if options.maxtime:
        maxtick_from_maxtime = m5.ticks.fromSeconds(options.maxtime)
        explicit_maxticks += 1
    if explicit_maxticks > 1:
        warn("Specified multiple of --abs-max-tick, --rel-max-tick, --maxtime."\
             " Using least")
    maxtick = min([maxtick_from_abs, maxtick_from_rel, maxtick_from_maxtime])
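    # For illustration (hypothetical values, not any option defaults): with
    # --abs-max-tick 2e12, --rel-max-tick 5e11 restored from a checkpoint that
    # starts at tick 1e12, and no --maxtime, maxtick resolves to
    # min(2e12, 1e12 + 5e11) = 1.5e12.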

    if options.checkpoint_restore != None and maxtick < cpt_starttick:
        fatal("Bad maxtick (%d) specified: " \
              "Checkpoint starts starts from tick: %d", maxtick, cpt_starttick)

    if options.standard_switch or cpu_class:
        if options.standard_switch:
            print "Switch at instruction count:%s" % \
                    str(testsys.cpu[0].max_insts_any_thread)
            exit_event = m5.simulate()
        elif cpu_class and options.fast_forward:
            print "Switch at instruction count:%s" % \
                    str(testsys.cpu[0].max_insts_any_thread)
            exit_event = m5.simulate()
        else:
            print "Switch at curTick count:%s" % str(10000)
            exit_event = m5.simulate(10000)
        print "Switched CPUS @ tick %s" % (m5.curTick())

        m5.switchCpus(testsys, switch_cpu_list)

        if options.standard_switch:
            print "Switch at instruction count:%d" % \
                    (testsys.switch_cpus[0].max_insts_any_thread)

            #warmup instruction count may have already been set
            if options.warmup_insts:
                exit_event = m5.simulate()
            else:
                exit_event = m5.simulate(options.standard_switch)
            print "Switching CPUS @ tick %s" % (m5.curTick())
            print "Simulation ends instruction count:%d" % \
                    (testsys.switch_cpus_1[0].max_insts_any_thread)
            m5.switchCpus(testsys, switch_cpu_list1)

    # If we're taking and restoring checkpoints, use checkpoint_dir
    # option only for finding the checkpoints to restore from.  This
    # lets us test checkpointing by restoring from one set of
    # checkpoints, generating a second set, and then comparing them.
    if (options.take_checkpoints or options.take_simpoint_checkpoints) \
        and options.checkpoint_restore:

        if m5.options.outdir:
            cptdir = m5.options.outdir
        else:
            cptdir = getcwd()

    if options.take_checkpoints != None :
        # Checkpoints being taken via the command line at <when> and at
        # subsequent periods of <period>.  Checkpoint instructions
        # received from the benchmark running are ignored and skipped in
        # favor of command line checkpoint instructions.
        exit_event = scriptCheckpoints(options, maxtick, cptdir)

    # Take SimPoint checkpoints
    elif options.take_simpoint_checkpoints != None:
        takeSimpointCheckpoints(simpoints, interval_length, cptdir)

    # Restore from SimPoint checkpoints
    elif options.restore_simpoint_checkpoint != None:
        restoreSimpointCheckpoint()

    else:
        if options.fast_forward:
            m5.stats.reset()
        print "**** REAL SIMULATION ****"

        # If checkpoints are being taken, then the checkpoint instruction
        # will occur in the benchmark code itself.
        if options.repeat_switch and maxtick > options.repeat_switch:
            exit_event = repeatSwitch(testsys, repeat_switch_cpu_list,
                                      maxtick, options.repeat_switch)
        else:
            exit_event = benchCheckpoints(options, maxtick, cptdir)

    print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())
    if options.checkpoint_at_end:
        m5.checkpoint(joinpath(cptdir, "cpt.%d"))

    if not m5.options.interactive:
        sys.exit(exit_event.getCode())
Beispiel #46
0
def main():
    parser = argparse.ArgumentParser(epilog=__doc__)

    parser.add_argument("commands_to_run",
                        metavar="command(s)",
                        nargs='*',
                        help="Command(s) to run")
    parser.add_argument("--cpu",
                        type=str,
                        choices=list(cpu_types.keys()),
                        default="atomic",
                        help="CPU model to use")
    parser.add_argument("--cpu-freq", type=str, default="4GHz")
    parser.add_argument("--num-cores",
                        type=int,
                        default=1,
                        help="Number of CPU cores")
    parser.add_argument("--mem-type",
                        default="DDR3_1600_8x8",
                        choices=ObjectList.mem_list.get_names(),
                        help="type of memory to use")
    parser.add_argument("--mem-channels",
                        type=int,
                        default=2,
                        help="number of memory channels")
    parser.add_argument("--mem-ranks",
                        type=int,
                        default=None,
                        help="number of memory ranks per channel")
    parser.add_argument("--mem-size",
                        action="store",
                        type=str,
                        default="2GB",
                        help="Specify the physical memory size")

    args = parser.parse_args()

    # Create a single root node for gem5's object hierarchy. There can
    # only exist one root node in the simulator at any given
    # time. Tell gem5 that we want to use syscall emulation mode
    # instead of full system mode.
    root = Root(full_system=False)

    # Populate the root node with a system. A system corresponds to a
    # single node with shared memory.
    root.system = create(args)

    # Instantiate the C++ object hierarchy. After this point,
    # SimObjects can't be instantiated anymore.
    m5.instantiate()

    # Start the simulator. This gives control to the C++ world and
    # starts the simulator. The returned event tells the simulation
    # script why the simulator exited.
    event = m5.simulate()

    # Print the reason for the simulation exit. Some exit codes are
    # requests for service (e.g., checkpoints) from the simulation
    # script. We'll just ignore them here and exit.
    print(event.getCause(), "@", m5.curTick())
    sys.exit(event.getCode())
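
The create(args) helper referenced above is defined elsewhere in the original script and is not shown here. As a rough illustration of what such a builder typically does, the sketch below assembles a minimal SE-mode system from the parsed arguments. Everything in it is an assumption: it uses the older classic master/slave port names (as the other examples in this collection do), substitutes SimpleMemory for the --mem-type controller, always uses TimingSimpleCPU regardless of --cpu, and shares one workload across cores.

import m5
from m5.objects import (System, SrcClockDomain, VoltageDomain, AddrRange,
                        SystemXBar, SimpleMemory, TimingSimpleCPU, Process)

def create_minimal(args):
    # Hypothetical stand-in for create(); builds a bare SE-mode system.
    system = System()
    system.clk_domain = SrcClockDomain(clock=args.cpu_freq,
                                       voltage_domain=VoltageDomain())
    system.mem_mode = 'timing'
    system.mem_ranges = [AddrRange(args.mem_size)]

    # One crossbar between the CPUs and a simple fixed-latency memory
    # (ignores --mem-type/--mem-channels for brevity).
    system.membus = SystemXBar()
    system.system_port = system.membus.slave
    system.mem_ctrl = SimpleMemory(range=system.mem_ranges[0])
    system.mem_ctrl.port = system.membus.master

    system.cpu = [TimingSimpleCPU(cpu_id=i) for i in range(args.num_cores)]
    process = Process(cmd=args.commands_to_run[0].split())
    for cpu in system.cpu:
        # Connect caches-less CPUs straight to the crossbar; ISAs such as
        # x86 may need extra interrupt-port wiring not shown here.
        cpu.icache_port = system.membus.slave
        cpu.dcache_port = system.membus.slave
        cpu.workload = process
        cpu.createThreads()
        cpu.createInterruptController()
    return system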
Beispiel #47
0
                tester.port = xbar.slave
        else:
            # Single tester
            testers[0].port = next_cache.cpu_side


# Top level call to create the cache hierarchy, bottom up
make_cache_level(cachespec, cache_proto, len(cachespec), None)

# Connect the lowest level crossbar to the memory
last_subsys = getattr(system, "l%dsubsys0" % len(cachespec))
last_subsys.xbar.master = system.physmem.port

root = Root(full_system=False, system=system)
if options.atomic:
    root.system.mem_mode = "atomic"
else:
    root.system.mem_mode = "timing"

# The system port is never used in the tester so merely connect it
# to avoid problems
root.system.system_port = last_subsys.xbar.slave

# Instantiate configuration
m5.instantiate()

# Simulate until program terminates
exit_event = m5.simulate(options.maxtick)

print "Exiting @ tick", m5.curTick(), "because", exit_event.getCause()
Beispiel #48
0
# Set up the system
system.mem_mode = 'atomic'  # Use atomic accesses
system.mem_ranges = [AddrRange('4GB')]  # Create an address range

# Create a simple fixed-latency memory controller
system.mem_ctrl = SimpleMemory(latency='0ns', bandwidth='0GB/s')
system.mem_ctrl.range = system.mem_ranges[0]

# Set up the binary to load
system.kernel = binary
system.system_port = system.mem_ctrl.port

# Create the dinocpu verilator wrapper
system.dinocpu = VerilatorDinoCPU()

# Create the mem black box verilator wrapper
system.verilator_mem = DinoCPUCombMemBlackBox()

system.verilator_mem.inst_port = system.mem_ctrl.port
system.verilator_mem.data_port = system.mem_ctrl.port

# set up the root SimObject and start the simulation
root = Root(full_system=True, system=system)

# instantiate all of the objects we've created above
m5.instantiate()

print("Beginning simulation!")
exit_event = m5.simulate(200000)
print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause()))
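
The fixed 200000-tick budget above will usually expire before the DinoCPU workload finishes. One possible continuation (an assumption, not part of the original script) is to keep simulating in equal quanta, dumping and resetting statistics after each one, until the guest exits for some other reason:

# Keep going while the only exit reason is the tick limit.
while exit_event.getCause() == "simulate() limit reached":
    m5.stats.dump()
    m5.stats.reset()
    exit_event = m5.simulate(200000)
print('Final exit @ tick %i because %s' % (m5.curTick(), exit_event.getCause()))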