def build_pardg5v_system(np):
    """Build and wire up a PARDg5-V full-system simulation object.

    np -- number of CPU objects to instantiate.

    Relies on module-level state prepared by the enclosing script
    (``options``, ``bm``, ``test_mem_mode``, ``TestCPUClass``, ``buildEnv``)
    and returns the fully configured system, ready to be placed under a
    Root object by the caller.
    """
    # PARDg5-V systems are only implemented for x86; any other ISA aborts.
    if buildEnv['TARGET_ISA'] == "x86":
        pardsys = makePARDg5VSystem(test_mem_mode, options.num_cpus, bm[0])
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])
    # Set the cache line size for the entire system
    pardsys.cache_line_size = options.cacheline_size
    # Create a top-level voltage domain
    pardsys.voltage_domain = VoltageDomain(voltage=options.sys_voltage)
    # Create a source clock for the system and set the clock period
    pardsys.clk_domain = SrcClockDomain(clock=options.sys_clock,
                                        voltage_domain=pardsys.voltage_domain)
    # Create a CPU voltage domain
    pardsys.cpu_voltage_domain = VoltageDomain()
    # Create a source clock for the CPUs and set the clock period
    pardsys.cpu_clk_domain = SrcClockDomain(
        clock=options.cpu_clock,
        voltage_domain=pardsys.cpu_voltage_domain)
    if options.kernel is not None:
        pardsys.kernel = binary(options.kernel)
    if options.script is not None:
        pardsys.readfile = options.script
    pardsys.init_param = options.init_param
    # For now, assign all the CPUs to the same clock domain
    pardsys.cpu = [
        TestCPUClass(clk_domain=pardsys.cpu_clk_domain, cpu_id=i)
        for i in xrange(np)
    ]
    if options.caches or options.l2cache:
        # By default the IOCache runs at the system clock
        # NOTE(review): the ranges cover 0-3GB and 4GB-8GB, skipping 3-4GB --
        # presumably the x86 PCI/IO hole; confirm against the platform map.
        pardsys.iocache = IOCache(
            addr_ranges=[AddrRange('3GB'),
                         AddrRange(start='4GB', size='4GB')])
        pardsys.iocache.cpu_side = pardsys.iobus.master
        pardsys.iocache.mem_side = pardsys.membus.slave
    else:
        # No caches: forward DMA/IO-bus traffic to memory via a plain bridge.
        pardsys.iobridge = Bridge(
            delay='50ns',
            ranges=[AddrRange('3GB'),
                    AddrRange(start='4GB', size='4GB')])
        pardsys.iobridge.slave = pardsys.iobus.master
        pardsys.iobridge.master = pardsys.membus.slave
    for i in xrange(np):
        pardsys.cpu[i].createThreads()
    # Hand the system to the cache and (PARD-specific) memory configurators.
    CacheConfig.config_cache(options, pardsys)
    XMemConfig.config_mem(options, pardsys)
    return pardsys
def build_pardg5v_system(np):
    """Construct a PARDg5-V full-system simulation for ``np`` CPUs.

    Reads the surrounding script's globals (``options``, ``bm``,
    ``test_mem_mode``, ``TestCPUClass``, ``buildEnv``) and returns the
    configured system object.

    NOTE(review): a near-identical definition of this function appears
    elsewhere in this source; if both live in one module, the later
    definition shadows the earlier one at import time -- verify intent.
    """
    # Only the x86 target has a PARDg5-V system builder.
    if buildEnv['TARGET_ISA'] == "x86":
        pardsys = makePARDg5VSystem(test_mem_mode, options.num_cpus, bm[0])
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])
    # Set the cache line size for the entire system
    pardsys.cache_line_size = options.cacheline_size
    # Create a top-level voltage domain
    pardsys.voltage_domain = VoltageDomain(voltage=options.sys_voltage)
    # Create a source clock for the system and set the clock period
    pardsys.clk_domain = SrcClockDomain(clock=options.sys_clock,
                                        voltage_domain=pardsys.voltage_domain)
    # Create a CPU voltage domain
    pardsys.cpu_voltage_domain = VoltageDomain()
    # Create a source clock for the CPUs and set the clock period
    pardsys.cpu_clk_domain = SrcClockDomain(
        clock=options.cpu_clock,
        voltage_domain=pardsys.cpu_voltage_domain)
    if options.kernel is not None:
        pardsys.kernel = binary(options.kernel)
    if options.script is not None:
        pardsys.readfile = options.script
    pardsys.init_param = options.init_param
    # For now, assign all the CPUs to the same clock domain
    pardsys.cpu = [
        TestCPUClass(clk_domain=pardsys.cpu_clk_domain, cpu_id=i)
        for i in xrange(np)
    ]
    if options.caches or options.l2cache:
        # By default the IOCache runs at the system clock
        # NOTE(review): ranges omit 3GB-4GB -- looks like the x86 IO hole;
        # confirm against the platform memory map.
        pardsys.iocache = IOCache(
            addr_ranges=[AddrRange('3GB'),
                         AddrRange(start='4GB', size='4GB')])
        pardsys.iocache.cpu_side = pardsys.iobus.master
        pardsys.iocache.mem_side = pardsys.membus.slave
    else:
        # Cacheless configuration: bridge the IO bus to the memory bus.
        pardsys.iobridge = Bridge(
            delay='50ns',
            ranges=[AddrRange('3GB'),
                    AddrRange(start='4GB', size='4GB')])
        pardsys.iobridge.slave = pardsys.iobus.master
        pardsys.iobridge.master = pardsys.membus.slave
    for i in xrange(np):
        pardsys.cpu[i].createThreads()
    # Delegate cache hierarchy and memory-controller setup.
    CacheConfig.config_cache(options, pardsys)
    XMemConfig.config_mem(options, pardsys)
    return pardsys
system.cpu[i].addCheckerCpu() if options.ruby: if not (options.cpu_type == "detailed" or options.cpu_type == "timing"): print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!" sys.exit(1) options.use_map = True Ruby.create_system(options, system) assert (options.num_cpus == len(system.ruby._cpu_ruby_ports)) for i in xrange(np): ruby_port = system.ruby._cpu_ruby_ports[i] # Create the interrupt controller and connect its ports to Ruby system.cpu[i].createInterruptController() system.cpu[i].interrupts.pio = ruby_port.master system.cpu[i].interrupts.int_master = ruby_port.slave system.cpu[i].interrupts.int_slave = ruby_port.master # Connect the cpu's cache ports to Ruby system.cpu[i].icache_port = ruby_port.slave system.cpu[i].dcache_port = ruby_port.slave else: system.system_port = system.membus.slave system.physmem.port = system.membus.master CacheConfig.config_cache(options, system) root = Root(full_system=False, system=system) Simulation.run(options, root, system, FutureClass, 1)
# Sanity check if options.fastmem: if TestCPUClass != AtomicSimpleCPU: fatal("Fastmem can only be used with atomic CPU!") if (options.caches or options.l2cache): fatal("You cannot use fastmem in combination with caches!") for i in xrange(np): if options.fastmem: test_sys.cpu[i].fastmem = True if options.checker: test_sys.cpu[i].addCheckerCpu() test_sys.cpu[i].createThreads() CacheConfig.config_cache(options, test_sys) MemConfig.config_mem(options, test_sys) if len(bm) == 2: if buildEnv['TARGET_ISA'] == 'alpha': drive_sys = makeLinuxAlphaSystem(drive_mem_mode, bm[1]) elif buildEnv['TARGET_ISA'] == 'mips': drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1]) elif buildEnv['TARGET_ISA'] == 'sparc': drive_sys = makeSparcSystem(drive_mem_mode, bm[1]) elif buildEnv['TARGET_ISA'] == 'x86': drive_sys = makeLinuxX86System(drive_mem_mode, np, bm[1]) elif buildEnv['TARGET_ISA'] == 'arm': drive_sys = makeArmSystem(drive_mem_mode, options.machine_type, bm[1]) # Create a top-level voltage domain
def addCommonOptions(parser):
    """Register the command-line options shared by the gem5 config scripts.

    parser -- an ``optparse.OptionParser``; mutated in place (returns None).

    Fixes relative to the previous version:
      * ``--l1d_hit_latency``, ``--l1i_hit_latency`` and ``--l2_hit_latency``
        declared ``type="int"`` but used *string* defaults ("2"/"20").
        optparse does not coerce defaults, so the option value was a str when
        unspecified and an int when given on the command line.  The defaults
        are now real ints.
      * ``-p/--prog-interval`` used the nonstandard ``type="str"`` alias;
        normalized to ``"string"`` (optparse treats them identically).
    """
    # system options
    parser.add_option("--list-cpu-types", action="callback",
                      callback=_listCpuTypes,
                      help="List available CPU types")
    parser.add_option("--cpu-type", type="choice", default="atomic",
                      choices=CpuConfig.cpu_names(),
                      help="type of cpu to run with")
    parser.add_option("--checker", action="store_true")
    parser.add_option("-n", "--num-cpus", type="int", default=1)
    parser.add_option("--sys-voltage", action="store", type="string",
                      default='1.0V',
                      help="""Top-level voltage for blocks running at system power supply""")
    parser.add_option("--sys-clock", action="store", type="string",
                      default='1GHz',
                      help="""Top-level clock for blocks running at system speed""")
    parser.add_option("--cpu-clock", action="store", type="string",
                      default='1GHz',
                      help="Clock for blocks running at CPU speed")
    parser.add_option("--smt", action="store_true", default=False,
                      help=""" Only used if multiple programs are specified. If true, then the number of threads per cpu is same as the number of programs.""")

    # Memory Options
    parser.add_option("--list-mem-types", action="callback",
                      callback=_listMemTypes,
                      help="List available memory types")
    parser.add_option("--mem-type", type="choice", default="DDR3_1600_x64",
                      choices=MemConfig.mem_names(),
                      help="type of memory to use")
    parser.add_option("--mem-channels", type="int", default=1,
                      help="number of memory channels")
    parser.add_option("--mem-ranks", type="int", default=None,
                      help="number of memory ranks per channel")
    parser.add_option("--mem-size", action="store", type="string",
                      default="128MB",
                      help="Specify the physical memory size (single memory)")
    parser.add_option("--mem-latency", action="store", type="string",
                      default="30ns",
                      help="Specify the physical memory latency (simple_mem only)")
    parser.add_option("--is_perfect_bus", type="int", default=0)

    parser.add_option("-l", "--lpae", action="store_true")
    parser.add_option("-V", "--virtualisation", action="store_true")

    parser.add_option("--memchecker", action="store_true")

    # Cache Options
    parser.add_option("--external-memory-system", type="string",
                      help="use external ports of this port_type for caches")
    parser.add_option("--caches", action="store_true")
    parser.add_option("--is_perfect_cache", type="int", default=0)
    parser.add_option("--is_perfect_l2_cache", type="int", default=0)
    parser.add_option("--is_perfect_l2_bus", type="int", default=0)
    parser.add_option("--enable_prefetchers", action="store_true")
    parser.add_option("--prefetcher-type", type="choice", default="tagged",
                      choices=CacheConfig.prefetcher_names(),
                      help="type of cache prefetcher to use")
    parser.add_option("--l2cache", action="store_true")
    parser.add_option("--fastmem", action="store_true")
    parser.add_option("--num-dirs", type="int", default=1)
    parser.add_option("--num-l2caches", type="int", default=1)
    parser.add_option("--num-l3caches", type="int", default=1)
    parser.add_option("--l1d_size", type="string", default="64kB")
    parser.add_option("--l1i_size", type="string", default="32kB")
    parser.add_option("--l2_size", type="string", default="2MB")
    parser.add_option("--l3_size", type="string", default="16MB")
    parser.add_option("--l1d_assoc", type="int", default=2)
    parser.add_option("--l1i_assoc", type="int", default=2)
    parser.add_option("--l2_assoc", type="int", default=8)
    parser.add_option("--l3_assoc", type="int", default=16)
    # Defaults are ints (not strings) so the option value has a consistent
    # type whether or not the user supplies it on the command line.
    parser.add_option("--l1d_hit_latency", type="int", default=2)
    parser.add_option("--l1i_hit_latency", type="int", default=2)
    parser.add_option("--l2_hit_latency", type="int", default=20)
    parser.add_option("--cacheline_size", type="int", default=64)
    parser.add_option("--xbar_width", type="int", default=8)
    parser.add_option("--record-dram-traffic", action="store_true",
                      help="Record DRAM memory traffic packets to file (requires protobuf).")

    # Aladdin Options
    parser.add_option("--accel_cfg_file", default=None,
                      help="Aladdin accelerator configuration file.")

    # Enable Ruby
    parser.add_option("--ruby", action="store_true")

    # Run duration options
    parser.add_option("-m", "--abs-max-tick", type="int", default=m5.MaxTick,
                      metavar="TICKS",
                      help="Run to absolute simulated tick "
                           "specified including ticks from a restored checkpoint")
    parser.add_option("--rel-max-tick", type="int", default=None,
                      metavar="TICKS",
                      help="Simulate for specified number of"
                           " ticks relative to the simulation start tick (e.g. if "
                           "restoring a checkpoint)")
    parser.add_option("--maxtime", type="float", default=None,
                      help="Run to the specified absolute simulated time in "
                           "seconds")
    parser.add_option("-I", "--maxinsts", action="store", type="int",
                      default=None,
                      help="""Total number of instructions to simulate (default: run forever)""")
    parser.add_option("--work-item-id", action="store", type="int",
                      help="the specific work id for exit & checkpointing")
    parser.add_option("--num-work-ids", action="store", type="int",
                      help="Number of distinct work item types")
    parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
                      help="exit when work starts on the specified cpu")
    parser.add_option("--work-end-exit-count", action="store", type="int",
                      help="exit at specified work end count")
    parser.add_option("--work-begin-exit-count", action="store", type="int",
                      help="exit at specified work begin count")
    parser.add_option("--init-param", action="store", type="int", default=0,
                      help="""Parameter available in simulation with m5 initparam""")
    parser.add_option("--initialize-only", action="store_true", default=False,
                      help="""Exit after initialization. Do not simulate time. Useful when gem5 is run as a library.""")

    # Simpoint options
    parser.add_option("--simpoint-profile", action="store_true",
                      help="Enable basic block profiling for SimPoints")
    parser.add_option("--simpoint-interval", type="int", default=10000000,
                      help="SimPoint interval in num of instructions")
    parser.add_option("--take-simpoint-checkpoints", action="store",
                      type="string",
                      help="<simpoint file,weight file,interval-length,warmup-length>")
    parser.add_option("--restore-simpoint-checkpoint", action="store_true",
                      help="restore from a simpoint checkpoint taken with " +
                           "--take-simpoint-checkpoints")

    # Checkpointing options
    ###Note that performing checkpointing via python script files will override
    ###checkpoint instructions built into binaries.
    parser.add_option("--take-checkpoints", action="store", type="string",
                      help="<M,N> take checkpoints at tick M and every N ticks thereafter")
    parser.add_option("--max-checkpoints", action="store", type="int",
                      help="the maximum number of checkpoints to drop",
                      default=5)
    parser.add_option("--checkpoint-dir", action="store", type="string",
                      help="Place all checkpoints in this absolute directory")
    parser.add_option("-r", "--checkpoint-restore", action="store",
                      type="int", help="restore from checkpoint <N>")
    parser.add_option("--checkpoint-at-end", action="store_true",
                      help="take a checkpoint at end of run")
    parser.add_option("--work-begin-checkpoint-count", action="store",
                      type="int",
                      help="checkpoint at specified work begin count")
    parser.add_option("--work-end-checkpoint-count", action="store",
                      type="int",
                      help="checkpoint at specified work end count")
    parser.add_option("--work-cpus-checkpoint-count", action="store",
                      type="int",
                      help="checkpoint and exit when active cpu count is reached")
    parser.add_option("--restore-with-cpu", action="store", type="choice",
                      default="atomic", choices=CpuConfig.cpu_names(),
                      help="cpu type for restoring from a checkpoint")

    # CPU Switching - default switch model goes from a checkpoint
    # to a timing simple CPU with caches to warm up, then to detailed CPU for
    # data measurement
    parser.add_option("--repeat-switch", action="store", type="int",
                      default=None,
                      help="switch back and forth between CPUs with period <N>")
    parser.add_option("-s", "--standard-switch", action="store", type="int",
                      default=None,
                      help="switch from timing to Detailed CPU after warmup period of <N>")
    parser.add_option("-p", "--prog-interval", type="string",
                      help="CPU Progress Interval")

    # Fastforwarding and simpoint related materials
    parser.add_option("-W", "--warmup-insts", action="store", type="int",
                      default=None,
                      help="Warmup period in total instructions (requires --standard-switch)")
    parser.add_option("--bench", action="store", type="string", default=None,
                      help="base names for --take-checkpoint and --checkpoint-restore")
    parser.add_option("-F", "--fast-forward", action="store", type="string",
                      default=None,
                      help="Number of instructions to fast forward before switching")
    parser.add_option("-S", "--simpoint", action="store_true", default=False,
                      help="""Use workload simpoints as an instruction offset for --checkpoint-restore or --take-checkpoint.""")
    parser.add_option("--at-instruction", action="store_true", default=False,
                      help="""Treat value of --checkpoint-restore or --take-checkpoint as a number of instructions.""")
    parser.add_option("--spec-input", default="ref", type="choice",
                      choices=["ref", "test", "train", "smred", "mdred",
                               "lgred"],
                      help="Input set size for SPEC CPU2000 benchmarks.")
    parser.add_option("--arm-iset", default="arm", type="choice",
                      choices=["arm", "thumb", "aarch64"],
                      help="ARM instruction set.")

    # Stats options.
    parser.add_option("--enable-stats-dump", action="store_true",
                      default=False,
                      help="Dump stats if sim loop exits with cause \"dump statistics\".")
def build_test_system(np):
    """Build the full-system "test" system for ``np`` CPUs.

    Dispatches on ``buildEnv['TARGET_ISA']`` to the matching make*System
    helper, then configures clocks/voltages, CPUs, and either a Ruby or a
    classic (bus + cache) memory system.  Uses module-level state
    (``options``, ``bm``, ``test_mem_mode``, ``TestCPUClass``,
    ``FutureClass``) and returns the configured system.
    """
    # Kernel command line template (may be None; forwarded to the builders).
    cmdline = cmd_line_template()
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby,
                                        cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby, cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(
            test_mem_mode, options.machine_type, options.num_cpus, bm[0],
            options.dtb_filename, bare_metal=options.bare_metal,
            cmdline=cmdline,
            external_memory=options.external_memory_system)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size

    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage=options.sys_voltage)

    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(
        clock=options.sys_clock, voltage_domain=test_sys.voltage_domain)

    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()

    # Create a source clock for the CPUs and set the clock period
    test_sys.cpu_clk_domain = SrcClockDomain(
        clock=options.cpu_clock, voltage_domain=test_sys.cpu_voltage_domain)

    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)

    if options.script is not None:
        test_sys.readfile = options.script

    if options.lpae:
        test_sys.have_lpae = True

    if options.virtualisation:
        test_sys.have_virtualization = True

    #change the bootloader here
    #print "change boot loader"
    #print test_sys.boot_loader
    # NOTE(review): unconditional override -- requires options.issd_bootloader
    # to be defined for every run; confirm the option always has a value.
    test_sys.boot_loader = options.issd_bootloader
    #print test_sys.boot_loader

    test_sys.init_param = options.init_param

    # For now, assign all the CPUs to the same clock domain
    test_sys.cpu = [
        TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
        for i in xrange(np)
    ]

    # KVM CPUs need a virtual-machine object on the system.
    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()

    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed"
                or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)

        Ruby.create_system(options, True, test_sys, test_sys.iobus,
                           test_sys._dma_ports)

        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(
            clock=options.ruby_clock, voltage_domain=test_sys.voltage_domain)

        # Connect the ruby io port to the PIO bus,
        # assuming that there is just one such port.
        test_sys.iobus.master = test_sys.ruby._io_port.slave

        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] == "x86":
                # Page-table walkers and the interrupt controller also go
                # through the Ruby sequencer on x86.
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master
    else:
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges=test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        elif not options.external_memory_system:
            # No caches and no external memory system: plain bridge.
            test_sys.iobridge = Bridge(delay='50ns',
                                       ranges=test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave

        # Sanity check
        if options.fastmem:
            if TestCPUClass != AtomicSimpleCPU:
                fatal("Fastmem can only be used with atomic CPU!")
            if (options.caches or options.l2cache):
                fatal("You cannot use fastmem in combination with caches!")

        if options.simpoint_profile:
            if not options.fastmem:
                # Atomic CPU checked with fastmem option already
                fatal(
                    "SimPoint generation should be done with atomic cpu and fastmem"
                )
            if np > 1:
                fatal(
                    "SimPoint generation not supported with more than one CPUs"
                )

        # Per-CPU feature flags and hardware-thread creation.
        for i in xrange(np):
            if options.fastmem:
                test_sys.cpu[i].fastmem = True
            if options.simpoint_profile:
                test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
            if options.checker:
                test_sys.cpu[i].addCheckerCpu()
            test_sys.cpu[i].createThreads()

        CacheConfig.config_cache(options, test_sys)
        MemConfig.config_mem(options, test_sys)

    return test_sys
def build_test_system(np):
    """Build a full-system "test" system with per-CPU DVFS clock domains.

    This variant creates four independent CPU clock domains (13 operating
    points each, 1.4GHz down to 0.2GHz), instantiates exactly four CPUs --
    one per domain/socket -- and enables the DVFS handler over those
    domains.  Uses module-level state (``options``, ``bm``,
    ``test_mem_mode``, ``TestCPUClass``, ``FutureClass``, ``TestMemClass``)
    and returns the configured system.

    NOTE(review): the CPU list is hardcoded to 4 entries while later loops
    iterate ``xrange(np)`` -- behavior for np != 4 looks unintended; confirm
    callers always pass np == 4.
    """
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type, bm[0],
                                 options.dtb_filename,
                                 bare_metal=options.bare_metal,
                                 sdcard_image=options.sdcard_image)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size

    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage=options.sys_voltage)

    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(
        clock=options.sys_clock, voltage_domain=test_sys.voltage_domain)

    #Create a clk running contantly at 1.4GHz for L2
    test_sys.clk_domain_const = SrcClockDomain(
        clock=["1.4GHz"], voltage_domain=test_sys.voltage_domain)

    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()

    # Create a source clock for the CPUs and set the clock period
    #test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
    #                                         voltage_domain =
    #                                         test_sys.cpu_voltage_domain)
    #test_sys.cpu_clk_domain = SrcClockDomain(clock = ["3GHz","2GHz","1GHz"],
    # Four DVFS-capable domains; the first clock in the list is the initial
    # operating point, the rest are switchable performance levels.
    test_sys.cpu_clk_domain = SrcClockDomain(
        clock=[
            "1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz",
            "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz",
            "0.2GHz"
        ],
        voltage_domain=test_sys.cpu_voltage_domain,
        domain_id=0)
    test_sys.cpu_clk_domain1 = SrcClockDomain(
        clock=[
            "1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz",
            "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz",
            "0.2GHz"
        ],
        voltage_domain=test_sys.cpu_voltage_domain,
        domain_id=1)
    test_sys.cpu_clk_domain2 = SrcClockDomain(
        clock=[
            "1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz",
            "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz",
            "0.2GHz"
        ],
        voltage_domain=test_sys.cpu_voltage_domain,
        domain_id=2)
    test_sys.cpu_clk_domain3 = SrcClockDomain(
        clock=[
            "1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz",
            "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz",
            "0.2GHz"
        ],
        voltage_domain=test_sys.cpu_voltage_domain,
        domain_id=3)

    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)

    if options.script is not None:
        test_sys.readfile = options.script

    if options.lpae:
        test_sys.have_lpae = True

    if options.virtualisation:
        test_sys.have_virtualization = True

    test_sys.init_param = options.init_param

    # For now, assign all the CPUs to the same clock domain
    #test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
    #               for i in xrange(np)]
    # One CPU per clock domain / socket (see NOTE in docstring about np).
    test_sys.cpu = [
        TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=0,
                     socket_id=0),
        TestCPUClass(clk_domain=test_sys.cpu_clk_domain1, cpu_id=1,
                     socket_id=1),
        TestCPUClass(clk_domain=test_sys.cpu_clk_domain2, cpu_id=2,
                     socket_id=2),
        TestCPUClass(clk_domain=test_sys.cpu_clk_domain3, cpu_id=3,
                     socket_id=3)
    ]

    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()

    # Turn on the DVFS handler over the four per-CPU domains.
    test_sys.dvfs_handler.enable = True
    test_sys.dvfs_handler.transform_enable = True  # We do want O3 CPU to transform
    test_sys.dvfs_handler.domains = [
        test_sys.cpu_clk_domain, test_sys.cpu_clk_domain1,
        test_sys.cpu_clk_domain2, test_sys.cpu_clk_domain3
    ]

    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed"
                or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)

        Ruby.create_system(options, test_sys, test_sys.iobus,
                           test_sys._dma_ports)

        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(
            clock=options.ruby_clock, voltage_domain=test_sys.voltage_domain)

        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] == "x86":
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master
            test_sys.ruby._cpu_ports[i].access_phys_mem = True

        # Create the appropriate memory controllers
        # and connect them to the IO bus
        test_sys.mem_ctrls = [
            TestMemClass(range=r) for r in test_sys.mem_ranges
        ]
        for i in xrange(len(test_sys.mem_ctrls)):
            test_sys.mem_ctrls[i].port = test_sys.iobus.master
    else:
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges=test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        else:
            test_sys.iobridge = Bridge(delay='50ns',
                                       ranges=test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave

        # Sanity check
        if options.fastmem:
            if TestCPUClass != AtomicSimpleCPU:
                fatal("Fastmem can only be used with atomic CPU!")
            if (options.caches or options.l2cache):
                fatal("You cannot use fastmem in combination with caches!")

        for i in xrange(np):
            if options.fastmem:
                test_sys.cpu[i].fastmem = True
            if options.checker:
                test_sys.cpu[i].addCheckerCpu()
            test_sys.cpu[i].createThreads()

        CacheConfig.config_cache(options, test_sys)
        MemConfig.config_mem(options, test_sys)

    return test_sys
def build_test_system(np):
    """Build the full-system "test" system for ``np`` CPUs.

    Variant with per-CPU function tracing (``function_trace``) and optional
    elastic-trace probe support.  Dispatches on ``buildEnv['TARGET_ISA']``,
    then sets up clocks/voltages, CPUs, and either a Ruby or a classic
    memory system.  Uses module-level state (``options``, ``bm``,
    ``test_mem_mode``, ``TestCPUClass``, ``FutureClass``) and returns the
    configured system.
    """
    # Kernel command line template (may be None; forwarded to the builders).
    cmdline = cmd_line_template()
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby,
                                        cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby, cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type,
                                 options.num_cpus, bm[0],
                                 options.dtb_filename,
                                 bare_metal=options.bare_metal,
                                 cmdline=cmdline,
                                 external_memory=options.external_memory_system)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size

    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage=options.sys_voltage)

    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(
        clock=options.sys_clock, voltage_domain=test_sys.voltage_domain)

    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()

    # Create a source clock for the CPUs and set the clock period
    test_sys.cpu_clk_domain = SrcClockDomain(
        clock=options.cpu_clock, voltage_domain=test_sys.cpu_voltage_domain)

    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)

    if options.script is not None:
        test_sys.readfile = options.script

    if options.lpae:
        test_sys.have_lpae = True

    if options.virtualisation:
        test_sys.have_virtualization = True

    test_sys.init_param = options.init_param

    # For now, assign all the CPUs to the same clock domain
    test_sys.cpu = [
        TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i,
                     function_trace=options.enable_trace)
        for i in xrange(np)
    ]

    # KVM CPUs need a virtual-machine object on the system.
    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()

    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed"
                or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)

        Ruby.create_system(options, True, test_sys, test_sys.iobus,
                           test_sys._dma_ports)

        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(
            clock=options.ruby_clock, voltage_domain=test_sys.voltage_domain)

        # Connect the ruby io port to the PIO bus,
        # assuming that there is just one such port.
        test_sys.iobus.master = test_sys.ruby._io_port.slave

        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] == "x86":
                # Walker and interrupt-controller ports also route through
                # the Ruby sequencer on x86 (interrupts is a list here).
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts[0].pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts[0].int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts[0].int_slave = test_sys.ruby._cpu_ports[i].master
    else:
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges=test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        elif not options.external_memory_system:
            # No caches and no external memory system: plain bridge.
            test_sys.iobridge = Bridge(delay='50ns',
                                       ranges=test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave

        # Sanity check
        if options.fastmem:
            if TestCPUClass != AtomicSimpleCPU:
                fatal("Fastmem can only be used with atomic CPU!")
            if (options.caches or options.l2cache):
                fatal("You cannot use fastmem in combination with caches!")

        if options.simpoint_profile:
            if not options.fastmem:
                # Atomic CPU checked with fastmem option already
                fatal("SimPoint generation should be done with atomic cpu and fastmem")
            if np > 1:
                fatal("SimPoint generation not supported with more than one CPUs")

        # Per-CPU feature flags and hardware-thread creation.
        for i in xrange(np):
            if options.fastmem:
                test_sys.cpu[i].fastmem = True
            if options.simpoint_profile:
                test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
            if options.checker:
                test_sys.cpu[i].addCheckerCpu()
            test_sys.cpu[i].createThreads()

        # If elastic tracing is enabled when not restoring from checkpoint and
        # when not fast forwarding using the atomic cpu, then check that the
        # TestCPUClass is DerivO3CPU or inherits from DerivO3CPU. If the check
        # passes then attach the elastic trace probe.
        # If restoring from checkpoint or fast forwarding, the code that does this for
        # FutureCPUClass is in the Simulation module. If the check passes then the
        # elastic trace probe is attached to the switch CPUs.
        if options.elastic_trace_en and options.checkpoint_restore == None and \
            not options.fast_forward:
            CpuConfig.config_etrace(TestCPUClass, test_sys.cpu, options)

        CacheConfig.config_cache(options, test_sys)
        MemConfig.config_mem(options, test_sys)

    return test_sys
def build_test_system(np): cmdline = cmd_line_template() if buildEnv['TARGET_ISA'] == "alpha": test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby, cmdline=cmdline) elif buildEnv['TARGET_ISA'] == "mips": test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0], cmdline=cmdline) elif buildEnv['TARGET_ISA'] == "sparc": test_sys = makeSparcSystem(test_mem_mode, bm[0], cmdline=cmdline) elif buildEnv['TARGET_ISA'] == "x86": test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0], options.ruby, cmdline=cmdline) elif buildEnv['TARGET_ISA'] == "arm": test_sys = makeArmSystem( test_mem_mode, options.machine_type, options.num_cpus, bm[0], options.dtb_filename, bare_metal=options.bare_metal, cmdline=cmdline, external_memory=options.external_memory_system) if options.enable_context_switch_stats_dump: test_sys.enable_context_switch_stats_dump = True else: fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA']) # Set the cache line size for the entire system test_sys.cache_line_size = options.cacheline_size # Create a top-level voltage domain test_sys.voltage_domain = VoltageDomain(voltage=options.sys_voltage) # Create a source clock for the system and set the clock period test_sys.clk_domain = SrcClockDomain( clock=options.sys_clock, voltage_domain=test_sys.voltage_domain) # Create a CPU voltage domain test_sys.cpu_voltage_domain = VoltageDomain() # Create a source clock for the CPUs and set the clock period test_sys.cpu_clk_domain = SrcClockDomain( clock=options.cpu_clock, voltage_domain=test_sys.cpu_voltage_domain) if options.accel_cfg_file: config = ConfigParser.SafeConfigParser() print options.accel_cfg_file config.read(options.accel_cfg_file) accels = config.sections() if not accels: fatal("No accelerators were specified!") datapaths = [] for accel in accels: memory_type = config.get(accel, 'memory_type').lower() # Accelerators need their own clock domain! 
cycleTime = config.getint(accel, "cycle_time") clock = "%1.3fGHz" % (1 / cycleTime) clk_domain = SrcClockDomain( clock=clock, voltage_domain=test_sys.cpu_voltage_domain) # Set the globally required parameters. datapath = HybridDatapath( clk_domain=clk_domain, benchName=config.get(accel, "bench_name"), traceFilesFolder=config.get(accel, "trace_files_folder"), configFileName=config.get(accel, "config_file_name"), acceleratorName="datapath%d" % config.getint(accel, "accelerator_id"), acceleratorId=config.getint(accel, "accelerator_id"), cycleTime=cycleTime, useDb=config.getboolean(accel, "use_db"), experimentName=config.get(accel, "experiment_name"), enableStatsDump=options.enable_stats_dump, executeStandalone=(np == 0)) datapath.dmaSetupOverhead = config.getint(accel, "dma_setup_overhead") datapath.maxDmaRequests = config.getint(accel, "max_dma_requests") datapath.multiChannelDMA = config.getboolean( accel, "dma_multi_channel") datapath.dmaChunkSize = config.getint(accel, "dma_chunk_size") datapath.pipelinedDma = config.getboolean(accel, "pipelined_dma") datapath.ignoreCacheFlush = config.getboolean( accel, "ignore_cache_flush") datapath.invalidateOnDmaStore = config.getboolean( accel, "invalidate_on_dma_store") if memory_type == "cache": options.cacheline_size = config.getint(accel, "cache_line_sz") datapath.cacheSize = config.get(accel, "cache_size") datapath.cacheBandwidth = config.get(accel, "cache_bandwidth") datapath.cacheQueueSize = config.get(accel, "cache_queue_size") datapath.cacheAssoc = config.getint(accel, "cache_assoc") datapath.cacheHitLatency = config.getint( accel, "cache_hit_latency") datapath.cacheLineSize = config.getint(accel, "cache_line_sz") datapath.cactiCacheConfig = config.get(accel, "cacti_cache_config") datapath.tlbEntries = config.getint(accel, "tlb_entries") datapath.tlbAssoc = config.getint(accel, "tlb_assoc") datapath.tlbHitLatency = config.getint(accel, "tlb_hit_latency") datapath.tlbMissLatency = config.getint( accel, 
"tlb_miss_latency") datapath.tlbCactiConfig = config.get(accel, "cacti_tlb_config") datapath.tlbPageBytes = config.getint(accel, "tlb_page_size") datapath.numOutStandingWalks = config.getint( accel, "tlb_max_outstanding_walks") datapath.tlbBandwidth = config.getint(accel, "tlb_bandwidth") if (memory_type != "cache" and memory_type != "spad"): fatal( "Aladdin configuration file specified invalid memory type %s for " "accelerator %s." % (memory_type, accel)) datapaths.append(datapath) test_sys.datapaths = datapaths camera = CameraModel(enabled=True, images_dir="images", imageBytes=691200) test_sys.camera = camera if options.kernel is not None: test_sys.kernel = binary(options.kernel) if options.script is not None: test_sys.readfile = options.script if options.lpae: test_sys.have_lpae = True if options.virtualisation: test_sys.have_virtualization = True test_sys.init_param = options.init_param # For now, assign all the CPUs to the same clock domain test_sys.cpu = [ TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i) for i in xrange(np) ] if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass): test_sys.vm = KvmVM() if options.ruby: # Check for timing mode because ruby does not support atomic accesses if not (options.cpu_type == "detailed" or options.cpu_type == "timing"): print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!" sys.exit(1) else: print "Running Ruby with %s CPU model" % options.cpu_type Ruby.create_system(options, True, test_sys, test_sys.iobus, test_sys._dma_ports) # Create a seperate clock domain for Ruby test_sys.ruby.clk_domain = SrcClockDomain( clock=options.ruby_clock, voltage_domain=test_sys.voltage_domain) # Connect the ruby io port to the PIO bus, # assuming that there is just one such port. 
test_sys.iobus.master = test_sys.ruby._io_port.slave test_sys.camera.pio = test_sys.iobus.master test_sys.camera.dma = test_sys.iobus.slave for (i, cpu) in enumerate(test_sys.cpu): # # Tie the cpu ports to the correct ruby system ports # cpu.clk_domain = test_sys.cpu_clk_domain cpu.createThreads() cpu.createInterruptController() cpu.icache_port = test_sys.ruby._cpu_ports[i].slave cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave if buildEnv['TARGET_ISA'] == "x86": cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master elif buildEnv['TARGET_ISA'] == "arm": cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave else: if options.caches or options.l2cache: # By default the IOCache runs at the system clock test_sys.iocache = IOCache(addr_ranges=test_sys.mem_ranges) test_sys.iocache.cpu_side = test_sys.iobus.master test_sys.iocache.mem_side = test_sys.membus.slave elif not options.external_memory_system: test_sys.iobridge = Bridge(delay='50ns', ranges=test_sys.mem_ranges) test_sys.iobridge.slave = test_sys.iobus.master test_sys.iobridge.master = test_sys.membus.slave test_sys.camera.pio = test_sys.membus.master test_sys.camera.dma = test_sys.membus.slave # Sanity check if options.fastmem: if TestCPUClass != AtomicSimpleCPU: fatal("Fastmem can only be used with atomic CPU!") if (options.caches or options.l2cache): fatal("You cannot use fastmem in combination with caches!") if options.simpoint_profile: if not options.fastmem: # Atomic CPU checked with fastmem option already fatal( "SimPoint generation should be done with atomic cpu and fastmem" ) if np > 1: fatal( "SimPoint generation not supported with more than one CPUs" ) for i in xrange(np): if options.fastmem: 
test_sys.cpu[i].fastmem = True if options.simpoint_profile: test_sys.cpu[i].addSimPointProbe(options.simpoint_interval) if options.checker: test_sys.cpu[i].addCheckerCpu() test_sys.cpu[i].createThreads() CacheConfig.config_cache(options, test_sys) MemConfig.config_mem(options, test_sys) return test_sys
def build_test_system(np):
    """Build a full-system target whose CPUs share one DVFS-enabled
    clock domain (domain_id 0) registered with the DVFS handler.

    np -- number of CPUs to instantiate.

    Returns the configured System object.  Relies on module-level state:
    options, bm, test_mem_mode, TestCPUClass, FutureClass, buildEnv.
    """
    # Instantiate the ISA-specific base system from the benchmark spec.
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type,
                                 options.num_cpus, bm[0],
                                 options.dtb_filename,
                                 bare_metal=options.bare_metal)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size

    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)

    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
            voltage_domain = test_sys.voltage_domain)

    # Create a CPU voltage domain
    # test_sys.cpu_voltage_domain = VoltageDomain(voltage = ['1.0V', '0.9V', '0.8V'])
    test_sys.cpu_voltage_domain = VoltageDomain()

    # Create a source clock for the CPUs and set the clock period.
    # domain_id 0 identifies this domain to the DVFS handler below.
    test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                             voltage_domain =
                                             test_sys.cpu_voltage_domain,
                                             domain_id = 0)

    # Register the CPU clock domain with the DVFS handler and enable it.
    test_sys.dvfs_handler.domains = test_sys.cpu_clk_domain
    test_sys.dvfs_handler.enable = 1

    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)

    if options.script is not None:
        test_sys.readfile = options.script

    if options.lpae:
        test_sys.have_lpae = True

    if options.virtualisation:
        test_sys.have_virtualization = True

    test_sys.init_param = options.init_param

    # For now, assign all the CPUs to the same clock domain
    test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
                    for i in xrange(np)]

    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()

    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed"
                or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)

        Ruby.create_system(options, True, test_sys, test_sys.iobus,
                           test_sys._dma_ports)

        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                voltage_domain = test_sys.voltage_domain)

        # Connect the ruby io port to the PIO bus,
        # assuming that there is just one such port.
        test_sys.iobus.master = test_sys.ruby._io_port.slave

        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] == "x86":
                # x86 also routes page-table walkers and the interrupt
                # controller through Ruby's per-CPU ports.
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master
    else:
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        else:
            # No caches: DMA traffic crosses a simple bridge instead.
            test_sys.iobridge = Bridge(delay='50ns',
                                       ranges = test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave

    # Sanity check
    if options.fastmem:
        if TestCPUClass != AtomicSimpleCPU:
            fatal("Fastmem can only be used with atomic CPU!")
        if (options.caches or options.l2cache):
            fatal("You cannot use fastmem in combination with caches!")

    for i in xrange(np):
        if options.fastmem:
            test_sys.cpu[i].fastmem = True
        if options.checker:
            test_sys.cpu[i].addCheckerCpu()
        test_sys.cpu[i].createThreads()

    CacheConfig.config_cache(options, test_sys)
    MemConfig.config_mem(options, test_sys)

    return test_sys
def addNoISAOptions(parser):
    """Register ISA-agnostic command-line options on *parser*.

    Covers CPU count, system voltage/clock, memory system, cache
    hierarchy geometry, Aladdin accelerator config, Ruby, and run
    duration limits.  Mutates *parser* in place; returns None.
    """
    parser.add_option("-n", "--num-cpus", type="int", default=1)
    parser.add_option("--sys-voltage", action="store", type="string",
                      default='1.0V',
                      help="""Top-level voltage for blocks running at system power supply""")
    parser.add_option("--sys-clock", action="store", type="string",
                      default='1GHz',
                      help="""Top-level clock for blocks running at system speed""")

    # Memory Options
    parser.add_option("--list-mem-types", action="callback",
                      callback=_listMemTypes,
                      help="List available memory types")
    parser.add_option("--mem-type", type="choice", default="DDR3_1600_8x8",
                      choices=MemConfig.mem_names(),
                      help="type of memory to use")
    parser.add_option("--mem-channels", type="int", default=1,
                      help="number of memory channels")
    parser.add_option("--mem-ranks", type="int", default=None,
                      help="number of memory ranks per channel")
    parser.add_option("--mem-size", action="store", type="string",
                      default="128MB",
                      help="Specify the physical memory size (single memory)")

    parser.add_option("--memchecker", action="store_true")

    # Cache Options
    parser.add_option("--external-memory-system", type="string",
                      help="use external ports of this port_type for caches")
    parser.add_option("--tlm-memory", type="string",
                      help="use external port for SystemC TLM cosimulation")
    parser.add_option("--caches", action="store_true")
    parser.add_option("--l2cache", action="store_true")
    parser.add_option("--enable_prefetchers", action="store_true")
    parser.add_option("--prefetcher-type", type="choice", default="tagged",
                      choices=CacheConfig.prefetcher_names(),
                      help="type of cache prefetcher to use")
    parser.add_option("--num-dirs", type="int", default=1)
    parser.add_option("--num-l2caches", type="int", default=1)
    parser.add_option("--num-l3caches", type="int", default=1)
    parser.add_option("--l1d_size", type="string", default="64kB")
    parser.add_option("--l1i_size", type="string", default="32kB")
    parser.add_option("--l2_size", type="string", default="2MB")
    parser.add_option("--l3_size", type="string", default="16MB")
    parser.add_option("--l1d_assoc", type="int", default=2)
    parser.add_option("--l1i_assoc", type="int", default=2)
    parser.add_option("--l2_assoc", type="int", default=8)
    parser.add_option("--l3_assoc", type="int", default=16)
    # BUG FIX: these int-typed options previously used *string* defaults
    # ("2", "2", "20").  optparse does not type-convert defaults, so the
    # option value was a str whenever the flag was not given on the
    # command line.  Use integer defaults to match type="int".
    parser.add_option("--l1d_hit_latency", type="int", default=2)
    parser.add_option("--l1i_hit_latency", type="int", default=2)
    parser.add_option("--l2_hit_latency", type="int", default=20)
    parser.add_option("--cacheline_size", type="int", default=64)
    parser.add_option("--xbar_width", type="int", default=8)
    parser.add_option(
        "--record-dram-traffic", action="store_true",
        help="Record DRAM memory traffic packets to file (requires protobuf).")

    # Aladdin Options
    parser.add_option("--accel_cfg_file", default=None,
                      help="Aladdin accelerator configuration file.")

    # Enable Ruby
    parser.add_option("--ruby", action="store_true")

    # Run duration options
    parser.add_option("-m", "--abs-max-tick", type="int", default=m5.MaxTick,
                      metavar="TICKS",
                      help="Run to absolute simulated tick "
                      "specified including ticks from a restored checkpoint")
    parser.add_option("--rel-max-tick", type="int", default=None,
                      metavar="TICKS",
                      help="Simulate for specified number of"
                      " ticks relative to the simulation start tick (e.g. if "
                      "restoring a checkpoint)")
    parser.add_option("--maxtime", type="float", default=None,
                      help="Run to the specified absolute simulated time in "
                      "seconds")
def build_test_system(np):
    """Build a full-system target with a NoMali GPU model (Mali T760
    register interface, no actual rendering) attached to the memory bus.

    np -- number of CPUs to instantiate.

    Returns the configured System object.  Relies on module-level state:
    options, bm, test_mem_mode, TestCPUClass, FutureClass, buildEnv.
    """
    cmdline = cmd_line_template()
    # Instantiate the ISA-specific base system from the benchmark spec.
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby,
                                        cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby, cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type,
                                 options.num_cpus, bm[0],
                                 options.dtb_filename,
                                 bare_metal=options.bare_metal,
                                 cmdline=cmdline,
                                 external_memory=options.external_memory_system)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size

    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)

    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
            voltage_domain = test_sys.voltage_domain)

    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()

    # Create a source clock for the CPUs and set the clock period
    test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                             voltage_domain =
                                             test_sys.cpu_voltage_domain)

    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)

    if options.script is not None:
        test_sys.readfile = options.script
        # Debug trace of the boot-script path (left in by the author).
        print "fs.py 131#: {}".format(test_sys.readfile)
    else:
        print "fs.py 133#: options.script is None."

    if options.lpae:
        test_sys.have_lpae = True

    if options.virtualisation:
        test_sys.have_virtualization = True

    test_sys.init_param = options.init_param

    # For now, assign all the CPUs to the same clock domain
    test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
                    for i in xrange(np)]

    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()

    # Attach a NoMali GPU model: exposes the Mali T760 register/interrupt
    # interface on the memory bus at pio_addr without doing any rendering.
    test_sys.gpu = NoMaliGpu(
        gpu_type="T760",
        ver_maj=0,
        ver_min=0,
        ver_status=1,
        int_job=118,
        int_mmu=119,
        int_gpu=120,
        pio_addr=0x2b400000,
        pio=test_sys.membus.master)

    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed"
                or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)

        Ruby.create_system(options, True, test_sys, test_sys.iobus,
                           test_sys._dma_ports)

        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                voltage_domain = test_sys.voltage_domain)

        # Connect the ruby io port to the PIO bus,
        # assuming that there is just one such port.
        test_sys.iobus.master = test_sys.ruby._io_port.slave

        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] == "x86":
                # x86 also routes page-table walkers and the interrupt
                # controller (interrupts[0]: per-thread list API) via Ruby.
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts[0].pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts[0].int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts[0].int_slave = test_sys.ruby._cpu_ports[i].master
    else:
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        elif not options.external_memory_system:
            test_sys.iobridge = Bridge(delay='50ns',
                                       ranges = test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave

    # Sanity check
    if options.fastmem:
        if TestCPUClass != AtomicSimpleCPU:
            fatal("Fastmem can only be used with atomic CPU!")
        if (options.caches or options.l2cache):
            fatal("You cannot use fastmem in combination with caches!")

    if options.simpoint_profile:
        if not options.fastmem:
            # Atomic CPU checked with fastmem option already
            fatal("SimPoint generation should be done with atomic cpu and fastmem")
        if np > 1:
            fatal("SimPoint generation not supported with more than one CPUs")

    for i in xrange(np):
        if options.fastmem:
            test_sys.cpu[i].fastmem = True
        if options.simpoint_profile:
            test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
        if options.checker:
            test_sys.cpu[i].addCheckerCpu()
        test_sys.cpu[i].createThreads()

    # If elastic tracing is enabled when not restoring from checkpoint and
    # when not fast forwarding using the atomic cpu, then check that the
    # TestCPUClass is DerivO3CPU or inherits from DerivO3CPU. If the check
    # passes then attach the elastic trace probe.
    # If restoring from checkpoint or fast forwarding, the code that does this for
    # FutureCPUClass is in the Simulation module. If the check passes then the
    # elastic trace probe is attached to the switch CPUs.
    if options.elastic_trace_en and options.checkpoint_restore == None and \
        not options.fast_forward:
        CpuConfig.config_etrace(TestCPUClass, test_sys.cpu, options)

    CacheConfig.config_cache(options, test_sys)

    MemConfig.config_mem(options, test_sys)

    return test_sys
# Scale the out-of-order CPU's pipeline widths and buffer sizes by the
# resource factor num_rsc (defined earlier in this script).
rsc = num_rsc
TestCPUClass.issueWidth = rsc # Default: 8
TestCPUClass.fetchWidth = rsc # Default: 8
TestCPUClass.decodeWidth = rsc # Default: 8
TestCPUClass.dispatchWidth = rsc # Default: 8
TestCPUClass.renameWidth = rsc # Default: 8
# NOTE(review): issueWidth is assigned twice (also above) with the same
# value -- the second assignment is redundant.
TestCPUClass.issueWidth = rsc # Default: 8
TestCPUClass.commitWidth = rsc # Default: 8
TestCPUClass.wbWidth = rsc # Default: 8
TestCPUClass.RASSize = rsc * 2 # Default: 16
TestCPUClass.LQEntries = rsc * 4 # Default: 32
TestCPUClass.SQEntries = rsc * 4 # Default: 32
TestCPUClass.numIQEntries = rsc * 8 # Default: 64
TestCPUClass.numROBEntries = rsc * 24 # Default: 192

# Configure caches with the scaled parameters.
CacheConfig.new_config_cache(options, test_sys, num_bce, num_rsc)

if options.caches or options.l2cache:
    # Determine physical memory size from the benchmark spec, falling
    # back to the default system configuration.
    if bm[0]:
        mem_size = bm[0].mem()
    else:
        mem_size = SysConfig().mem()
    # For x86, we need to poke a hole for interrupt messages to get back to the
    # CPU. These use a portion of the physical address space which has a
    # non-zero prefix in the top nibble. Normal memory accesses have a 0
    # prefix.
    if buildEnv['TARGET_ISA'] == 'x86':
        test_sys.bridge.filter_ranges_a=[AddrRange(0, Addr.max >> 4)]
    else:
        test_sys.bridge.filter_ranges_a=[AddrRange(0, Addr.max)]
    test_sys.bridge.filter_ranges_b=[AddrRange(mem_size)]
def build_test_system(np):
    """Build a full-system target with four per-CPU DVFS clock domains
    whose operating points mirror Haswell P-states.

    np -- number of CPUs (NOTE(review): the cpu list below hardcodes
    exactly 4 CPUs/sockets while later loops use xrange(np); behavior
    for np != 4 is inconsistent -- confirm intent).

    Returns the configured System object.  Relies on module-level state:
    options, bm, test_mem_mode, TestCPUClass, FutureClass, buildEnv.
    """
    cmdline = cmd_line_template()
    # Instantiate the ISA-specific base system from the benchmark spec.
    # NOTE(review): this fork passes the extra `options` argument to the
    # x86 and ARM makers -- presumably a locally modified FSConfig API.
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby,
                                        cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby, options, cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type,
                                 options.num_cpus, bm[0],
                                 options.dtb_filename, options,
                                 bare_metal=options.bare_metal,
                                 cmdline=cmdline)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size

    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)

    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
            voltage_domain = test_sys.voltage_domain)

    # Create a clk running contantly at 3GHz for L2
    test_sys.clk_domain_const = SrcClockDomain(clock = ["3GHz"],
            voltage_domain = test_sys.voltage_domain)

    # Create a CPU voltage domain
    #test_sys.cpu_voltage_domain = VoltageDomain(voltage = ['1V','0.9V','0.8V'])
    test_sys.cpu_voltage_domain = VoltageDomain()

    # lokeshjindal15
    # Create a source clock for the CPUs and set the clock period
    # vailable frequency steps: 3.10 GHz, 3.10 GHz, 2.90 GHz, 2.80 GHz, 2.60 GHz, 2.40 GHz, 2.30 GHz, 2.10 GHz, 1.90 GHz, 1.80 GHz, 1.60 GHz, 1.50 GHz, 1.30 GHz, 1.10 GHz, 1000 MHz, 800 MHz
    haswell_pstates = ["3.10GHz", "2.90GHz", "2.80GHz", "2.60GHz",
                       "2.40GHz", "2.30GHz", "2.10GHz", "1.90GHz",
                       "1.80GHz", "1.60GHz", "1.50GHz", "1.30GHz",
                       "1.10GHz", "1000MHz", "800MHz"]
    # One independently scalable clock domain per CPU (domain_id 0..3),
    # all sharing the Haswell P-state table.
    test_sys.cpu_clk_domain = SrcClockDomain(clock = haswell_pstates,
                                             voltage_domain =
                                             test_sys.cpu_voltage_domain,
                                             domain_id = 0)
    test_sys.cpu_clk_domain1 = SrcClockDomain(clock = haswell_pstates,
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id = 1)
    test_sys.cpu_clk_domain2 = SrcClockDomain(clock = haswell_pstates,
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id = 2)
    test_sys.cpu_clk_domain3 = SrcClockDomain(clock = haswell_pstates,
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id = 3)

    # Register all four domains with the DVFS handler and enable it.
    test_sys.dvfs_handler.transition_latency = '40us'
    test_sys.dvfs_handler.domains = [test_sys.cpu_clk_domain,
                                     test_sys.cpu_clk_domain1,
                                     test_sys.cpu_clk_domain2,
                                     test_sys.cpu_clk_domain3]
    test_sys.dvfs_handler.enable = 1

    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)

    if options.script is not None:
        test_sys.readfile = options.script

    if options.lpae:
        test_sys.have_lpae = True

    if options.virtualisation:
        test_sys.have_virtualization = True

    test_sys.init_param = options.init_param

    # For now, assign all the CPUs to the same clock domain
    # NOTE(review): hardcodes exactly 4 CPUs (one per clock domain /
    # socket) and ignores np -- TODO confirm against callers.
    test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain,
                                 cpu_id=0, socket_id=0),
                    TestCPUClass(clk_domain=test_sys.cpu_clk_domain1,
                                 cpu_id=1, socket_id=1),
                    TestCPUClass(clk_domain=test_sys.cpu_clk_domain2,
                                 cpu_id=2, socket_id=2),
                    TestCPUClass(clk_domain=test_sys.cpu_clk_domain3,
                                 cpu_id=3, socket_id=3)]

    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()

    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed"
                or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)

        Ruby.create_system(options, True, test_sys, test_sys.iobus,
                           test_sys._dma_ports)

        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                voltage_domain = test_sys.voltage_domain)

        # Connect the ruby io port to the PIO bus,
        # assuming that there is just one such port.
        test_sys.iobus.master = test_sys.ruby._io_port.slave

        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] == "x86":
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master
    else:
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        else:
            test_sys.iobridge = Bridge(delay='50ns',
                                       ranges = test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave

    # Sanity check
    if options.fastmem:
        if TestCPUClass != AtomicSimpleCPU:
            fatal("Fastmem can only be used with atomic CPU!")
        if (options.caches or options.l2cache):
            fatal("You cannot use fastmem in combination with caches!")

    if options.simpoint_profile:
        if not options.fastmem:
            # Atomic CPU checked with fastmem option already
            fatal("SimPoint generation should be done with atomic cpu and fastmem")
        if np > 1:
            fatal("SimPoint generation not supported with more than one CPUs")

    for i in xrange(np):
        if options.fastmem:
            test_sys.cpu[i].fastmem = True
        if options.simpoint_profile:
            test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
        if options.checker:
            test_sys.cpu[i].addCheckerCpu()
        test_sys.cpu[i].createThreads()

    CacheConfig.config_cache(options, test_sys)
    MemConfig.config_mem(options, test_sys)
    return test_sys
# Select the CPU class and memory mode from command-line options, then
# build a simple SE-mode system: CPUs + physical memory on one bus.
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.clock = '2GHz'
CPUClass.numThreads = numThreads;

np = options.num_cpus

system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
                physmem = PhysicalMemory(range=AddrRange("512MB")),
                membus = Bus(), mem_mode = test_mem_mode)

system.physmem.port = system.membus.port

CacheConfig.config_cache(options, system)

# Assign the (module-level) workload process to every CPU.
for i in xrange(np):
    system.cpu[i].workload = process

if options.fastmem:
    # Fastmem: CPU 0 accesses physical memory directly, bypassing the bus.
    system.cpu[0].physmem_port = system.physmem.port

root = Root(system = system)

# Decode the --whatval selector into a value-type label.
what_val_num = options.whatval
if (what_val_num == 65):
    what_type = "All0"
elif (what_val_num == 64):
# NOTE(review): fragment is truncated here -- the body of this elif (and
# any further branches) is not visible in this chunk.
# Scale the out-of-order CPU's pipeline widths and buffer sizes by the
# resource factor num_rsc (defined earlier in this script).
rsc = num_rsc
# BUG FIX: issueWidth was previously assigned twice with the same value;
# the redundant duplicate assignment has been removed.
TestCPUClass.issueWidth = rsc  # Default: 8
TestCPUClass.fetchWidth = rsc  # Default: 8
TestCPUClass.decodeWidth = rsc  # Default: 8
TestCPUClass.dispatchWidth = rsc  # Default: 8
TestCPUClass.renameWidth = rsc  # Default: 8
TestCPUClass.commitWidth = rsc  # Default: 8
TestCPUClass.wbWidth = rsc  # Default: 8
TestCPUClass.RASSize = rsc * 2  # Default: 16
TestCPUClass.LQEntries = rsc * 4  # Default: 32
TestCPUClass.SQEntries = rsc * 4  # Default: 32
TestCPUClass.numIQEntries = rsc * 8  # Default: 64
TestCPUClass.numROBEntries = rsc * 24  # Default: 192

# Configure caches with the scaled parameters.
CacheConfig.new_config_cache(options, test_sys, num_bce, num_rsc)

if options.caches or options.l2cache:
    # Determine physical memory size from the benchmark spec, falling
    # back to the default system configuration.
    if bm[0]:
        mem_size = bm[0].mem()
    else:
        mem_size = SysConfig().mem()
    # For x86, we need to poke a hole for interrupt messages to get back to the
    # CPU. These use a portion of the physical address space which has a
    # non-zero prefix in the top nibble. Normal memory accesses have a 0
    # prefix.
    if buildEnv['TARGET_ISA'] == 'x86':
        test_sys.bridge.filter_ranges_a = [AddrRange(0, Addr.max >> 4)]
    else:
        test_sys.bridge.filter_ranges_a = [AddrRange(0, Addr.max)]
    test_sys.bridge.filter_ranges_b = [AddrRange(mem_size)]
# NOTE(review): this fragment begins with an 'elif' whose opening 'if'
# (the other TARGET_ISA cases) lies outside this chunk.
elif buildEnv['TARGET_ISA'] == "arm":
    test_sys = makeArmSystem(test_mem_mode, options.machine_type, bm[0],
                             bare_metal=options.bare_metal)
else:
    fatal("incapable of building non-alpha or non-sparc full system!")

if options.kernel is not None:
    test_sys.kernel = binary(options.kernel)

if options.script is not None:
    test_sys.readfile = options.script

test_sys.cpu = [TestCPUClass(cpu_id=i) for i in xrange(np)]

CacheConfig.config_cache(options, test_sys)

if options.caches or options.l2cache:
    # Determine physical memory size from the benchmark spec, falling
    # back to the default system configuration.
    if bm[0]:
        mem_size = bm[0].mem()
    else:
        mem_size = SysConfig().mem()
    # For x86, we need to poke a hole for interrupt messages to get back to the
    # CPU. These use a portion of the physical address space which has a
    # non-zero prefix in the top nibble. Normal memory accesses have a 0
    # prefix.
    if buildEnv['TARGET_ISA'] == 'x86':
        test_sys.bridge.filter_ranges_a=[AddrRange(0, Addr.max >> 4)]
    else:
        test_sys.bridge.filter_ranges_a=[AddrRange(0, Addr.max)]
    test_sys.bridge.filter_ranges_b=[AddrRange(mem_size)]
def build_test_system(np):
    """Build and return a full-system simulation target with np CPUs.

    Creates the ISA-appropriate bare system, attaches the system- and
    CPU-level clock/voltage domains, instantiates one SrcClockDomain per
    DVFS operating point together with a matching list of switched-out
    CPUs per point (used for frequency switching at runtime), and finally
    wires up either a Ruby or a classic memory system.

    Relies on module-level state prepared by the surrounding script:
    options, bm, test_mem_mode, TestCPUClass, FutureClass, TestMemClass,
    and the imported gem5 configuration helpers.

    Fixes a copy/paste bug in the original: the FREQ_1800MHz CPU lists
    were created in the 1.9 GHz clock domain (the Ruby path patched this
    up afterwards, the classic-memory path did not).
    """
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type, bm[0],
                                 options.dtb_filename,
                                 bare_metal=options.bare_metal)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size

    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage=options.sys_voltage)

    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(
        clock=options.sys_clock,
        voltage_domain=test_sys.voltage_domain)

    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()

    # Create a source clock for the CPUs and set the clock period
    test_sys.cpu_clk_domain = SrcClockDomain(
        clock=options.cpu_clock,
        voltage_domain=test_sys.cpu_voltage_domain)

    # DVFS operating points: (attribute suffix, clock, voltage).  Each
    # point gets a SrcClockDomain named cpufreq<suffix> and, below, a CPU
    # list named FREQ_<suffix>.  Driving everything from this one table
    # guarantees the CPU lists always land in their matching domain.
    dvfs_points = [
        ('2GHz',    '2GHz',   '1.1V'),
        ('1900MHz', '1.9GHz', '1.03V'),
        ('1800MHz', '1.8GHz', '0.95V'),
        ('1700MHz', '1.7GHz', '0.92V'),
        ('1600MHz', '1.6GHz', '0.88V'),
        ('1500MHz', '1.5GHz', '0.82V'),
        ('1400MHz', '1.4GHz', '0.8V'),
        ('1300MHz', '1.3GHz', '0.8V'),
        ('1200MHz', '1.2GHz', '0.8V'),
        ('1100MHz', '1.1GHz', '0.8V'),
        ('1GHz',    '1GHz',   '0.8V'),
        ('800MHz',  '800MHz', '0.8V'),
        ('500MHz',  '500MHz', '0.8V'),
    ]
    for name, clk, vlt in dvfs_points:
        setattr(test_sys, 'cpufreq' + name,
                SrcClockDomain(clock=clk,
                               voltage_domain=VoltageDomain(voltage=vlt)))

    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)

    if options.script is not None:
        test_sys.readfile = options.script

    if options.lpae:
        test_sys.have_lpae = True

    if options.virtualisation:
        test_sys.have_virtualization = True

    test_sys.init_param = options.init_param

    # For now, assign all the CPUs to the same clock domain
    test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
                    for i in xrange(np)]

    # Which CPU model the switch-over lists use.  If both flags are set
    # the OOO lists win, matching the original assignment order.
    TIMING_CPU = 0
    OOO_CPU = 1

    # One list of switched-out CPUs per DVFS point, each pinned to the
    # clock domain created for that point above.
    for flag, cpu_class in ((TIMING_CPU, TimingSimpleCPU),
                            (OOO_CPU, DerivO3CPU)):
        if not flag:
            continue
        for name, _clk, _vlt in dvfs_points:
            domain = getattr(test_sys, 'cpufreq' + name)
            setattr(test_sys, 'FREQ_' + name,
                    [cpu_class(switched_out=True, clk_domain=domain,
                               cpu_id=i)
                     for i in xrange(np)])

    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()

    if options.ruby:
        # Check for timing mode because ruby does not support atomic
        # accesses
        if not (options.cpu_type == "detailed"
                or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)

        Ruby.create_system(options, test_sys, test_sys.iobus,
                           test_sys._dma_ports)

        # Create a separate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(
            clock=options.ruby_clock,
            voltage_domain=test_sys.voltage_domain)

        for (i, cpu) in enumerate(test_sys.cpu):
            # Tie the cpu ports to the correct ruby system ports
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] == "x86":
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master
            test_sys.ruby._cpu_ports[i].access_phys_mem = True

        # Re-pin every switch-over CPU to its own DVFS clock domain (kept
        # from the original; redundant now that the lists are built in the
        # right domain, but harmless).
        for i in range(0, np):
            for name, _clk, _vlt in dvfs_points:
                getattr(test_sys, 'FREQ_' + name)[i].clk_domain = \
                    getattr(test_sys, 'cpufreq' + name)

        # Create the appropriate memory controllers and connect them to
        # the IO bus
        test_sys.mem_ctrls = [TestMemClass(range=r)
                              for r in test_sys.mem_ranges]
        for i in xrange(len(test_sys.mem_ctrls)):
            test_sys.mem_ctrls[i].port = test_sys.iobus.master
    else:
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges=test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        else:
            test_sys.iobridge = Bridge(delay='50ns',
                                       ranges=test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave

        # Sanity check
        if options.fastmem:
            if TestCPUClass != AtomicSimpleCPU:
                fatal("Fastmem can only be used with atomic CPU!")
            if (options.caches or options.l2cache):
                fatal("You cannot use fastmem in combination with caches!")

        for i in xrange(np):
            if options.fastmem:
                test_sys.cpu[i].fastmem = True
            if options.checker:
                test_sys.cpu[i].addCheckerCpu()
            test_sys.cpu[i].createThreads()

        CacheConfig.config_cache(options, test_sys)
        MemConfig.config_mem(options, test_sys)

    # used in non ruby
    # NOTE(review): the original's indentation was mangled, so it is
    # ambiguous whether this belonged inside the classic-memory branch;
    # placed at function level so both paths keep the object — confirm.
    test_sys.mycpu1 = AtomicSimpleCPU(switched_out=True)

    return test_sys