Example 1
def create_sim(cwd, simtool, gui, defines):
    sim = Simulator(name=simtool, gui=gui, cwd=cwd)
    sim.incdirs += ["../src/tb", "../src/rtl", cwd]
    sim.sources += ["../src/rtl/sqrt.v", "../src/tb/tb_sqrt.sv"]
    sim.defines += defines
    sim.top = "tb_sqrt"
    return sim
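
# Illustrative usage of create_sim (hypothetical tool name, working directory and
# defines; the setup()/run()/is_passed calls follow the pattern of Example 5 below):
sim = create_sim("work", "modelsim", gui=False, defines=["DEBUG=1"])
sim.setup()
sim.run()
assert sim.is_passed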
Example 2
def play():
    print('Move ball with WASD. Add mines with scroll wheel.')
    print('Press P to pause/unpause or ESC to exit.')

    random.seed(314159)

    world = get_simple_world()
    window = Window(int(world.width), int(world.height))
    mine_input = get_mine_input(world, window)
    ball_input = BallPlayer(window)
    sim = Simulator(world, window, True, False, 60, 1.0 / 200.0, 1.0 / 4.0)
    sim.run(mine_input, (ball_input, ))
Example 3
class TestAssurance:
    def __init__(self, owner=ALICE):
        self.owner = owner
        self.sim = Simulator(founders)
        # Deploy the assurance contract (eth_ent).
        code = compile_serpent('assurance_ent.se')
        self.eth_ent = self.sim.load_contract(owner, code) # Alice is recipient.

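        # Inferred storage layout (from the asserts below and contribute()):
        # slots 0-1 hold the owner/recipient address, slot 4 holds the index of the
        # next free contribution slot (starting at 5), and each contribution occupies
        # two consecutive slots (contributor address, value); slots 2-3 are
        # contract-specific parameters.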
        assert self.storage(0) == int(owner.address, 16)
        assert self.storage(1) == int(owner.address, 16)
        assert self.storage(2) == 10
        assert self.storage(3) == 20000
        assert self.storage(4) == 5

    def tx(self, fr, v=0, d=[]):
        return self.sim.tx(fr, self.eth_ent, v, d)

    def storage(self, index):
        return self.sim.get_storage_data(self.eth_ent, index)

    def status(self):
        return "open"

    def expect_funded(self):
        print("NOTE: think it funded.")

    def expect_refunded(self):
        for v in founders:  # Check if money back modulo gas.
            assert INITIAL - founders[v] < max_gas_cost #WRONG
        print("NOTE: think it refunded.")
        
    def contribute(self, who, v):  # Contribute.
        i = self.storage(4)
        assert self.tx(who, v) == []
        if self.status() == "open":
            assert self.storage(4) == i + 2
            assert self.storage(i) == int(who.address, 16)
            assert self.storage(i + 1) == v
        elif self.status() == "end_funded":
            self.expect_funded()
        else:
            self.expect_refunded()
        # TODO check increase in balance

    def refund(self):
        assert self.tx(self.owner, 0, ["refund"]) == []
        if self.status() == "open" or self.status() == "refunded":
            self.expect_refunded()
        else:
            self.expect_funded()
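
# Illustrative driver (hypothetical contribution value; uses only the methods
# defined above):
harness = TestAssurance(owner=ALICE)
harness.contribute(ALICE, 1000)  # records contributor address and value in storage
harness.refund()                 # exercises the owner-triggered refund path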
Example 4
def test_evolve_sim():
    particles = [Particle( 0.3, 0.5, +1),
                 Particle( 0.0, -0.5, -1),
                 Particle(-0.1, -0.4, +3)]
    sim = Simulator(particles)
    sim.evolve(0.1)
    p0, p1, p2 = particles

    def fequal(a, b, eps=1e-5):
        return abs(a - b) < eps

    assert fequal(p0.x, 0.210269)
    assert fequal(p0.y, 0.543863)
    assert fequal(p1.x, -0.099334)
    assert fequal(p1.y, -0.490034)
    assert fequal(p2.x, 0.191358)
    assert fequal(p2.y, -0.365227)
Example 5
def test(tmpdir,
         bridge,
         reset,
         simtool,
         defines=[],
         gui=False,
         pytest_run=True):
    # create sim
    tb_dir = path_join(TEST_DIR, 'test_lb_bridge')
    beh_dir = path_join(TEST_DIR, 'beh')
    sim = Simulator(name=simtool, gui=gui, cwd=tmpdir)
    sim.incdirs += [tmpdir, tb_dir, beh_dir]
    sim.sources += [path_join(tb_dir, 'tb.sv')]
    sim.sources += beh_dir.glob('*.sv')
    sim.defines += defines
    sim.top = 'tb'
    sim.setup()
    # prepare test
    dut_src, dut_config = gen_bridge(tmpdir, bridge, reset)
    sim.sources += [dut_src]
    sim.defines += [
        'DUT_DATA_W=%d' % dut_config['data_width'].value,
        'DUT_ADDR_W=%d' % dut_config['address_width'].value,
        'DUT_%s' % bridge.upper(),
        'RESET_ACTIVE=%d' % ('pos' in reset),
    ]
    # run sim
    sim.run()
    if pytest_run:
        assert sim.is_passed
Example 6
 def setup_class(cls):
     logging.disable(
         logging.INFO
     )  # disable overzealous DEBUG logging of pyethereum.processblock
     cls.code = compile_serpent('examples/mutuala.se')
     cls.sim = Simulator({
         cls.ALICE.address: 10**18,
         cls.BOB.address: 10**18
     })
Example 7
    def __init__(self, owner=ALICE):
        self.owner = owner
        self.sim = Simulator(founders)
        # Deploy the assurance contract (eth_ent).
        code = compile_serpent('assurance_ent.se')
        self.eth_ent = self.sim.load_contract(owner, code) # Alice is recipient.

        assert self.storage(0) == int(owner.address, 16)
        assert self.storage(1) == int(owner.address, 16)
        assert self.storage(2) == 10
        assert self.storage(3) == 20000
        assert self.storage(4) == 5
Example 8
import time
from sim import Simulator
from input import parse_raw_itch_file

# parse_raw_itch_file('AAPL', 'data/S020118-v50.txt', 'data')
start = time.time()
simulator = Simulator('AAPL', '20180201')
simulator.run_simulation()
print(f'{time.time() - start}')
Example 9
 def setup_class(cls):
     cls.code = load_serpent('examples/namecoin.se')
     cls.sim = Simulator({cls.ALICE.address: 10**18})
Example 10
def __usage_iosim():
    print("""\
Usage: iosim.py [<options>] <trace path>
  <options>
   -h: help(this message)
   -c <size in blks>
   -t <storage type>: all, default(no prefetch, lru), prefetch, ml, rule
   -T <timestamp range>
  <options for rule>
   -p: enable per-process reference history (default: disabled)
   -b <count>: reference history count, (default: 1)
  <options for ml>
   -p: enable per-process reference history (default: enabled)
   -G <width(time) x height(lba)>: grid dimension (default: 5x10)
   -u <sec>: width unit for time (default: 0.005)
   -L <lba max>
   -m <model path>: for storage ml only
   -M <model type>: for storage ml only
""")

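# Illustrative invocations built from the options documented above (trace and model
# paths are placeholders):
#   python iosim.py -c 1024 -t default /path/to/trace
#   python iosim.py -t ml -G 5x10 -u 0.005 -m /path/to/model /path/to/trace
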
if __name__ == "__main__":
    from sim import Simulator

    logger.init("iosim")

    sim = Simulator()

    conf.parse(__usage_iosim)
    sim.run()
Example 11
        # constants.LEADER_REPORT,
        # constants.BOUNDED_LEADER_REPORT,
    ]
    utility_types = [
        constants.LINEAR_UTILITY,
    ]

    mutation_rule = constants.CYCLE_ONE_AGENT

    # logging_mode = constants.DEBUG_LOGGING
    logging_mode = None
    # logging_mode = constants.LOG_DROPOUT

    sim = Simulator({
        "agent_options": agent_options,
        "num_rounds": num_rounds,
        "num_times": num_times,
        "initial_param": initial_param,
        "decision_types": decision_types,
        "utility_types": utility_types,
        "logging_mode": logging_mode,
        "bounded_percent": bounded_perc,
        "suppress_percent": suppress_perc,
        "mutation_rule": mutation_rule,
    })


    start_time = time.time()
    sim.start()
    print("--- %s seconds ---" % (time.time() - start_time))
Example 12
    grafica.communicate("Ventana plot, creada desde el Emulador PAE!", 0.001)
except subprocess.TimeoutExpired:
    pass

# AX12 motors, left and right
AX12 = {
    MOTOR_ID_L: AX(MOTOR_ID_L),
    MOTOR_ID_R: AX(MOTOR_ID_R),
}
# AXS1 sensor modules
AXS1 = AX(SENSOR_ID)

# Load file with obstacles
habitacion = World(fichero_habitacion)
# Create simulator
simulador = Simulator(habitacion, AX12, AXS1)

logging.info("Robots en (%d, %d) y angulo %d" %
             (simulador.x, simulador.y, simulador.theta))

# Initial demo values
AX12[MOTOR_ID_L][AX_registers.GOAL_SPEED_L] = V_inicial_demo_L & 0xFF
AX12[MOTOR_ID_L][AX_registers.GOAL_SPEED_H] = (V_inicial_demo_L >> 8) & 0x07
AX12[MOTOR_ID_R][AX_registers.GOAL_SPEED_L] = V_inicial_demo_R & 0xFF
AX12[MOTOR_ID_R][AX_registers.GOAL_SPEED_H] = (V_inicial_demo_R >> 8) & 0x07

# Create graphical application
root = tk.Tk()
root.title("EMULADOR ROBOT PAE")
app = TkApplication(simulador, AX12, AXS1, root=root)
root.lift()
Example 13
player = RLPlayer(600, 600, 5)

if mode == "train":
    player.train(2000)

    torch.save(player.policy_net.state_dict(), "policy_net.model")
    torch.save(player.target_net.state_dict(), "target_net.model")

elif mode == "inference":
    player.policy_net.load_state_dict(torch.load("./policy_net.model"))
    player.policy_net.eval()

    player.target_net.load_state_dict(torch.load("./target_net.model"))
    player.target_net.eval()

    sim = Simulator()
    sim.initialize()
    last_screen = sim.get_screen()

    pygame.key.set_repeat(100, 0)

    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()

            elif event.type == KEYDOWN:
                current_screen = sim.get_screen()
                state = current_screen - last_screen
Example 14
from sim import Simulator

# Conversion Constants
TORAD = math.pi / 180
TODEG = 180 / math.pi
TOMPS = 0.514444
TOKNTS = 1 / TOMPS
IMAX = 0.479970000000000 - 0.1001
IMIN = 0 + 0.1001

# Test of the simulator object

# Initialisation:
delta_t = 0.1
t_history = 10
simulation = Simulator.Simulator(20, delta_t)

# Note that we create a single speed calculator and reuse it to generate the different
# experiences, because it saves the state of the flow and the previous incidences.
# Be careful to generate experiences in chronological order!
speedCalculator = Hysteresis()

taustep = 2
# Be careful with initialisation: due to the delay we must initialise the first taustep+1 angles
tau = taustep * simulation.time_step
simulation.hdg[0:taustep + 1] = 5 * TORAD

# Wind Generation
WS = 7
mean = 0
std = 0.1 * TORAD  # 2.5*TORAD
Example 15
if __name__ == "__main__":
    #data = dl.get_norm_data('https://poloniex.com/public?command=returnChartData&currencyPair=BTC_ETH&start=1435699200&end=9999999999&period=14400')
    #orig_data = dl.get_data('https://poloniex.com/public?command=returnChartData&currencyPair=BTC_ETH&start=1435699200&end=9999999999&period=14400')
    '''
    data = dl.get_norm_data('btc_eth_lowtrend.npy')[1000:2000]
    orig_data = dl.get_data('btc_eth_lowtrend.npy')[1000:2000]
    '''
    orig_data, data = dl.test_data_sin(250)
    state_size = len(data[0]) + 2  # last 2 are current assets (usd, crypt)
    action_size = 4  # [Buy, Sell, Hold, % to buy/sell]

    agent = DQNAgent(state_size, action_size)

    scores, episodes = [], []

    sim = Simulator(orig_data, data)
    #viz = Visualizer(

    for e in range(EPISODES):
        score = 0

        while not sim.sim_done():
            state = sim.state  # Get state
            action = agent.get_action(state)

            # Simulate trading
            #-----------
            max_idx = np.argmax(action[:3])  # Choose buy/sell/hold
            reward, done = sim.step(max_idx, action[3])
            next_state = sim.state  # Get new state
            #-----------
Example 16
 def setup_class(cls):
     cls.code = compile_serpent('examples/datafeed.se')
     cls.sim = Simulator({cls.ALICE.address: 10**18})
Example 17
#!/usr/bin/python3

import math

import Box2D
import pyglet
import pyglet.graphics as g

from config import *
from sim import Simulator

# TODO add toggle for fast forward

simulator = Simulator()


class Renderer(pyglet.window.Window):
    def __init__(self):
        super().__init__(*WINDOW_SIZE)
        self.running = True

        self.current_generation = None
        self.generation_label = pyglet.text.Label()

        self.ticker = None
        self.toggle_fast_forward()  # sets ticker

        # background colour
        pyglet.graphics.glClearColor(0.05, 0.05, 0.07, 1)
        if RENDER_TEMPERATURE:
            self.world_temp_backdrop = prerender_world_temp()
Example 18
def simulator():  # References the Simulator class, where the AI players play against each other.
    s_gui = Simulator()
    s_gui.intialise_dynamic()
    return s_gui
Example 19
 def setup_class(cls):
     cls.code = compile_lll('examples/keyval_publisher.lll')
     cls.sim = Simulator({cls.ALICE.address: 10**18})
Example 20
t_sprung = 5  # step time for the (commented-out) step input below
#ufnc = stepfnc(t_sprung, 1)  # input function with step time and step height

f0 = 5
Lambda = 1 / f0  # clock period of the PRBS signal
dt = 5e-3  # step size of the result vector

A = 3
ufnc, u_, N = prbsfnc(
    A, Lambda)  # PRBS signal with amplitude, period and bit interval

PID = [3, 1, 1, 5]  # PID controller parameters - T_i, T_d, T_n, K

Fa = 1 / dt  # sampling frequency
t_max = Lambda * N * 4
t, b_out, S, IN, S_noise = Simulator(dt, t_max, ufnc, PT1**o, True, *PID,
                                     False)
y = b_out[S]
u = np.array(b_out[IN])

# SYSTEM IDENTIFICATION --------------------------------------------------------

System = 'PT1'

# PARAMETER IDENTIFICATION -----------------------------------------------------

if System == '':
    K_e, T_e = area_method(t, t_sprung, b_out[S])
    print("K_e = " + str(K_e))
    print("T_e = " + str(T_e))

    K_e, T_e = area_method(t, t_sprung, b_out[S_noise])
Example 21
#!/usr/bin/env python

from imem import Instr_Mem
from dmem import Data_Mem
from sim import Simulator

imem = Instr_Mem("imem.txt")
dmem = Data_Mem("dmem.txt")
sim = Simulator(imem.output_array(), dmem.output_array())

print "Simulation for Small16 Processor has begun.\n" + \
  "Data memory is set in dmem.txt\n" + \
  "Instruction memory is set in imem.txt\n" + \
  "\nInstructions to use Simulator object:\n" + \
  "sim.step(N)            Steps program N instructions, default is 1.\n" + \
  "sim.run()              Steps program until finished.\n" + \
  "sim.restart(N)         Restarts the program and steps N instructions.\n" + \
  "sim.output_reg()       Outputs register values.\n" + \
  "sim.output_dmem(M, N)  Outputs data memory starting at index M with range N.\n" + \
  "sim.output_imem(M, N)  Outputs instruction memory starting at index M with range N.\n" + \
  "sim.output_instr_cnt() Outputs the number of times each instruction has been executed."
Example 22
        if check_firmware(args.p): raise 
        cpu = construct.CPU(platform, fw=args.p, asm_file=args.f)
        io_map = cpu.b.io_map
        load_ware = firmware.available[args.p](io_map)
        #print(load_ware.code())
        fw = load_ware.assemble()
        print(fw)
        print("Length ",len(fw))
        data = char_convert(fw)
        #print(data)
        load(data)

    if args.action == "simulate":
        print("Simulate")
        if check_firmware(args.p): raise 
        s = Simulator(fw=args.p,asm_file=args.f)
        s.run()

    if args.action == "gatesim":
        print("Gateware Simulation")
        if check_firmware(args.p): raise 
        design = construct.simCPU(platform, fw=args.p, asm_file=args.f)
        fragment = Fragment.get(design, platform)
        f = open("test.vcd", "w")
        dut = design.b.serial_port
        data = []
        if args.l is None:
            st = "the quick brown fox jumps over the lazy dog"
            data = sim_data.str_data(st)
        else:
            # clean the meta sub
Example 23
 def setup_class(cls):
     cls.code = load_serpent('../voting.se')
     cls.sim = Simulator({
         cls.ALICE.address: 10**18,
         cls.BOB.address: 10**18
     })
Example 24
 def setup_class(cls):
     cls.code = "610070515b525b61000c37f26000601f5561001a515b525b6100143961002e5861000e515b525b61000c37f26002600035025b525b54602052f2600060645c03f06000545b60005b54516020526020602060205b525b016001520360005255516005525460200103600060005360645c03f150535b525b54602052f2".decode(
         'hex')
     cls.sim = Simulator({cls.ALICE.address: 10**18})
Example 25
 def setup_class(cls):
     cls.code = compile_serpent('examples/subcurrency.se')
     cls.sim = Simulator({
         cls.ALICE.address: 10**18,
         cls.BOB.address: 10**18
     })
Example 26
    windowsize = 20

    #load_agent = False
    agent = DQN(windowsize, state_size, action_size)
    '''
    agent.load_state()
    perturb = torch.from_numpy(np.random.rand(10,10) / 1)
    agent.model.state_dict()['linear2.weight'] += perturb.float()
    perturb2 = torch.from_numpy(np.random.rand(4,10) / 1)
    agent.model.state_dict()['linear3.weight'] += perturb2.float()
    print(agent.model.state_dict())
    '''

    losses, scores, episodes = [], [], []

    sim = Simulator(orig_data, data, windowsize=windowsize)

    for e in range(EPISODES):
        # Write actions to log file
        score = 0
        state = Tensor(sim.reset())

        while not sim.sim_done():
            #state = Tensor(sim.state) # Get state
            action = agent.get_action(state)

            # Simulate trading
            #-----------
            max_idx = np.argmax(action[:3])  # Choose buy/sell/hold
            next_state, reward, done = sim.step(max_idx, action[3])
            next_state = Tensor(next_state)