def Rectifier(y, y_dx, x, leaky_val, fix_min, fix_max, fix_res): """Rectified linear unit (ReLU) and derivative model using fixbv type. :param y: return max(0, x) as fixbv :param y_dx: return d/dx max(0, x) as fixbv :param x: input value as fixbv :param leaky_val: factor for leaky ReLU, 0.0 without :param fix_min: fixbv min value :param fix_max: fixbv max value :param fix_res: fixbv resolution """ # internal values zero = fixbv(0.0, min=fix_min, max=fix_max, res=fix_res) one = fixbv(1.0, min=fix_min, max=fix_max, res=fix_res) leaky = fixbv(leaky_val, min=fix_min, max=fix_max, res=fix_res) # modules @always_comb def relu(): if x > zero: y.next = x else: y.next = fixbv(leaky * x.val, min=fix_min, max=fix_max, res=fix_res) @always_comb def relu_dx(): if x > zero: y_dx.next = one else: y_dx.next = leaky return relu, relu_dx
def stimulus():
    """Randomly initialize embeddings, then iterate updates until error is zero.

    Generator test stimulus: yields on clock edges and transfers the updated
    embeddings back into the inputs each iteration.
    """
    zero = fixbv(0.0, min=fix_min, max=fix_max, res=fix_res)
    yield clk.posedge

    # random initialization
    for j in range(embedding_dim):
        word_emb[j].next = fixbv(random.uniform(0.0, emb_spread),
                                 min=fix_min, max=fix_max, res=fix_res)
        context_emb[j].next = fixbv(random.uniform(0.0, emb_spread),
                                    min=fix_min, max=fix_max, res=fix_res)

    # iterate to converge
    for i in range(n):
        yield clk.negedge
        # parenthesized print: identical output, Python 2/3 compatible and
        # consistent with the print() calls used elsewhere in this file
        print("%4s mse: %f, y: %f, word: %s, context: %s" % (
            now(), error, y, [float(el.val) for el in word_emb],
            [float(el.val) for el in context_emb]))
        if error == zero:
            break

        # transfer new values
        for j in range(embedding_dim):
            word_emb[j].next = new_word_emb[j]
            context_emb[j].next = new_context_emb[j]

    raise StopSimulation()
def test_module_round_overflow():
    """Check round-mode plus ring-overflow behavior of m_round_overflow."""
    x = Signal(
        fixbv(10.1875, min=-16, max=16, res=2**-5,
              round_mode='round', overflow_mode='ring'))
    y = Signal(
        fixbv(0, min=-8, max=8, res=2**-2,
              round_mode='round', overflow_mode='ring'))

    def bench():
        dut = m_round_overflow(x, y)

        @instance
        def stim():
            print(x, y)
            yield delay(10)
            print(x, y)
            # 10.1875 wraps (ring overflow) into the narrower output format
            assert float(y) == -5.75

        return dut, stim

    Simulation(bench()).run()
def updated_context():
    # gradient-descent step:
    #   new_context[j] = context[j] - rate * (y - y_actual) * dy/dcontext[j]
    diff = fixbv(y - y_actual, min=fix_min, max=fix_max, res=fix_res)
    for j in range(embedding_dim):
        y_dcontext = fixbv(y_dcontext_list[j], min=fix_min, max=fix_max,
                           res=fix_res)
        delta = fixbv(rate * diff * y_dcontext, min=fix_min, max=fix_max,
                      res=fix_res)
        new = fixbv(context_emb[j] - delta, min=fix_min, max=fix_max,
                    res=fix_res)
        # pack the updated element into its slice of the flat output vector
        new_context_embv.next[(j + 1) * fix_width:j * fix_width] = new[:]
def test_int_frac():
    """Integer/fractional part extraction, including a negative value.

    Negative values floor toward -inf, so -4.625 splits as int -5, frac 3.
    """
    cases = [
        # (value, min, max, expected int(), expected frac())
        (3.75, -4, 4, 3, 6),
        (-4.625, -8, 8, -5, 3),
    ]
    for value, lo, hi, want_int, want_frac in cases:
        x = fixbv(value, min=lo, max=hi, res=0.125)
        print(bin(x._val))
        assert x.int() == want_int
        assert x.frac() == want_frac
def stimulus():
    """Sweep dimension 0 of the word/context embeddings and log results."""
    yield clk.negedge
    for i in range(n):
        # new values
        word_emb[0].next = fixbv(step_word * i - step_word * n // 2,
                                 min=fix_min, max=fix_max, res=fix_res)
        context_emb[0].next = fixbv(step_context * i,
                                    min=fix_min, max=fix_max, res=fix_res)
        yield clk.negedge
        # parenthesized print: identical output, Python 2/3 compatible and
        # consistent with the print() calls used elsewhere in this file
        print("%3s word: %s, context: %s, mse: %f, y: %f, new_word: %s, new_context: %s" % (
            now(),
            [float(el.val) for el in word_emb],
            [float(el.val) for el in context_emb],
            error, y,
            [float(el.val) for el in new_word_emb],
            [float(el.val) for el in new_context_emb]))
    raise StopSimulation()
def DotProduct(y, y_da_vec, y_db_vec, a_vec, b_vec, dim, fix_min, fix_max,
               fix_res):
    """Vector dot product and derivative model using fixbv type.

    :param y: return dot(a_vec, b_vec) as fixbv
    :param y_da_vec: return d/da dot(a_vec, b_vec) as vector of fixbv
    :param y_db_vec: return d/db dot(a_vec, b_vec) as vector of fixbv
    :param a_vec: vector of fixbv
    :param b_vec: vector of fixbv
    :param dim: vector dimensionality
    :param fix_min: fixbv min value
    :param fix_max: fixbv max value
    :param fix_res: fixbv resolution
    """
    # bit width of one element inside the flat input vectors
    fix_width = len(a_vec) // dim
    # widened accumulator format so summed products do not overflow
    # NOTE(review): -fix_min**2 * 2 assumes fix_min < 0 — confirm with callers
    fixd_min = -fix_min**2 * 2
    fixd_max = -fixd_min
    fixd_res = fix_res**2

    # internal values: per-element shadow signals over the flat vectors
    a_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(dim)
    ]
    b_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(dim)
    ]
    for j in range(dim):
        a_list[j].assign(a_vec((j + 1) * fix_width, j * fix_width))
        b_list[j].assign(b_vec((j + 1) * fix_width, j * fix_width))

    # modules
    @always_comb
    def dot():
        # accumulate in the wide format, then narrow to the output format
        y_sum = fixbv(0.0, min=fixd_min, max=fixd_max, res=fixd_res)
        for j in range(dim):
            y_sum[:] = y_sum + a_list[j] * b_list[j]
        y.next = fixbv(y_sum, min=fix_min, max=fix_max, res=fix_res)

    @always_comb
    def dot_da():
        # d/da dot(a, b) = b
        y_da_vec.next = b_vec

    @always_comb
    def dot_db():
        # d/db dot(a, b) = a
        y_db_vec.next = a_vec

    return dot, dot_da, dot_db
def wcprod_dcontext():
    # chain rule: dy/dcontext[j] = relu'(dot) * d dot/d context[j]
    for j in range(embedding_dim):
        prod = fixbv(y_relu_dx * y_dot_dcontext_list[j], min=fix_min,
                     max=fix_max, res=fix_res)
        # pack each derivative into its slice of the flat output vector
        y_dcontext_vec.next[(j + 1) * fix_width:j * fix_width] = prod[:]
def convert(target=toVerilog, directory="./ex-target"):
    """Convert design to Verilog or VHDL.

    :param target: MyHDL conversion target (toVerilog or toVHDL)
    :param directory: output directory for the generated HDL
    """
    leaky_val = 0.01
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    y_dx = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    x = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))

    # convert to HDL code
    target.directory = directory
    target(Rectifier, y, y_dx, x, leaky_val, fix_min, fix_max, fix_res)
def relu():
    # positive inputs pass through; negatives are scaled by the leaky factor
    if x > zero:
        y.next = x
    else:
        y.next = fixbv(leaky * x.val, min=fix_min, max=fix_max, res=fix_res)
def convert(target=toVerilog, directory="./ex-target"):
    """Convert design to Verilog or VHDL.

    :param target: MyHDL conversion target (toVerilog or toVHDL)
    :param directory: output directory for the generated HDL
    """
    embedding_dim = 3
    leaky_val = 0.01
    rate_val = 0.1
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8
    # 1 sign + 7 integer + 8 fractional bits per element
    fix_width = 1 + 7 + 8

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    error = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    new_word_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    new_context_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    # per-element shadow views over the flat output vectors
    new_word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    new_context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        new_word_emb[j].assign(
            new_word_embv((j + 1) * fix_width, j * fix_width))
        new_context_emb[j].assign(
            new_context_embv((j + 1) * fix_width, j * fix_width))

    y_actual = Signal(fixbv(1.0, min=fix_min, max=fix_max, res=fix_res))
    word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    word_embv = ConcatSignal(*reversed(word_emb))
    context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    context_embv = ConcatSignal(*reversed(context_emb))

    # convert to HDL code
    target.directory = directory
    target(WordContextUpdated, y, error, new_word_embv, new_context_embv,
           y_actual, word_embv, context_embv, embedding_dim, leaky_val,
           rate_val, fix_min, fix_max, fix_res)
def test_module_align():
    """Binary-point alignment of operands with different resolutions."""
    x = Signal(fixbv(0.125, min=-32, max=32, res=1 / 2**8))
    y = Signal(fixbv(0.25, min=-128, max=128, res=1 / 2**32))
    z = Signal(fixbv(0, min=-16, max=16, res=1 / 2**16))

    def bench():
        dut = m_align(x, y, z)

        @instance
        def stim():
            print(x, y, z)
            yield delay(10)
            print(x, y, z)
            assert float(z) == 0.34375

        return dut, stim

    Simulation(bench()).run()
def bench(self, fixop):
    """Build a bench comparing *fixop* against itself with round/saturate modes.

    Converts the design once (exercising HDL generation) and simulates it,
    asserting the simulated result matches the expected value.
    """
    x = Signal(
        fixbv(0.125, min=-8, max=8, res=2**-5,
              round_mode='round', overflow_mode='saturate'))
    y = Signal(
        fixbv(-2.25, min=-8, max=8, res=2**-6,
              round_mode='round', overflow_mode='saturate'))
    z = Signal(
        fixbv(1.125, min=-8, max=8, res=2**-7,
              round_mode='round', overflow_mode='saturate'))
    w = Signal(
        fixbv(0, min=-8, max=8, res=2**-4,
              round_mode='round', overflow_mode='saturate'))
    w_v = Signal(
        fixbv(0, round_mode='round', overflow_mode='saturate')[8, 3, 4])

    # convert to exercise HDL generation, then instantiate for simulation
    fixop_inst = fixop(x, y, z, w).convert(hdl='verilog')
    fixop_v_inst = fixop(x, y, z, w)

    @instance
    def stimulus():
        print(w, w_v)
        yield delay(10)
        print(w, w_v)
        assert w == w_v
        assert float(w) == -2.125

    return stimulus
def bench(self, fixop):
    """Build a bench comparing *fixop* against itself with a uniform 2**-4 resolution.

    Converts the design once (exercising HDL generation) and simulates it,
    asserting the simulated result matches the expected value.
    """
    x = Signal(fixbv(0.125, min=-8, max=8, res=2**-4))
    y = Signal(fixbv(-2.25, min=-8, max=8, res=2**-4))
    z = Signal(fixbv(1.125, min=-8, max=8, res=2**-4))
    w = Signal(fixbv(0, min=-8, max=8, res=2**-4))
    w_v = Signal(fixbv(0)[8, 3, 4])

    # convert to exercise HDL generation, then instantiate for simulation
    fixop_inst = fixop(x, y, z, w).convert(hdl='verilog')
    fixop_v_inst = fixop(x, y, z, w)

    @instance
    def stimulus():
        print(w, w_v)
        yield delay(10)
        print(w, w_v)
        assert w == w_v
        assert float(w) == -3.25

    return stimulus
def test_zero(n=10, step=0.5):
    """Testing bench around zero.

    :param n: number of sweep steps
    :param step: increment per step; x sweeps from -step*n/2 upward
    """
    leaky_val = 0.01
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    y_dx = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    x = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    clk = Signal(bool(False))

    # modules
    relu = Rectifier(y, y_dx, x, leaky_val, fix_min, fix_max, fix_res)

    # test stimulus
    HALF_PERIOD = delay(5)

    @always(HALF_PERIOD)
    def clk_gen():
        clk.next = not clk

    @instance
    def stimulus():
        yield clk.negedge
        for i in range(n):
            # new values: sweep x through zero
            x.next = fixbv(step * i - step * n / 2.0,
                           min=fix_min, max=fix_max, res=fix_res)
            yield clk.negedge
            # parenthesized print: identical output, Python 2/3 compatible
            # and consistent with the print() calls elsewhere in this file
            print("%3s x: %f, y: %f, y_dx: %f" % (now(), x, y, y_dx))
        raise StopSimulation()

    return clk_gen, stimulus, relu
def test_module_more():
    """Run m_more and print its signals before and after one delay step."""
    w = Signal(fixbv(0.5, min=-128, max=128, res=1 / 2**32))
    x = Signal(fixbv(0.125, min=-128, max=128, res=1 / 2**32))
    y = Signal(fixbv(0.125, min=-128, max=128, res=1 / 2**32))
    z = Signal(fixbv(0, min=-128, max=128, res=1 / 2**32))

    def bench():
        dut = m_more(w, x, y, z)

        @instance
        def stim():
            print(w, x, y, z)
            yield delay(10)
            print(w, x, y, z)

        return dut, stim

    Simulation(bench()).run()
def test_module_more():
    """Run m_more on wide-resolution signals and check the computed result."""
    w = Signal(fixbv(0.5, min=-128, max=128, res=1 / 2**32))
    x = Signal(fixbv(0.125, min=-128, max=128, res=1 / 2**32))
    y = Signal(fixbv(0.125, min=-128, max=128, res=1 / 2**32))
    z = Signal(fixbv(0, min=-128, max=128, res=1 / 2**32))

    def _test():
        tbdut = m_more(w, x, y, z)

        @instance
        def tbstim():
            print(w, x, y, z)
            yield delay(10)
            print(w, x, y, z)
            assert float(z) == 0.015625

        return tbdut, tbstim

    Simulation(_test()).run()
def convert(target=toVerilog, directory="./ex-target"):
    """Convert design to Verilog or VHDL.

    :param target: MyHDL conversion target (toVerilog or toVHDL)
    :param directory: output directory for the generated HDL
    """
    embedding_dim = 3
    leaky_val = 0.01
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8
    # 1 sign + 7 integer + 8 fractional bits per element
    fix_width = 1 + 7 + 8

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    y_dword_vec = Signal(intbv(0)[embedding_dim * fix_width:])
    y_dcontext_vec = Signal(intbv(0)[embedding_dim * fix_width:])
    # per-element shadow views over the flat derivative vectors
    y_dword_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    y_dcontext_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        y_dword_list[j].assign(y_dword_vec((j + 1) * fix_width, j * fix_width))
        y_dcontext_list[j].assign(
            y_dcontext_vec((j + 1) * fix_width, j * fix_width))

    word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    word_embv = ConcatSignal(*reversed(word_emb))
    context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    context_embv = ConcatSignal(*reversed(context_emb))

    # convert to HDL code
    target.directory = directory
    target(WordContextProduct, y, y_dword_vec, y_dcontext_vec, word_embv,
           context_embv, embedding_dim, leaky_val, fix_min, fix_max, fix_res)
def stimulus():
    """Sweep x around zero and log ReLU output and derivative."""
    yield clk.negedge
    for i in range(n):
        # new values
        x.next = fixbv(step * i - step * n / 2.0,
                       min=fix_min, max=fix_max, res=fix_res)
        yield clk.negedge
        # parenthesized print: identical output, Python 2/3 compatible and
        # consistent with the print() calls used elsewhere in this file
        print("%3s x: %f, y: %f, y_dx: %f" % (now(), x, y, y_dx))
    raise StopSimulation()
def test_dim0(n=10, step_word=0.5, step_context=0.5):
    """Testing bench around zero in dimension 0.

    :param n: number of sweep steps
    :param step_word: word-embedding increment per step (swept through zero)
    :param step_context: context-embedding increment per step
    """
    embedding_dim = 3
    leaky_val = 0.01
    rate_val = 0.1
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8
    # 1 sign + 7 integer + 8 fractional bits per element
    fix_width = 1 + 7 + 8

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    error = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    new_word_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    new_context_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    new_word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    new_context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        new_word_emb[j].assign(
            new_word_embv((j + 1) * fix_width, j * fix_width))
        new_context_emb[j].assign(
            new_context_embv((j + 1) * fix_width, j * fix_width))

    y_actual = Signal(fixbv(1.0, min=fix_min, max=fix_max, res=fix_res))
    word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    word_embv = ConcatSignal(*reversed(word_emb))
    context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    context_embv = ConcatSignal(*reversed(context_emb))

    clk = Signal(bool(False))

    # modules
    wcupdated = WordContextUpdated(y, error, new_word_embv, new_context_embv,
                                   y_actual, word_embv, context_embv,
                                   embedding_dim, leaky_val, rate_val,
                                   fix_min, fix_max, fix_res)

    # test stimulus
    HALF_PERIOD = delay(5)

    @always(HALF_PERIOD)
    def clk_gen():
        clk.next = not clk

    @instance
    def stimulus():
        yield clk.negedge
        for i in range(n):
            # new values
            word_emb[0].next = fixbv(step_word * i - step_word * n // 2,
                                     min=fix_min, max=fix_max, res=fix_res)
            context_emb[0].next = fixbv(step_context * i, min=fix_min,
                                        max=fix_max, res=fix_res)
            yield clk.negedge
            # parenthesized print: identical output, Python 2/3 compatible
            # and consistent with the print() calls elsewhere in this file
            print("%3s word: %s, context: %s, mse: %f, y: %f, new_word: %s, new_context: %s" % (
                now(),
                [float(el.val) for el in word_emb],
                [float(el.val) for el in context_emb],
                error, y,
                [float(el.val) for el in new_word_emb],
                [float(el.val) for el in new_context_emb]))
        raise StopSimulation()

    return clk_gen, stimulus, wcupdated
def convert(target=toVerilog, directory="./ex-target"):
    """Convert design to Verilog or VHDL.

    :param target: MyHDL conversion target (toVerilog or toVHDL)
    :param directory: output directory for the generated HDL
    """
    dim = 3
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8
    # 1 sign + 7 integer + 8 fractional bits per element
    fix_width = 1 + 7 + 8

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    y_da_vec = Signal(intbv(0)[dim * fix_width:])
    y_db_vec = Signal(intbv(0)[dim * fix_width:])
    # per-element shadow views over the flat derivative vectors
    y_da_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(dim)
    ]
    y_db_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(dim)
    ]
    for j in range(dim):
        y_da_list[j].assign(y_da_vec((j + 1) * fix_width, j * fix_width))
        y_db_list[j].assign(y_db_vec((j + 1) * fix_width, j * fix_width))

    a_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(dim)
    ]
    a_vec = ConcatSignal(*reversed(a_list))
    b_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(dim)
    ]
    b_vec = ConcatSignal(*reversed(b_list))

    # convert to HDL code
    target.directory = directory
    target(DotProduct, y, y_da_vec, y_db_vec, a_vec, b_vec, dim, fix_min,
           fix_max, fix_res)
def stimulus():
    """Randomly initialize embeddings, then iterate updates until error is zero.

    Generator test stimulus: yields on clock edges and transfers the updated
    embeddings back into the inputs each iteration.
    """
    zero = fixbv(0.0, min=fix_min, max=fix_max, res=fix_res)
    yield clk.posedge

    # random initialization
    for j in range(embedding_dim):
        word_emb[j].next = fixbv(random.uniform(0.0, emb_spread),
                                 min=fix_min, max=fix_max, res=fix_res)
        context_emb[j].next = fixbv(random.uniform(0.0, emb_spread),
                                    min=fix_min, max=fix_max, res=fix_res)

    # iterate to converge
    for i in range(n):
        yield clk.negedge
        # parenthesized print: identical output, Python 2/3 compatible and
        # consistent with the print() calls used elsewhere in this file
        print("%4s mse: %f, y: %f, word: %s, context: %s" % (
            now(), error, y, [float(el.val) for el in word_emb],
            [float(el.val) for el in context_emb]))
        if error == zero:
            break

        # transfer new values
        for j in range(embedding_dim):
            word_emb[j].next = new_word_emb[j]
            context_emb[j].next = new_context_emb[j]

    raise StopSimulation()
def stimulus():
    """Sweep dimension 0 of both input vectors and log dot-product results."""
    yield clk.negedge
    for i in range(n):
        # new values
        a_list[0].next = fixbv(step_a * i - step_a * n // 2,
                               min=fix_min, max=fix_max, res=fix_res)
        b_list[0].next = fixbv(step_b * i, min=fix_min, max=fix_max,
                               res=fix_res)
        yield clk.negedge
        # parenthesized print: identical output, Python 2/3 compatible and
        # consistent with the print() calls used elsewhere in this file
        print("%3s a_list: %s, b_list: %s, y: %f, y_da: %s, y_db: %s" % (
            now(), [float(el.val) for el in a_list],
            [float(el.val) for el in b_list], y,
            [float(el.val) for el in y_da_list],
            [float(el.val) for el in y_db_list]))
    raise StopSimulation()
def test_module_add():
    """Adding pi to itself should land close to 2*pi within quantization error."""
    x = Signal(fixbv(3.14159, min=-8, max=8, res=1e-5))
    y = Signal(fixbv(3.14159, min=-8, max=8, res=1e-5))
    z = Signal(fixbv(0, min=-8, max=8, res=1e-5))

    def bench():
        dut = m_add(x, y, z)

        @instance
        def stim():
            print(x, y, z)
            yield delay(10)
            print(x, y, z)
            result = float(z)
            assert result > 6
            assert result < 7
            err = abs(2 * math.pi - result)
            # @todo: need to quantify what the expected error is
            assert err < 1e-4

        return dut, stim

    Simulation(bench()).run()
def stimulus():
    """Sweep dimension 0 of the word/context embeddings and log results."""
    yield clk.negedge
    for i in range(n):
        # new values
        word_emb[0].next = fixbv(step_word * i - step_word * n // 2,
                                 min=fix_min, max=fix_max, res=fix_res)
        context_emb[0].next = fixbv(step_context * i,
                                    min=fix_min, max=fix_max, res=fix_res)
        yield clk.negedge
        # parenthesized print: identical output, Python 2/3 compatible and
        # consistent with the print() calls used elsewhere in this file
        print("%3s word: %s, context: %s, mse: %f, y: %f, new_word: %s, new_context: %s" % (
            now(),
            [float(el.val) for el in word_emb],
            [float(el.val) for el in context_emb],
            error, y,
            [float(el.val) for el in new_word_emb],
            [float(el.val) for el in new_context_emb]))
    raise StopSimulation()
def test_module_add():
    """Adding pi to itself should land close to 2*pi within quantization error."""
    x = Signal(fixbv(3.14159, min=-8, max=8, res=1e-5))
    y = Signal(fixbv(3.14159, min=-8, max=8, res=1e-5))
    z = Signal(fixbv(0, min=-8, max=8, res=1e-5))

    def _test():
        tbdut = m_add(x, y, z)

        @instance
        def tbstim():
            print(x, y, z)
            yield delay(10)
            print(x, y, z)
            assert float(z) > 6
            assert float(z) < 7
            err = abs(2 * math.pi - float(z))
            # @todo: need to quantify what the expected error is
            assert err < 1e-4

        return tbdut, tbstim

    Simulation(_test()).run()
def test_equalities():
    """Comparison operators on bare fixbv values and on fixbv inside Signal."""
    x = fixbv(3.14159, min=-8, max=8, res=1e-5)
    y = fixbv(3.14159, min=-8, max=8, res=1e-5)
    z = fixbv(0, min=-8, max=8, res=1e-5)
    w = fixbv(0, min=-16, max=16, res=2**-16)

    assert x == y
    assert x >= y
    assert y <= x
    assert z < x
    assert x > z
    assert x != z

    # comparing fixbv of different formats must raise
    with pytest.raises(AssertionError) as excinfo:
        if x == w:
            print("nope, this shouldn't work")

    # @todo: now this is an issue, when intbv is in a Signal and
    #        pass the operators down it will be intbv == Signal
    x = Signal(fixbv(3.14159, min=-8, max=8, res=1e-5))
    y = Signal(fixbv(3.14159, min=-8, max=8, res=1e-5))
    z = Signal(fixbv(0, min=-8, max=8, res=1e-5))
    w = Signal(fixbv(0, min=-16, max=16, res=2**-16))

    # these tests currently fail, need to understand why
    assert x == y
    assert x >= y
    assert y <= x
    assert z < x
    assert x > z
    assert x != z

    # none of the following should work because 'x' and 'w' are
    # different types. They need to be the same widths before
    # the comparisons.
    with pytest.raises(AssertionError) as excinfo:
        if x == w:
            print("nope, this shoudln't work")

    with pytest.raises(AssertionError) as excinfo:
        if x < w:
            print("nope, this shoudln't work")

    with pytest.raises(AssertionError) as excinfo:
        if w > x:
            print("nope, this shoudln't work")

    with pytest.raises(AssertionError) as excinfo:
        if x <= w:
            print("nope, this shoudln't work")

    with pytest.raises(AssertionError) as excinfo:
        if w >= x:
            print("nope, this shoudln't work")

    with pytest.raises(AssertionError) as excinfo:
        if x != w:
            print("nope, this shoudln't work")
def test_basic():
    """Exact single-bit values, sign handling, comparisons, and bit patterns."""
    # test all exact single bit values for (16,0,15)
    for f in range(1, 16):
        x = fixbv(2**-f)[16, 0, 15]
        y = fixbv(-2**-f)[16, 0, 15]
        assert float(x) == 2**-f, \
            "%f != %f, %04x != %04x" % (2.**-f, float(x), x, 0x8000 >> f)
        assert bin(x._val, 16) == bin(0x8000 >> f, 16), \
            "%s != %s for f == %d" % (bin(x._val, 16), bin(0x8000 >> f, 16), f)
        assert float(y) == -2**-f
        assert bin(y._val, 16) == bin(-0x8000 >> f, 16), \
            "%s" % (bin(y._val, 16))

    # Test all exact single bit values for W128.0
    for f in range(1, 128):
        x = fixbv(2**-f, min=-1, max=1, res=2**-127)
        y = fixbv(-2**-f, min=-1, max=1, res=2**-127)
        assert float(x) == 2**-f
        assert bin(x, 128) == bin(0x80000000000000000000000000000000 >> f, 128)
        assert float(y) == -2**-f
        assert bin(y, 128) == bin(-0x80000000000000000000000000000000 >> f,
                                  128)
        assert x > y
        assert y < x
        assert min(x, y) == min(y, x) == y
        assert max(x, y) == max(y, x) == x
        assert x != y

    x = fixbv(3.14159)[18, 3]
    y = fixbv(-1.4142 - 1.161802 - 2.71828)[18, 3]
    assert x != y
    #assert --x == x
    assert abs(y) > abs(x)
    assert abs(x) < abs(y)
    assert x == x and y == y

    # Create a (8,3) fixed-point object value == 2.5
    x = fixbv(2.5, min=-8, max=8, res=1. / 32)
    assert float(x) == 2.5
    assert x._val == 0x50
def test_basic():
    """Exact single-bit values, sign handling, comparisons, and bit patterns."""
    # every exact single-bit value representable in a (16,0,15) format
    for shift in range(1, 16):
        pos = fixbv(2**-shift)[16, 0, 15]
        neg = fixbv(-2**-shift)[16, 0, 15]
        assert float(pos) == 2**-shift, \
            "%f != %f, %04x != %04x" % (2.**-shift, float(pos), pos,
                                        0x8000 >> shift)
        assert bin(pos._val, 16) == bin(0x8000 >> shift, 16), \
            "%s != %s for f == %d" % (bin(pos._val, 16),
                                      bin(0x8000 >> shift, 16), shift)
        assert float(neg) == -2**-shift
        assert bin(neg._val, 16) == bin(-0x8000 >> shift, 16), \
            "%s" % (bin(neg._val, 16))

    # every exact single-bit value in a 128-bit fractional format
    for shift in range(1, 128):
        pos = fixbv(2**-shift, min=-1, max=1, res=2**-127)
        neg = fixbv(-2**-shift, min=-1, max=1, res=2**-127)
        assert float(pos) == 2**-shift
        assert bin(pos, 128) == bin(
            0x80000000000000000000000000000000 >> shift, 128)
        assert float(neg) == -2**-shift
        assert bin(neg, 128) == bin(
            -0x80000000000000000000000000000000 >> shift, 128)
        assert pos > neg
        assert neg < pos
        assert min(pos, neg) == min(neg, pos) == neg
        assert max(pos, neg) == max(neg, pos) == pos
        assert pos != neg

    a = fixbv(3.14159)[18, 3]
    b = fixbv(-1.4142 - 1.161802 - 2.71828)[18, 3]
    assert a != b
    #assert --a == a
    assert abs(b) > abs(a)
    assert abs(a) < abs(b)
    assert a == a and b == b

    # a (8,3) fixed-point value equal to 2.5 has raw bits 0x50
    c = fixbv(2.5, min=-8, max=8, res=1. / 32)
    assert float(c) == 2.5
    assert c._val == 0x50
def convert(target=toVerilog, directory="./ex-target"):
    """Convert design to Verilog or VHDL.

    :param target: MyHDL conversion target (toVerilog or toVHDL)
    :param directory: output directory for the generated HDL
    """
    embedding_dim = 3
    leaky_val = 0.01
    rate_val = 0.1
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8
    # 1 sign + 7 integer + 8 fractional bits per element
    fix_width = 1 + 7 + 8

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    error = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    new_word_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    new_context_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    # per-element shadow views over the flat output vectors
    new_word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    new_context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        new_word_emb[j].assign(
            new_word_embv((j + 1) * fix_width, j * fix_width))
        new_context_emb[j].assign(
            new_context_embv((j + 1) * fix_width, j * fix_width))

    y_actual = Signal(fixbv(1.0, min=fix_min, max=fix_max, res=fix_res))
    word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    word_embv = ConcatSignal(*reversed(word_emb))
    context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    context_embv = ConcatSignal(*reversed(context_emb))

    # convert to HDL code
    target.directory = directory
    target(WordContextUpdated, y, error, new_word_embv, new_context_embv,
           y_actual, word_embv, context_embv, embedding_dim, leaky_val,
           rate_val, fix_min, fix_max, fix_res)
def test_equalities():
    """Comparison operators across fixbv values of equal and differing formats."""
    x = fixbv(3.14159, min=-8, max=8, res=1e-5)
    y = fixbv(3.14159, min=-8, max=8, res=1e-5)
    z = fixbv(0, min=-8, max=8, res=1e-5)
    w = fixbv(0, min=-16, max=16, res=2**-16)
    u = fixbv(-2.7183, min=-8, max=8, res=1e-5)
    v = fixbv(-2.7183, min=-16, max=16, res=1e-7)

    assert x == y
    assert x >= y
    assert y <= x
    assert z < x
    assert x > z
    assert x != z
    # comparisons across different formats
    assert x > w
    assert y >= w
    assert z == w
    assert z >= w
    assert z <= w
    assert x > u
    assert u <= y
    assert w >= v
    assert v < x

    # with pytest.raises(AssertionError) as excinfo:
    #     if x == w:
    #         print("nope, this shouldn't work")

    # @todo: now this is an issue, when intbv is in a Signal and
    #        pass the operators down it will be intbv == Signal
    x = Signal(fixbv(3.14159, min=-8, max=8, res=1e-5))
    y = Signal(fixbv(3.14159, min=-8, max=8, res=1e-5))
    z = Signal(fixbv(0, min=-8, max=8, res=1e-5))
    w = Signal(fixbv(0, min=-16, max=16, res=2**-16))
    u = Signal(fixbv(-2.7183, min=-8, max=8, res=1e-5))
    v = Signal(fixbv(-2.7183, min=-16, max=16, res=1e-7))

    # these tests currently fail, need to understand why
    assert x == y
    assert x >= y
    assert y <= x
    assert z < x
    assert x > z
    assert x != z
    assert x > w
    assert y >= w
    assert z == w
    assert z >= w
    assert z <= w
    assert x > u
    assert u <= y
    assert w >= v
    assert v < x
def test_dim0(n=10, step_a=0.5, step_b=0.5):
    """Testing bench around zero in dimension 0.

    :param n: number of sweep steps
    :param step_a: a-vector increment per step (swept through zero)
    :param step_b: b-vector increment per step
    """
    dim = 3
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8
    # 1 sign + 7 integer + 8 fractional bits per element
    fix_width = 1 + 7 + 8

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    y_da_vec = Signal(intbv(0)[dim * fix_width:])
    y_db_vec = Signal(intbv(0)[dim * fix_width:])
    y_da_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(dim)
    ]
    y_db_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(dim)
    ]
    for j in range(dim):
        y_da_list[j].assign(y_da_vec((j + 1) * fix_width, j * fix_width))
        y_db_list[j].assign(y_db_vec((j + 1) * fix_width, j * fix_width))

    a_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(dim)
    ]
    a_vec = ConcatSignal(*reversed(a_list))
    b_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(dim)
    ]
    b_vec = ConcatSignal(*reversed(b_list))

    clk = Signal(bool(False))

    # modules
    dot = DotProduct(y, y_da_vec, y_db_vec, a_vec, b_vec, dim, fix_min,
                     fix_max, fix_res)

    # test stimulus
    HALF_PERIOD = delay(5)

    @always(HALF_PERIOD)
    def clk_gen():
        clk.next = not clk

    @instance
    def stimulus():
        yield clk.negedge
        for i in range(n):
            # new values
            a_list[0].next = fixbv(step_a * i - step_a * n // 2,
                                   min=fix_min, max=fix_max, res=fix_res)
            b_list[0].next = fixbv(step_b * i, min=fix_min, max=fix_max,
                                   res=fix_res)
            yield clk.negedge
            # parenthesized print: identical output, Python 2/3 compatible
            # and consistent with the print() calls elsewhere in this file
            print("%3s a_list: %s, b_list: %s, y: %f, y_da: %s, y_db: %s" % (
                now(), [float(el.val) for el in a_list],
                [float(el.val) for el in b_list], y,
                [float(el.val) for el in y_da_list],
                [float(el.val) for el in y_db_list]))
        raise StopSimulation()

    return clk_gen, stimulus, dot
def WordContextUpdated(y, error, new_word_embv, new_context_embv, y_actual,
                       word_embv, context_embv, embedding_dim, leaky_val,
                       rate_val, fix_min, fix_max, fix_res):
    """Word-context embeddings updated model.

    :param y: return relu(dot(word_emb, context_emb)) as fixbv
    :param error: return MSE prediction error as fixbv
    :param new_word_embv: return updated word embedding vector of fixbv
    :param new_context_embv: return updated context embedding vector of fixbv
    :param y_actual: actual training value as fixbv
    :param word_embv: word embedding vector of fixbv
    :param context_embv: context embedding vector of fixbv
    :param embedding_dim: embedding dimensionality
    :param leaky_val: factor for leaky ReLU, 0.0 without
    :param rate_val: learning rate factor
    :param fix_min: fixbv min value
    :param fix_max: fixbv max value
    :param fix_res: fixbv resolution
    """
    # bit width of one element inside the flat embedding vectors
    fix_width = len(word_embv) // embedding_dim

    # internal values
    one = fixbv(1.0, min=fix_min, max=fix_max, res=fix_res)
    rate = fixbv(rate_val, min=fix_min, max=fix_max, res=fix_res)
    # per-element shadow signals over the flat input vectors
    word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        word_emb[j].assign(word_embv((j + 1) * fix_width, j * fix_width))
        context_emb[j].assign(context_embv((j + 1) * fix_width, j * fix_width))

    # derivative vectors filled by the product model, plus unpacked views
    y_dword_vec = Signal(intbv(0)[embedding_dim * fix_width:])
    y_dcontext_vec = Signal(intbv(0)[embedding_dim * fix_width:])
    y_dword_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    y_dcontext_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        y_dword_list[j].assign(y_dword_vec((j + 1) * fix_width, j * fix_width))
        y_dcontext_list[j].assign(y_dcontext_vec((j + 1) * fix_width,
                                                 j * fix_width))

    # modules
    wcprod = WordContextProduct(y, y_dword_vec, y_dcontext_vec, word_embv,
                                context_embv, embedding_dim, leaky_val,
                                fix_min, fix_max, fix_res)

    @always_comb
    def mse():
        # squared prediction error: (y - y_actual)**2
        diff = fixbv(y - y_actual, min=fix_min, max=fix_max, res=fix_res)
        error.next = fixbv(diff * diff, min=fix_min, max=fix_max, res=fix_res)

    @always_comb
    def updated_word():
        # gradient-descent step on each word-embedding element
        diff = fixbv(y - y_actual, min=fix_min, max=fix_max, res=fix_res)
        for j in range(embedding_dim):
            y_dword = fixbv(y_dword_list[j], min=fix_min, max=fix_max,
                            res=fix_res)
            delta = fixbv(rate * diff * y_dword, min=fix_min, max=fix_max,
                          res=fix_res)
            new = fixbv(word_emb[j] - delta, min=fix_min, max=fix_max,
                        res=fix_res)
            new_word_embv.next[(j + 1) * fix_width:j * fix_width] = new[:]

    @always_comb
    def updated_context():
        # gradient-descent step on each context-embedding element
        diff = fixbv(y - y_actual, min=fix_min, max=fix_max, res=fix_res)
        for j in range(embedding_dim):
            y_dcontext = fixbv(y_dcontext_list[j], min=fix_min, max=fix_max,
                               res=fix_res)
            delta = fixbv(rate * diff * y_dcontext, min=fix_min, max=fix_max,
                          res=fix_res)
            new = fixbv(context_emb[j] - delta, min=fix_min, max=fix_max,
                        res=fix_res)
            new_context_embv.next[(j + 1) * fix_width:j * fix_width] = new[:]

    return wcprod, mse, updated_word, updated_context
def test_math():
    """Arithmetic on fixbv: add/sub/mul, width growth, and point alignment."""
    # same-format addition
    x = fixbv(0.5)[16, 0]
    y = fixbv(0.25)[16, 0]
    z = fixbv(0)[16, 0]
    #print(x, y, z)
    #w = x + y
    #print(w, type(w))
    z[:] = x + y
    print(z, type(z), x + y)
    assert float(z) == 0.75

    # addition/subtraction with a result format derived from the operand widths
    x = fixbv(3.5, min=-8, max=8, res=2**-5)
    y = fixbv(-5.25, min=-8, max=8, res=2**-5)
    iW = x._W + y._W
    print(iW)
    z = fixbv(0)[iW[:]]
    z[:] = x + y
    assert float(z) == -1.75
    z[:] = y - x
    assert float(z) == -8.75
    z[:] = x - y
    assert float(z) == 8.75

    # multiplication with a result format derived from the operand widths
    x = fixbv(3.141592)[19, 4]
    y = fixbv(1.618033)[19, 4]
    print(float(x), int(x), repr(x))
    print(float(y), int(y), repr(y))
    iW = x._W * y._W
    print(iW)
    z = fixbv(0)[iW[:]]
    wl, iwl, fwl = z._W[:]
    print(repr(z), z._max, z._min, z._nrbits, "iwl, fwl", iwl, fwl)
    z[:] = x * y
    print(repr(x), repr(y))
    print(float(z), int(z), repr(z))
    assert float(z) > 5.

    # squaring and cubing via repeated multiply and via the ** operator
    x = fixbv(3.5, min=-8, max=8, res=2**-5)
    z = fixbv(0)[(x * x).format]
    print(x, z)
    z[:] = x * x
    assert float(z) == 12.25
    z[:] = x**2
    assert float(z) == 12.25
    z = fixbv(0)[(x * x * x).format]
    z[:] = x * x * x
    assert float(z) == 42.875
    z[:] = x**3
    assert float(z) == 42.875

    # Point alignment
    x = fixbv(2.25, min=-4, max=4, res=2**-5)
    y = fixbv(1.5, min=-2, max=2, res=2**-8)
    z = fixbv(0)[12, 4, 7]
    z[:] = x + y
    assert float(z) == 3.75
    z[:] = x - y
    assert float(z) == 0.75
    z[:] = x * y
    assert float(z) == 3.375

    x = fixbv(9.5, min=-16, max=16, res=0.25)
    y = fixbv(-3.25, min=-4, max=4, res=2**-4)
    z = fixbv(0)[11, 6, 4]
    z[:] = x + y
    assert float(z) == 6.25
    z[:] = x - y
    assert float(z) == 12.75
    z[:] = y - x
    assert float(z) == -12.75
    z[:] = x * y
    assert float(z) == -30.875
def WordContextUpdated(y, error, new_word_embv, new_context_embv, y_actual,
                       word_embv, context_embv, embedding_dim, leaky_val,
                       rate_val, fix_min, fix_max, fix_res):
    """Word-context embeddings updated model.

    :param y: return relu(dot(word_emb, context_emb)) as fixbv
    :param error: return MSE prediction error as fixbv
    :param new_word_embv: return updated word embedding vector of fixbv
    :param new_context_embv: return updated context embedding vector of fixbv
    :param y_actual: actual training value as fixbv
    :param word_embv: word embedding vector of fixbv
    :param context_embv: context embedding vector of fixbv
    :param embedding_dim: embedding dimensionality
    :param leaky_val: factor for leaky ReLU, 0.0 without
    :param rate_val: learning rate factor
    :param fix_min: fixbv min value
    :param fix_max: fixbv max value
    :param fix_res: fixbv resolution
    """
    # width in bits of one fixbv element within the flat embedding vectors
    fix_width = len(word_embv) // embedding_dim

    # internal values
    # (removed unused constant `one` -- it was created but never referenced)
    rate = fixbv(rate_val, min=fix_min, max=fix_max, res=fix_res)

    # per-element shadow signals over the flat input vectors
    word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        word_emb[j].assign(word_embv((j + 1) * fix_width, j * fix_width))
        context_emb[j].assign(context_embv((j + 1) * fix_width, j * fix_width))

    # gradient vectors produced by WordContextProduct, plus per-element views
    y_dword_vec = Signal(intbv(0)[embedding_dim * fix_width:])
    y_dcontext_vec = Signal(intbv(0)[embedding_dim * fix_width:])
    y_dword_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    y_dcontext_list = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        y_dword_list[j].assign(y_dword_vec((j + 1) * fix_width, j * fix_width))
        y_dcontext_list[j].assign(
            y_dcontext_vec((j + 1) * fix_width, j * fix_width))

    # modules
    wcprod = WordContextProduct(y, y_dword_vec, y_dcontext_vec,
                                word_embv, context_embv, embedding_dim,
                                leaky_val, fix_min, fix_max, fix_res)

    @always_comb
    def mse():
        # error = (y - y_actual)^2, quantized into the working format
        diff = fixbv(y - y_actual, min=fix_min, max=fix_max, res=fix_res)
        error.next = fixbv(diff * diff, min=fix_min, max=fix_max, res=fix_res)

    @always_comb
    def updated_word():
        # gradient-descent step: word_emb[j] - rate * diff * d(y)/d(word[j])
        diff = fixbv(y - y_actual, min=fix_min, max=fix_max, res=fix_res)
        for j in range(embedding_dim):
            y_dword = fixbv(y_dword_list[j],
                            min=fix_min, max=fix_max, res=fix_res)
            delta = fixbv(rate * diff * y_dword,
                          min=fix_min, max=fix_max, res=fix_res)
            new = fixbv(word_emb[j] - delta,
                        min=fix_min, max=fix_max, res=fix_res)
            new_word_embv.next[(j + 1) * fix_width:j * fix_width] = new[:]

    @always_comb
    def updated_context():
        # symmetric update for the context side of the embedding pair
        diff = fixbv(y - y_actual, min=fix_min, max=fix_max, res=fix_res)
        for j in range(embedding_dim):
            y_dcontext = fixbv(y_dcontext_list[j],
                               min=fix_min, max=fix_max, res=fix_res)
            delta = fixbv(rate * diff * y_dcontext,
                          min=fix_min, max=fix_max, res=fix_res)
            new = fixbv(context_emb[j] - delta,
                        min=fix_min, max=fix_max, res=fix_res)
            new_context_embv.next[(j + 1) * fix_width:j * fix_width] = new[:]

    return wcprod, mse, updated_word, updated_context
def test_round_overflow():
    """Check fixbv round modes (round/nearest/floor) and overflow modes
    (ring/saturate) by assigning finer-resolution values into a coarser
    or narrower target register and asserting the quantized result."""

    def _fix(value, res, round_mode, overflow_mode, lo=-16, hi=16):
        # shorthand for the repeated fixbv(...) constructions below
        return fixbv(value, min=lo, max=hi, res=res,
                     round_mode=round_mode, overflow_mode=overflow_mode)

    # round mode: round
    dst = _fix(0, 2**-2, 'round', 'ring')
    dst[:] = _fix(10.1875, 2**-5, 'round', 'ring')
    assert float(dst) == 10.25
    dst[:] = _fix(-3.14, 2**-6, 'round', 'ring')
    assert float(dst) == -3.25
    dst[:] = _fix(-2.125, 2**-6, 'round', 'ring')
    assert float(dst) == -2

    # round mode: nearest
    dst = _fix(0, 2**-2, 'nearest', 'ring')
    dst[:] = _fix(10.625, 2**-5, 'nearest', 'ring')
    assert float(dst) == 10.75
    dst[:] = _fix(-3.14, 2**-6, 'nearest', 'ring')
    assert float(dst) == -3.25
    dst[:] = _fix(-2.125, 2**-6, 'nearest', 'ring')
    assert float(dst) == -2.25

    # round mode: floor
    dst = _fix(0, 2**-2, 'floor', 'ring')
    dst[:] = _fix(10.625, 2**-5, 'floor', 'ring')
    assert float(dst) == 10.5
    dst[:] = _fix(-3.14, 2**-6, 'floor', 'ring')
    assert float(dst) == -3.25
    dst[:] = _fix(-2.125, 2**-6, 'floor', 'ring')
    assert float(dst) == -2.25

    # overflow mode: ring (values wrap around the [-4, 4) target range)
    dst = _fix(0, 2**-8, 'round', 'ring', lo=-4, hi=4)
    dst[:] = _fix(10.1875, 2**-5, 'round', 'ring')
    assert float(dst) == 2.1875
    dst[:] = _fix(-2., 2**-6, 'round', 'ring')
    assert float(dst) == -2.
    dst[:] = _fix(-6.125, 2**-6, 'round', 'ring')
    assert float(dst) == 1.875

    # overflow mode: saturate (values clamp to the target range limits)
    dst = _fix(0, 2**-8, 'round', 'saturate', lo=-4, hi=4)
    dst[:] = _fix(10.1875, 2**-5, 'round', 'saturate')
    assert float(dst) == 4 - 2**-8
    dst[:] = _fix(-2., 2**-6, 'round', 'saturate')
    assert float(dst) == -2.
    dst[:] = _fix(-6.125, 2**-6, 'round', 'saturate')
    assert float(dst) == -4
def mse():
    """Squared-error process: error.next = (y - y_actual) ** 2 as fixbv.

    NOTE(review): reads ``y``, ``y_actual``, ``error`` and the ``fix_*``
    format bounds from an enclosing scope that is not visible here; this
    looks like a detached copy of the ``mse`` process defined inside
    ``WordContextUpdated`` -- confirm which definition is actually live.
    """
    # quantize the prediction error, then square it into the same format
    diff = fixbv(y - y_actual, min=fix_min, max=fix_max, res=fix_res)
    error.next = fixbv(diff * diff, min=fix_min, max=fix_max, res=fix_res)
def test_converge(n=50, emb_spread=0.1, rand_seed=42):
    """Testing bench for convergence.

    :param n: maximum number of update iterations
    :param emb_spread: upper bound for random initial embedding values
    :param rand_seed: seed for reproducible initialization
    """
    embedding_dim = 3
    leaky_val = 0.01
    rate_val = 0.1
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8
    fix_width = 1 + 7 + 8  # sign + integer + fractional bits

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    error = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    new_word_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    new_context_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    new_word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    new_context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        new_word_emb[j].assign(
            new_word_embv((j + 1) * fix_width, j * fix_width))
        new_context_emb[j].assign(
            new_context_embv((j + 1) * fix_width, j * fix_width))

    y_actual = Signal(fixbv(1.0, min=fix_min, max=fix_max, res=fix_res))
    word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    word_embv = ConcatSignal(*reversed(word_emb))
    context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    context_embv = ConcatSignal(*reversed(context_emb))
    clk = Signal(bool(False))

    # modules
    wcupdated = WordContextUpdated(y, error, new_word_embv, new_context_embv,
                                   y_actual, word_embv, context_embv,
                                   embedding_dim, leaky_val, rate_val,
                                   fix_min, fix_max, fix_res)

    # test stimulus
    random.seed(rand_seed)
    HALF_PERIOD = delay(5)

    @always(HALF_PERIOD)
    def clk_gen():
        clk.next = not clk

    @instance
    def stimulus():
        zero = fixbv(0.0, min=fix_min, max=fix_max, res=fix_res)
        yield clk.posedge

        # random initialization
        for j in range(embedding_dim):
            word_emb[j].next = fixbv(random.uniform(0.0, emb_spread),
                                     min=fix_min, max=fix_max, res=fix_res)
            context_emb[j].next = fixbv(random.uniform(0.0, emb_spread),
                                        min=fix_min, max=fix_max, res=fix_res)

        # iterate to converge
        for i in range(n):
            yield clk.negedge
            # was a Python 2 `print` statement (syntax error on Python 3);
            # converted to the print() call form used elsewhere in this file
            print("%4s mse: %f, y: %f, word: %s, context: %s" % (
                now(), error, y,
                [float(el.val) for el in word_emb],
                [float(el.val) for el in context_emb]))

            if error == zero:
                break

            # transfer new values
            for j in range(embedding_dim):
                word_emb[j].next = new_word_emb[j]
                context_emb[j].next = new_context_emb[j]

        raise StopSimulation()

    return clk_gen, stimulus, wcupdated
def test_converge(n=50, emb_spread=0.1, rand_seed=42):
    """Testing bench for convergence.

    NOTE(review): this is a second definition with the same name as an
    earlier ``test_converge`` in this file; at import time it shadows the
    first -- confirm whether both copies are intentional.

    :param n: maximum number of update iterations
    :param emb_spread: upper bound for random initial embedding values
    :param rand_seed: seed for reproducible initialization
    """
    embedding_dim = 3
    leaky_val = 0.01
    rate_val = 0.1
    fix_min = -2**7
    fix_max = -fix_min
    fix_res = 2**-8
    fix_width = 1 + 7 + 8  # sign + integer + fractional bits

    # signals
    y = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    error = Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
    new_word_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    new_context_embv = Signal(intbv(0)[embedding_dim * fix_width:])
    new_word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    new_context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for j in range(embedding_dim)
    ]
    for j in range(embedding_dim):
        new_word_emb[j].assign(
            new_word_embv((j + 1) * fix_width, j * fix_width))
        new_context_emb[j].assign(
            new_context_embv((j + 1) * fix_width, j * fix_width))

    y_actual = Signal(fixbv(1.0, min=fix_min, max=fix_max, res=fix_res))
    word_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    word_embv = ConcatSignal(*reversed(word_emb))
    context_emb = [
        Signal(fixbv(0.0, min=fix_min, max=fix_max, res=fix_res))
        for _ in range(embedding_dim)
    ]
    context_embv = ConcatSignal(*reversed(context_emb))
    clk = Signal(bool(False))

    # modules
    wcupdated = WordContextUpdated(y, error, new_word_embv, new_context_embv,
                                   y_actual, word_embv, context_embv,
                                   embedding_dim, leaky_val, rate_val,
                                   fix_min, fix_max, fix_res)

    # test stimulus
    random.seed(rand_seed)
    HALF_PERIOD = delay(5)

    @always(HALF_PERIOD)
    def clk_gen():
        clk.next = not clk

    @instance
    def stimulus():
        zero = fixbv(0.0, min=fix_min, max=fix_max, res=fix_res)
        yield clk.posedge

        # random initialization
        for j in range(embedding_dim):
            word_emb[j].next = fixbv(random.uniform(0.0, emb_spread),
                                     min=fix_min, max=fix_max, res=fix_res)
            context_emb[j].next = fixbv(random.uniform(0.0, emb_spread),
                                        min=fix_min, max=fix_max, res=fix_res)

        # iterate to converge
        for i in range(n):
            yield clk.negedge
            # was a Python 2 `print` statement (syntax error on Python 3);
            # converted to the print() call form used elsewhere in this file
            print("%4s mse: %f, y: %f, word: %s, context: %s" % (
                now(), error, y,
                [float(el.val) for el in word_emb],
                [float(el.val) for el in context_emb]))

            if error == zero:
                break

            # transfer new values
            for j in range(embedding_dim):
                word_emb[j].next = new_word_emb[j]
                context_emb[j].next = new_context_emb[j]

        raise StopSimulation()

    return clk_gen, stimulus, wcupdated
def dot():
    """Dot product: y.next = sum over j of a_list[j] * b_list[j] as fixbv.

    NOTE(review): reads ``a_list``, ``b_list``, ``dim``, ``y`` and the
    ``fixd_*``/``fix_*`` format bounds from an enclosing scope that is not
    visible here.  The accumulator uses the separate ``fixd_*`` format,
    presumably wider to hold intermediate products without overflow --
    confirm against the enclosing definition.
    """
    # accumulate partial products in the fixd_* format
    y_sum = fixbv(0.0, min=fixd_min, max=fixd_max, res=fixd_res)
    for j in range(dim):
        y_sum[:] = y_sum + a_list[j] * b_list[j]
    # re-quantize the final sum into the output's fix_* format
    y.next = fixbv(y_sum, min=fix_min, max=fix_max, res=fix_res)
def test_math(): x = fixbv(0.5)[16,0] y = fixbv(0.25)[16,0] z = fixbv(0)[16,0] #print(x, y, z) #w = x + y #print(w, type(w)) z[:] = x + y print(z, type(z), x+y) assert float(z) == 0.75 x = fixbv(3.5, min=-8, max=8, res=2**-5) y = fixbv(-5.25, min=-8, max=8, res=2**-5) iW = x._W + y._W print(iW) z = fixbv(0)[iW[:]] z[:] = x + y assert float(z) == -1.75 z[:] = y - x assert float(z) == -8.75 z[:] = x - y assert float(z) == 8.75 x = fixbv(3.141592)[19,4] y = fixbv(1.618033)[19,4] print(float(x), int(x), repr(x)) print(float(y), int(y), repr(y)) iW = x._W * y._W print(iW) z = fixbv(0)[iW[:]] wl,iwl,fwl = z._W[:] print(repr(z), z._max, z._min, z._nrbits, "iwl, fwl", iwl, fwl) z[:] = x * y print(repr(x), repr(y)) print(float(z), int(z), repr(z)) assert float(z) > 5. x = fixbv(3.5, min=-8, max=8, res=2**-5) z = fixbv(0)[(x*x).format] print(x, z) z[:] = x * x assert float(z) == 12.25 z[:] = x**2 assert float(z) == 12.25 z = fixbv(0)[(x*x*x).format] z[:] = x * x * x assert float(z) == 42.875 z[:] = x**3 assert float(z) == 42.875