def test_conv1d():
    """Exercise a single 1D convolution implemented as one MxV pipeline stage."""
    # Example parameter values: n = input width, k = kernel width, p = padding.
    cfg = xparams({"n": 10, "k": 3, "p": 1})

    # One MxV op: reads a k-wide sliding window of "in", writes one "out" element.
    mxv_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a(
                    "[n,k,p] -> { S1[o1] -> in[j] : 0 <= o1 < ((n - k + 2*p) + 1) and o1 <= j < o1 + k }"
                ),
                WR_a(
                    "[n,k,p] -> { S1[o1] -> out[j] : 0 <= o1 < ((n - k + 2*p) + 1) and j = o1 }"
                ),
            ],
        ),
    ]
    stage = pl.Stage(pl.StageInfo(mxv_ops), cfg)

    objs_info = {
        "in": ObjectInfo(shape=(cfg.n, ), padding=cfg.p),
        "out": ObjectInfo(shape=cfg.eval("(n-k+1,)"), padding=cfg.p),
    }
    pline = pl.Pipeline([stage], objs_info, execute_ops=True)

    cparams = conv.Conv1DParams(
        i=conv.Conv1DInParams(w=cfg["n"], d=1),
        f=conv.Conv1DFiltParams(w=cfg["k"], d=1, l=1),
        p=1,
        s=1,
        p_out=0,
    )

    # Random filters, flattened into the matrix the MxV core expects.
    weights = np.random.rand(*cparams.get_filters_shape())
    weights_m = weights.reshape(cparams.eval("(f.l, f.d*f.w)"))
    core_conf = pl.CoreConf(weights_m)

    # Random (padded) input image, copied into the pipeline's "in" object.
    img = np.random.rand(*cparams.get_input_shape())
    img = np.pad(img, cparams.get_input_padding())
    in_obj = pline.get_object("in")
    in_obj[...] = img

    pline.configure([core_conf])

    # One tick per output element.
    for _ in range(cparams.o.w):
        pline.tick()
    out_obj = pline.get_object("out")

    # Verify against the reference implementation.
    expected = conv.conv1d_simple(img, weights, cparams)
    # NB: conv1d_simple considers the depth dimension while our access
    # relations above do not
    np.testing.assert_allclose(expected[0, :], out_obj)
def get_params():
    """Build the parameter set for a two-stage 1D-convolution pipeline."""
    p = xparams()
    # IN: input size (w/o padding)
    # F1: filter size
    # P1: padding
    # S1: stride
    p.update({"IN": 10, "F1": 3, "P1": 1, "S1": 1})
    # O1: output 1 size (standard conv output-size formula)
    p.compute("O1", "(IN - F1 + 2*P1) // S1 + 1")
    p.compute("O2", "O1")
    # Second-stage filter/padding/stride.
    p.update({"F2": 3, "P2": 1, "S2": 1})
    p.compute("O3", "(O1 - F2 + 2*P2) // S2 + 1")
    # OUT: size of the largest output object.
    p.compute("OUT", "max(O2,O3)")
    return p
def test_mxv():
    """Exercise a single MxV (matrix-times-vector) pipeline operation."""
    params = xparams({"n": 128})

    # A single op that reads all of x and writes all of y in one iteration.
    ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a("{{ S[i] -> x[j] : i = 0 and 0 <= j < {n} }}".format(
                    **params)),
                WR_a("{{ S[i] -> y[j] : i = 0 and 0 <= j < {n} }}".format(
                    **params)),
            ],
        )
    ]
    stage = pl.Stage(pl.StageInfo(ops))

    # Objects: input vector x and output vector y, both length n.
    objs_info = {
        "x": ObjectInfo(shape=(params.n, )),
        "y": ObjectInfo(shape=(params.n, )),
    }

    # Random n-by-n matrix for the core configuration.
    # np.random.seed(666)
    matrix = np.random.rand(*params.eval("(n,n)"))
    core_conf = pl.CoreConf(matrix)

    # Build the pipeline and fill in the input vector.
    pline = pl.Pipeline([stage], objs_info, execute_ops=True)
    vec = pline.get_object("x")
    vec[...] = np.random.rand(params.n)

    # Configure, run one tick, and check y == m @ x.
    pline.configure([core_conf])
    pline.tick()
    result = pline.get_object("y")
    assert np.array_equal(result, np.matmul(matrix, vec))
def test_conv1d_conv1d():
    # TODO: enable execute_ops = True, and compare results
    #
    # Two chained 1D convolutions, each with one layer (simplest case).
    #
    # For N=12, K=3, zero padding, the code looks something like this:
    #
    # Stage s1:
    #   for o1 in range(0, 10):
    #       in2[o1,:] = MXV(in1[o1:(o1 + 3),:])
    # Stage s2:
    #   for o2 in range(0, 8):
    #       out2[o2,:] = MXV(in2[o2:(o2 + 3),:])
    #
    # Example values:
    #   n: in1 size, k: kernel size, p: padding
    cfg = xparams({"n": 10, "k": 3, "p": 1})

    # Stage 1: slide a k-wide window over in1, write one in2 element per step.
    first_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a(
                    "[n,k,p] -> { S1[o1] -> in1[j] : 0 <= o1 < ((n - k + 2*p) + 1) and o1 <= j < o1 + k }"
                ),
                WR_a(
                    "[n,k,p] -> { S1[o1] -> in2[j] : 0 <= o1 < ((n - k + 2*p) + 1) and j = o1 + p}"
                ),
            ],
        ),
    ]
    first_stage = pl.Stage(pl.StageInfo(first_ops), cfg)

    # Stage 2: consumes in2 (read-only here; no write relation yet).
    second_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a(
                    "[n,k,p] -> { S2[o2] -> in2[j] : 0 <= o2 < (n-k+2*p) and o2 <= j < o2 + k }"
                ),
            ],
        ),
    ]
    second_stage = pl.Stage(pl.StageInfo(second_ops), cfg)

    objs_info = {
        "in1": ObjectInfo(shape=(cfg.n, ), padding=cfg.p),
        "in2": ObjectInfo(shape=(cfg.eval("n-k+2*p+1"), ),
                          padding=cfg.p),
    }
    pprint(objs_info)

    pline = pl.Pipeline([first_stage, second_stage], objs_info)
    # Drive the pipeline; ops are not executed (execute_ops defaults off).
    for _ in range(13):
        pline.tick()