def OpInfo_ADD(
    conv_domain: isl.Set,
    shape: typing.Tuple[int, ...],
    in1_id: str,
    in2_id: str,
    out_id: str,
) -> OpInfo:
    """ OpInfo for an ADD operation """
    (b, d, h, w) = shape
    assert b == 1  # Batch is expected to be 1

    # Reconstruct the domain from shape and verify that everything is in order
    tn = conv_domain.get_tuple_name()
    xdom = isl_set_from_shape(tn, ["oh", "ow"], (h, w))
    # NB: This assertion might eventually fail if we introduce striding or
    # other complications. I just leave it as a sanity check for now.
    assert xdom == conv_domain

    accesses = []
    for (obj_id, mk_acc) in ((in1_id, RD_a), (in2_id, RD_a), (out_id, WR_a)):
        # compute range
        obj_vs = ["%s_%s" % (obj_id, x) for x in ("d", "h", "w")]
        rng = isl_set_from_names(obj_id, obj_vs)
        rel = isl.Map.from_domain_and_range(conv_domain, rng)

        # h,w dimensions: the object point follows the (oh, ow) iteration
        eqs = [
            {obj_vs[1]: 1, "oh": -1},
            {obj_vs[2]: 1, "ow": -1},
        ]
        for eq in eqs:
            con_eq = isl.Constraint.eq_from_names(rel.space, eq)
            rel = rel.add_constraint(con_eq)

        # d dimension: 0 <= obj_d < d
        ineqs = [
            {1: d - 1, obj_vs[0]: -1},
            {1: 0, obj_vs[0]: 1},
        ]
        for ineq in ineqs:
            con_ineq = isl.Constraint.ineq_from_names(rel.space, ineq)
            rel = rel.add_constraint(con_ineq)

        accesses.append(mk_acc(rel))

    return pl.OpInfo("ADD", accesses)

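# Illustrative note (assuming isl_set_from_names builds an unconstrained set
# over the given names): with conv_domain named TN, each relation built above
# has the form
#
#   { TN[oh, ow] -> obj[obj_d, obj_h, obj_w] :
#       obj_h = oh and obj_w = ow and 0 <= obj_d < d }
#
# restricted to (oh, ow) in conv_domain, i.e., each iteration touches the
# whole depth column at its spatial position (reads in1/in2, writes out).
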
def test_conv1d():
    """ Test a single 1D convolution """
    eg_vals = xparams({"n": 10, "k": 3, "p": 1})

    s1_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a(
                    "[n,k,p] -> { S1[o1] -> in[j] : 0 <= o1 < ((n - k + 2*p) + 1) and o1 <= j < o1 + k }"
                ),
                WR_a(
                    "[n,k,p] -> { S1[o1] -> out[j] : 0 <= o1 < ((n - k + 2*p) + 1) and j = o1 }"
                ),
            ],
        ),
    ]
    stage1 = pl.Stage(pl.StageInfo(s1_ops), eg_vals)

    objs_info = {
        "in": ObjectInfo(shape=(eg_vals.n, ), padding=eg_vals.p),
        "out": ObjectInfo(shape=eg_vals.eval("(n-k+1,)"), padding=eg_vals.p),
    }

    pline = pl.Pipeline([stage1], objs_info, execute_ops=True)

    conv1_ps = conv.Conv1DParams(
        i=conv.Conv1DInParams(w=eg_vals["n"], d=1),
        f=conv.Conv1DFiltParams(w=eg_vals["k"], d=1, l=1),
        p=1,
        s=1,
        p_out=0,
    )

    # Set filters
    filters1 = np.random.rand(*conv1_ps.get_filters_shape())
    filters1_m = filters1.reshape(conv1_ps.eval("(f.l, f.d*f.w)"))
    cconf = pl.CoreConf(filters1_m)

    # Set input
    image1 = np.random.rand(*conv1_ps.get_input_shape())
    image1 = np.pad(image1, conv1_ps.get_input_padding())
    inp = pline.get_object("in")
    inp[...] = image1

    pline.configure([cconf])

    for _ in range(conv1_ps.o.w):
        pline.tick()

    out = pline.get_object("out")

    # Verify results
    output_simple = conv.conv1d_simple(image1, filters1, conv1_ps)
    # NB: conv1d_simple considers the depth dimension while our access
    # relations above do not
    np.testing.assert_allclose(output_simple[0, :], out)

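# Illustrative note: with the example values above (n=10, k=3, p=1), the read
# relation is a sliding window of width k over the padded "in" object, giving
# n - k + 2*p + 1 = 10 stage iterations. Assuming the usual convolution
# output-size formula, this equals conv1_ps.o.w, the number of ticks issued
# in the loop above.
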
def OpInfo_ID(shape: typing.Tuple[int, ...], s_id: str, inp_id: str,
              out_id: str) -> OpInfo:
    """ Identity operation """
    idx_names = ["%s_i%d" % (s_id, i) for (i, _) in enumerate(shape)]
    # Create domain for relations
    xdom = isl_set_from_shape(s_id, idx_names, shape)

    accesses = []
    for (obj_id, mk_acc) in ((inp_id, RD_a), (out_id, WR_a)):
        obj_names = ["%s_i%d" % (obj_id, i) for (i, _) in enumerate(shape)]
        rng = isl_set_from_names(obj_id, obj_names)
        rel = isl.Map.from_domain_and_range(xdom, rng)
        for (idx_n, obj_n) in zip(idx_names, obj_names):
            rel = rel.add_constraint(
                isl.Constraint.eq_from_names(rel.space, {
                    idx_n: 1,
                    obj_n: -1
                }))
        accesses.append(mk_acc(rel))

    return pl.OpInfo("ID", accesses)

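# Illustrative note (hypothetical values; assumes isl_set_from_shape builds a
# box-shaped domain): for shape (2, 3), s_id="S", inp_id="x", out_id="y", the
# read relation built above is equivalent to
#
#   isl.Map("{ S[S_i0, S_i1] -> x[x_i0, x_i1] : 0 <= S_i0 < 2 "
#           "and 0 <= S_i1 < 3 and x_i0 = S_i0 and x_i1 = S_i1 }")
#
# and the write relation is the same with y in place of x.
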
def test_mxv():
    """ Test a single MxV operation """
    params = xparams({"n": 128})

    s_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a("{{ S[i] -> x[j] : i = 0 and 0 <= j < {n} }}".format(
                    **params)),
                WR_a("{{ S[i] -> y[j] : i = 0 and 0 <= j < {n} }}".format(
                    **params)),
            ],
        )
    ]
    stage = pl.Stage(pl.StageInfo(s_ops))

    # Objects
    objs_info = {
        "x": ObjectInfo(shape=(params.n, )),
        "y": ObjectInfo(shape=(params.n, )),
    }

    # Initialize matrix, and create core configuration
    # np.random.seed(666)
    m_shape = params.eval("(n,n)")
    m = np.random.rand(*m_shape)
    cconf = pl.CoreConf(m)

    # Initialize pipeline
    pline = pl.Pipeline([stage], objs_info, execute_ops=True)
    x = pline.get_object("x")
    x[...] = np.random.rand(params.n)

    # Configure pipeline
    pline.configure([cconf])

    # Execute a single tick and compare results
    pline.tick()
    y = pline.get_object("y")
    assert np.array_equal(y, np.matmul(m, x))

def OpInfo_CONV(conv_ps: conv.Conv2DParams, s_id: str, vin_id: str,
                vout_id: str) -> OpInfo:
    """ OpInfo for a CONV operation """
    rd_a = ("{{ {SID}[oh,ow] -> {VID}[id,ih,iw] "
            ": 0 <= oh < {OH} "
            "and 0 <= ow < {OW} "
            "and 0 <= id < {ID} "
            "and oh <= ih < oh + {FH} "
            "and ow <= iw < ow + {FW} "
            "}}".format(
                ID=conv_ps.i.d,
                OH=conv_ps.o.h,
                OW=conv_ps.o.w,
                FH=conv_ps.f.h,
                FW=conv_ps.f.w,
                SID=s_id,
                VID=vin_id,
            ))

    wr_a = ("{{ {SID}[oh,ow] -> {VID}[ik,ih,iw] "
            ": 0 <= oh < {OH} "
            "and 0 <= ow < {OW} "
            "and 0 <= ik < {FL} "
            "and ih = oh + {P} "
            "and iw = ow + {P} "
            "}}".format(
                OH=conv_ps.o.h,
                OW=conv_ps.o.w,
                FL=conv_ps.f.l,
                P=conv_ps.p_out,
                SID=s_id,
                VID=vout_id,
            ))

    return pl.OpInfo("MxV", [RD_a(rd_a), WR_a(wr_a)])

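# Illustrative expansion (hypothetical parameter values): for a 3x3 filter
# with 2 output channels (f.h = f.w = 3, f.l = 2), a 4-channel input
# (i.d = 4), a 6x6 output (o.h = o.w = 6) and p_out = 1, the strings above
# format to roughly:
#
#   rd_a: { S[oh,ow] -> VIN[id,ih,iw] : 0 <= oh < 6 and 0 <= ow < 6
#           and 0 <= id < 4 and oh <= ih < oh + 3 and ow <= iw < ow + 3 }
#   wr_a: { S[oh,ow] -> VOUT[ik,ih,iw] : 0 <= oh < 6 and 0 <= ow < 6
#           and 0 <= ik < 2 and ih = oh + 1 and iw = ow + 1 }
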
def test_residual_1d():
    # CONV1D ---> CONV1D ---> ADD
    #    |                     ^
    #    |                     |
    #    +---------------------+
    #
    # Stage S1:
    #  - MxV (CONV1D)
    #    - PARAMS: P1, F1
    #    - INPUT: IN
    #    - OUTPUT: O1
    #
    # Stage S2:
    #  - MxV (CONV1D)
    #    - PARAMS: P2, F2
    #    - INPUT: O1
    #    - OUTPUT: O3 (internal)
    #  - ADD:
    #    - INPUT: O1, O3 (internal)
    #    - OUTPUT: OUT
    #
    # cross-stage Objects:
    #   IN:  WRITER: NONE,   READER: S1/MxV
    #   O1:  WRITER: S1/MxV, READER: S2/MxV
    #   OUT: WRITER: S2/ADD, READER: NONE
    #
    # Objects have a single writer and a single reader.
    # Stages might read or write more than one object.

    params = get_params()

    s1_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a(
                    "{{ S1[s1] -> IN[i1] : 0 <= s1 < {O1} and s1 <= i1 < s1 + {F1} }}"
                    .format(**params)),
                WR_a(
                    "{{ S1[s1] -> O1[o1] : 0 <= s1 < {O1} and o1 = s1 + {P2} }}"
                    .format(**params)),
            ],
        )
    ]
    s2_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a(
                    "{{ S2[s2] -> O1[o1] : 0 <= s2 < {O3} and s2 <= o1 < s2 + {F2} }}"
                    .format(**params)),
                WR_a("{{ S2[s2] -> O3[o3] : 0 <= s2 < {O3} and o3 = s2 }}".
                     format(**params)),
            ],
        ),
        pl.OpInfo(
            "ADD",
            [
                RD_a("{{ S2[s2] -> O1[o1] : 0 <= s2 < {O3} and o1 = s2 }}".
                     format(**params)),
                RD_a("{{ S2[s2] -> O3[o3] : 0 <= s2 < {O3} and o3 = s2 }}".
                     format(**params)),
                WR_a("{{ S2[s2] -> OUT[out] : 0 <= s2 < {O3} and out = s2 }}".
                     format(**params)),
            ],
        ),
    ]

    s2 = pl.Stage(pl.StageInfo(s2_ops))
    assert s2.si.ro_objs == set(("O1", ))
    assert s2.si.wo_objs == set(("OUT", ))
    assert s2.si.rw_objs == set(("O3", ))

    s1 = pl.Stage(pl.StageInfo(s1_ops))
    assert s1.si.ro_objs == set(("IN", ))
    assert s1.si.wo_objs == set(("O1", ))
    assert s1.si.rw_objs == set()

    conv1_ps = conv.Conv1DParams(
        i=conv.Conv1DInParams(w=params.IN, d=1),
        f=conv.Conv1DFiltParams(w=params.F1, d=1, l=1),
        p=params.P1,
        s=params.S1,
        p_out=params.P2,
    )
    conv2_ps = conv.Conv1DParams(
        i=conv1_ps.o.to_in(),
        f=conv.Conv1DFiltParams(w=params.F2, d=1, l=1),
        p=params.P2,
        s=params.S2,
        p_out=0,
    )

    objs_info = {
        # 'IN': (params.eval("IN + 2*P1"), ),
        # 'O1': (params.eval("O1 + 2*P2"), ),
        # 'O3': (params.O3, ),
        # 'OUT': (params.OUT,),
        "IN": ObjectInfo(shape=(params.IN, ), padding=params.P1),
        "O1": ObjectInfo(shape=(params.O1, ), padding=params.P2),
        "O3": ObjectInfo(shape=(params.O3, ), padding=0),
        "OUT": ObjectInfo(shape=(params.OUT, ), padding=0),
    }
    pprint(objs_info)

    pline = pl.Pipeline([s1, s2], objs_info, execute_ops=True,
                        loop_inp_limit=1)
    pprint(params)

    filters1 = np.random.rand(*conv1_ps.get_filters_shape())
    filters1_m = filters1.reshape(conv1_ps.eval("(f.l, f.d*f.w)"))
    cconf1 = pl.CoreConf(filters1_m)

    filters2 = np.random.rand(*conv2_ps.get_filters_shape())
    filters2_m = filters2.reshape(conv2_ps.eval("(f.l, f.d*f.w)"))
    cconf2 = pl.CoreConf(filters2_m)

    image = np.random.rand(*conv1_ps.get_input_shape())
    image = np.pad(image, conv1_ps.get_input_padding())
    inp = pline.get_object("IN")
    inp[...] = image

    pline.configure([cconf1, cconf2])

    print_info = False
    for iters in pline.tick_gen():
        if print_info:
            print("*" * 80)
        for (s, i) in iters.items():
            if print_info:
                print("%s: %s" % (s, i))
        if print_info:
            print("*" * 80)
    print("%s> DONE" % ("-" * 30, ))

    pline_out = pline.get_object("OUT")
    pline_o1 = pline.get_object("O1")
    pline_o3 = pline.get_object("O3")

    o1 = conv.conv1d_simple(image, filters1, conv1_ps)
    o2 = np.copy(o1)
    o1 = np.pad(o1, conv2_ps.get_input_padding())
    np.testing.assert_allclose(o1[0, :], pline_o1, err_msg="O1 does not match")

    o3 = conv.conv1d_simple(o1, filters2, conv2_ps)
    out = o3 + o2
    np.testing.assert_allclose(o3[0, :], pline_o3, err_msg="O3 does not match")
    np.testing.assert_allclose(out[0, :],
                               pline_out,
                               err_msg="OUT does not match")

def test_conv1d_conv1d():
    # TODO: enable execute_ops = True, and compare results
    #
    # Two chained 1D convolutions, one layer each.
    #
    # For N=12, K=3, zero padding, the code looks something like this:
    #
    # Stage s1:
    #   for o1 ← range(0, 10) {
    #       in2[o1,:] ← MXV(in1[o1:(o1 + 3),:])
    #   }
    # Stage s2:
    #   for o2 ← range(0, 8) {
    #       out2[o2,:] ← MXV(in2[o2:(o2 + 3),:])
    #   }
    #
    # Example values
    #   N: in1 size
    #   K: kernel size
    #   P: padding
    eg_vals = xparams({"n": 10, "k": 3, "p": 1})

    s1_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a(
                    "[n,k,p] -> { S1[o1] -> in1[j] : 0 <= o1 < ((n - k + 2*p) + 1) and o1 <= j < o1 + k }"
                ),
                WR_a(
                    "[n,k,p] -> { S1[o1] -> in2[j] : 0 <= o1 < ((n - k + 2*p) + 1) and j = o1 + p }"
                ),
            ],
        ),
    ]
    stage1 = pl.Stage(pl.StageInfo(s1_ops), eg_vals)

    s2_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a(
                    "[n,k,p] -> { S2[o2] -> in2[j] : 0 <= o2 < (n-k+2*p) and o2 <= j < o2 + k }"
                ),
            ],
        ),
    ]
    stage2 = pl.Stage(pl.StageInfo(s2_ops), eg_vals)

    objs_info = {
        "in1": ObjectInfo(shape=(eg_vals.n, ), padding=eg_vals.p),
        "in2": ObjectInfo(shape=(eg_vals.eval("n-k+2*p+1"), ),
                          padding=eg_vals.p),
    }
    pprint(objs_info)

    pline = pl.Pipeline([stage1, stage2], objs_info)
    for i in range(13):
        pline.tick()

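# Illustrative arithmetic for the example values above (n=10, k=3, p=1):
# stage S1 iterates over n - k + 2*p + 1 = 10 points and stage S2 over
# n - k + 2*p = 9 points, matching the bounds in the access relations.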