def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Same parameters as CIFAR's first convolution
    self.conv_ps = conv.Conv2DParams(
        i=conv.Conv2DInParams(w=32, h=32, d=3),
        f=conv.Conv2DFiltParams(w=3, h=3, d=3, l=16),
        p=1,
        s=1,
        p_out=0,
    )
def test_conv2d():
    conv1_ps = conv.Conv2DParams(
        i=conv.Conv2DInParams(w=32, h=32, d=3),
        f=conv.Conv2DFiltParams(w=3, h=3, d=3, l=16),
        p=1,
        s=1,
        p_out=0,
    )

    s1_ops = [OpInfo_CONV(conv1_ps, s_id="S1", vin_id="V1", vout_id="V2")]
    stage1 = pl.Stage(pl.StageInfo(s1_ops))
    objs_info = {
        "V1": conv1_ps.get_input_objectinfo(),
        "V2": conv1_ps.get_output_objectinfo(),
    }
    p = pl.Pipeline([stage1], objs_info, execute_ops=True)

    # Set filters (reshaped to a (f.l, f.d*f.h*f.w) matrix for the core)
    filters1 = np.random.rand(*conv1_ps.get_filters_shape())
    filters_m = filters1.reshape(conv1_ps.eval("(f.l, f.d*f.h*f.w)"))
    cconf = pl.CoreConf(filters_m)

    # Set (padded) input
    image1 = np.random.rand(*conv1_ps.get_input_shape())
    image1 = np.pad(image1, conv1_ps.get_input_padding())
    vals1 = p.get_object("V1")
    vals1[...] = image1

    # Configure pipeline
    p.configure([cconf])

    # Execute pipeline: one tick per output pixel
    for _ in range(conv1_ps.o.h * conv1_ps.o.w):
        p.tick()
    vals2 = p.get_object("V2")

    # Verify results against the two reference implementations
    output_simple = conv.conv2d_simple(image1, filters1, conv1_ps)
    output_mxv = conv.conv2d_mxv(image1, filters1, conv1_ps)
    np.testing.assert_allclose(output_simple, output_mxv)
    np.testing.assert_array_equal(output_mxv, vals2)
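# A minimal numpy sketch (illustrative, not part of the original tests) of
# the matrix-vector view of convolution that conv2d_mxv and the pipeline
# core use above: the filters are flattened into a (f.l, f.d*f.h*f.w)
# matrix, and each output pixel is that matrix applied to the flattened
# input window. The helper name below is hypothetical; it relies on this
# file's numpy import and assumes a channels-first, stride-1, pre-padded
# input, as in test_conv2d.
def conv2d_mxv_sketch(image, filters):
    (fl, fd, fh, fw) = filters.shape
    (_, ih, iw) = image.shape  # image is already padded
    (oh, ow) = (ih - fh + 1, iw - fw + 1)
    fm = filters.reshape(fl, fd * fh * fw)
    out = np.empty((fl, oh, ow))
    for y in range(oh):
        for x in range(ow):
            window = image[:, y:y + fh, x:x + fw].reshape(-1)
            out[:, y, x] = fm @ window  # one matrix-vector product per pixel
    return out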
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Same parameters as CIFAR's convolutions
    conv1_padding = 1
    conv2_padding = 1
    conv1_ps = self.conv1_ps = conv.Conv2DParams(
        i=conv.Conv2DInParams(w=32, h=32, d=3),
        f=conv.Conv2DFiltParams(w=3, h=3, d=3, l=1),
        p=conv1_padding,
        p_out=conv2_padding,
        s=1,
    )
    self.conv2_ps = conv.Conv2DParams(
        i=conv1_ps.o.to_in(),
        f=conv.Conv2DFiltParams(w=3, h=3, d=conv1_ps.f.l, l=1),
        p=conv2_padding,
        p_out=0,
        s=1,
    )
def test_conv2d_conv2d():
    conv1_padding = 1
    conv2_padding = 1
    conv1_ps = conv.Conv2DParams(
        i=conv.Conv2DInParams(w=32, h=32, d=3),
        f=conv.Conv2DFiltParams(w=3, h=3, d=3, l=1),
        p=conv1_padding,
        p_out=conv2_padding,
        s=1,
    )
    conv2_ps = conv.Conv2DParams(
        i=conv1_ps.o.to_in(),
        f=conv.Conv2DFiltParams(w=3, h=3, d=conv1_ps.f.l, l=1),
        p=conv2_padding,
        p_out=0,
        s=1,
    )

    s1_ops = [
        OpInfo_CONV(conv1_ps, s_id="S1", vin_id="V1", vout_id="V2"),
    ]
    stage1 = pl.Stage(pl.StageInfo(s1_ops))
    s2_ops = [
        OpInfo_CONV(conv2_ps, s_id="S2", vin_id="V2", vout_id="V3"),
    ]
    stage2 = pl.Stage(pl.StageInfo(s2_ops))

    objs_info = {
        "V1": conv1_ps.get_input_objectinfo(),
        "V2": conv2_ps.get_input_objectinfo(),
        "V3": conv2_ps.get_output_objectinfo(),
    }
    p = pl.Pipeline([stage1, stage2], objs_info, execute_ops=True)

    # Set filters for both stages
    filters1 = np.random.rand(*conv1_ps.get_filters_shape())
    filters_m1 = filters1.reshape(conv1_ps.eval("(f.l, f.d*f.h*f.w)"))
    cconf1 = pl.CoreConf(filters_m1)
    filters2 = np.random.rand(*conv2_ps.get_filters_shape())
    filters_m2 = filters2.reshape(conv2_ps.eval("(f.l, f.d*f.h*f.w)"))
    cconf2 = pl.CoreConf(filters_m2)

    # Set (padded) input
    image = np.random.rand(*conv1_ps.get_input_shape())
    image = np.pad(image, conv1_ps.get_input_padding())

    p.configure([cconf1, cconf2])

    vals1 = p.get_object("V1")
    print("vals1.shape=%s image.shape=%s" % (vals1.shape, image.shape))
    pprint(objs_info)
    vals1[...] = image

    # Tick until stage S2 has produced its last output pixel
    while True:
        iters = p.tick()
        print("*" * 80)
        for (s, i) in iters.items():
            print("%s: %s" % (s, i))
        print("*" * 80)
        if iters["S2"] == (0, conv2_ps.o.h - 1, conv2_ps.o.w - 1):
            break
    vals3 = p.get_object("V3")
    pprint(vals3.shape)

    # Verify results against the simple reference implementation
    output1 = conv.conv2d_simple(image, filters1, conv1_ps)
    output1 = np.pad(output1, conv2_ps.get_input_padding())
    output2 = conv.conv2d_simple(output1, filters2, conv2_ps)
    np.testing.assert_allclose(output2, vals3)
    print("DONE!")
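# Worked shape arithmetic for the two chained convolutions above (a
# reference note, not part of the original test). With stride 1 the output
# side is o = (i + 2p - f)/s + 1:
#   conv1: (32 + 2*1 - 3) + 1 = 32, depth f.l = 1  -> conv1.o is 1x32x32
#   conv1.p_out = conv2.p = 1, so "V2" is allocated pre-padded as 1x34x34
#   conv2: (32 + 2*1 - 3) + 1 = 32                 -> "V3" is 1x32x32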
def test_onnx_residual_2d():
    # Create the following ONNX graph
    # (this is what onnx_mk_simple_residual does)
    #
    # CONV2D ---> CONV2D ---> ADD
    #   |                      ^
    #   |                      |
    #   +----------------------+
    #
    # CONV2D
    #   input: in
    #   output: v1
    #   weights: w1
    # CONV2D
    #   input: v1
    #   output: v2
    #   weights: w2
    # ADD
    #   input: v1, v2
    #   output: out
    conv1_padding = 1
    conv2_padding = 1
    conv1_ps = conv.Conv2DParams(
        i=conv.Conv2DInParams(w=32, h=32, d=3),
        f=conv.Conv2DFiltParams(w=3, h=3, d=3, l=1),
        p=conv1_padding,
        p_out=conv2_padding,
        s=1,
    )
    conv2_ps = conv.Conv2DParams(
        i=conv1_ps.o.to_in(),
        f=conv.Conv2DFiltParams(w=3, h=3, d=conv1_ps.f.l, l=1),
        p=conv2_padding,
        p_out=0,
        s=1,
    )

    # Create a simple model with a residual path
    onnx_m = onnx_mk_simple_residual(conv1_ps, conv2_ps)
    # Create random input
    inp = onnx_rand_input(onnx_m)

    # Execute using onnxruntime
    onnx.save(onnx_m, "simple_residual_2d.onnx")
    sess = onnxrt.InferenceSession("simple_residual_2d.onnx")
    out = sess.run(None, inp)

    # Parse the ONNX graph and create a pipeline
    graph = OnnxGraph(onnx_m)
    pprint(graph.partitions)
    pline = graph.get_pipeline()

    # Set inputs
    for (inp_name, inp_data) in inp.items():
        obj_info = graph.objs_info[inp_name]
        assert inp_data.shape == (1, ) + obj_info.shape  # NB: batching
        data = inp_data[0]
        data = np.pad(data, obj_info.padding)
        obj = pline.get_object(inp_name)
        obj[...] = data

    # Execute the pipeline
    print_info = False
    for iters in pline.tick_gen():
        if print_info:
            print("*" * 80)
        for (s, i) in iters.items():
            if print_info:
                print("%s: %s" % (s, i))
        if print_info:
            print("*" * 80)
    print("%s> DONE" % ("-" * 30, ))

    # Get pipeline results
    pline_out = pline.get_object("out")
    pline_v1 = pline.get_object("v1")
    pline_v2 = pline.get_object("v2")

    # Execute using manual ops
    in_m = np.pad(inp["in"][0], graph.objs_info["in"].padding)
    w1_m = np.array(graph.init_tvs["w1"].float_data).reshape(
        conv1_ps.get_filters_shape())
    v1_m = conv.conv2d_simple(in_m, w1_m, conv1_ps)
    v1_m = np.pad(v1_m, graph.objs_info["v1"].padding)
    np.testing.assert_allclose(v1_m,
                               pline_v1,
                               err_msg="pipeline v1 does not match manual v1")

    w2_m = np.array(graph.init_tvs["w2"].float_data).reshape(
        conv2_ps.get_filters_shape())
    v2_m = conv.conv2d_simple(v1_m, w2_m, conv2_ps)
    v2_m = np.pad(v2_m, graph.objs_info["v2"].padding)
    np.testing.assert_allclose(v2_m,
                               pline_v2,
                               err_msg="pipeline v2 does not match manual v2")

    np.testing.assert_allclose(out[0][0, :],
                               pline_out,
                               err_msg="OUT does not match",
                               rtol=1e-06)
    return graph
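# A self-contained reference sketch (not from the original file) of the
# residual dataflow the test above exercises: two stride-1 convolutions
# plus an elementwise ADD, assuming the ADD node computes out = v1 + v2 as
# the graph comment indicates. It reuses conv.conv2d_simple and the
# Conv2DParams helpers that appear in this file; the function name is
# hypothetical.
def residual_ref_sketch(image, w1, w2, conv1_ps, conv2_ps):
    v1 = conv.conv2d_simple(image, w1, conv1_ps)          # in -> v1
    v1_padded = np.pad(v1, conv2_ps.get_input_padding())  # pad v1 for conv2
    v2 = conv.conv2d_simple(v1_padded, w2, conv2_ps)      # v1 -> v2
    return v1 + v2                                        # ADD: out = v1 + v2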
def onnx_conv_get_params(graph: onnx.GraphProto, node):
    """ Create a Conv2DParams structure from an ONNX Conv node """
    if node.op_type != "Conv":
        raise TypeError("Expecting type 'Conv', but got type: '%s'" %
                        (node.op_type, ))

    attrs = dict((x.name, x) for x in node.attribute)

    # Padding: for now, fail if the padding differs across dimensions
    pads = attrs["pads"].ints
    p = pads[0]
    if not all([p == x for x in pads[1:]]):
        raise NotImplementedError("pads: %s not supported" % (pads, ))

    # The filter size is a bit tricky. One option might be kernel_shape, but
    # this does not include the full information (e.g., it can be 3x3, which
    # does not include the number of layers). Instead, we use the weights to
    # infer this information.

    # The input is not in the initializer data, while the weights are
    init_names = set(x.name for x in graph.initializer)
    (input_name, ) = (x for x in node.input if x not in init_names)
    (weights_name, ) = (x for x in node.input if x in init_names)

    # Try to find the input in the value info or inputs part of the graph
    for vi in chain(graph.value_info, graph.input):
        if vi.name == input_name:
            inp = vi
            break
    else:
        raise AssertionError("Did not find input. Bailing out")

    # Try to find the weights in the initializer part of the graph
    for vi in graph.initializer:
        if vi.name == weights_name:
            weights = vi
            break
    else:
        raise AssertionError(
            "Did not find weights in initializer data. Bailing out")

    # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Conv:
    #   The weight tensor that will be used in the convolutions; has size (M x
    #   C/group x kH x kW), where C is the number of channels, and kH and kW
    #   are the height and width of the kernel, and M is the number of feature
    #   maps. For more than 2 dimensions, the kernel shape will be (M x
    #   C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the
    #   dimension of the kernel. (...)
    f = conv.Conv2DFiltParams(
        w=weights.dims[-1],
        h=weights.dims[-2],
        d=weights.dims[-3],
        l=weights.dims[-4],
    )

    # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Conv:
    #   Input data tensor from previous layer; has size (N x C x H x W), where
    #   N is the batch size, C is the number of channels, and H and W are the
    #   height and width. Note that this is for the 2D image. Otherwise the
    #   size is (N x C x D1 x D2 ... x Dn). Optionally, if dimension
    #   denotation is in effect, the operation expects input data tensor to
    #   arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL,
    #   DATA_FEATURE, DATA_FEATURE ...].
    # NB: We ignore the batch size
    i = conv.Conv2DInParams(
        w=inp.type.tensor_type.shape.dim[-1].dim_value,
        h=inp.type.tensor_type.shape.dim[-2].dim_value,
        d=inp.type.tensor_type.shape.dim[-3].dim_value,
    )

    conv_ps = conv.Conv2DParams(
        i=i,
        f=f,
        p=p,
        # TODO: deal with strides
        s=1,
        # p_out has to be set later
        p_out=None,
    )
    return conv_ps
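# A minimal sketch (not part of the original file) showing how
# onnx_conv_get_params maps ONNX tensor dims onto Conv2DParams. It builds a
# single-Conv graph with onnx.helper; the tensor names ("in", "w", "out")
# and the helper function name are illustrative assumptions, and it relies
# on this file's onnx/numpy imports.
from onnx import helper, TensorProto

def _mk_single_conv_graph():
    # Input: N x C x H x W = 1 x 3 x 32 x 32 (the batch size is ignored by
    # onnx_conv_get_params)
    inp = helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                        [1, 3, 32, 32])
    out = helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                        [1, 16, 32, 32])
    # Weights: M x C x kH x kW = 16 x 3 x 3 x 3
    w_data = np.random.rand(16, 3, 3, 3).astype(np.float32)
    w = helper.make_tensor("w", TensorProto.FLOAT, w_data.shape,
                           w_data.flatten().tolist())
    node = helper.make_node("Conv", ["in", "w"], ["out"],
                            kernel_shape=[3, 3],
                            pads=[1, 1, 1, 1],
                            strides=[1, 1])
    return helper.make_graph([node], "single_conv", [inp], [out],
                             initializer=[w])

# Usage (expected: i=(w=32, h=32, d=3), f=(w=3, h=3, d=3, l=16), p=1):
#   g = _mk_single_conv_graph()
#   conv_ps = onnx_conv_get_params(g, g.node[0])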