def test_generate_output_stripe_configs_disable_striping(stripe_factors):
    """With striping disabled, exactly one (unstriped) stripe config is produced."""
    subgraph = cs.TESubgraph([], None)
    propagator = cs.Propagator(
        [[2, 0, 0], [0, 2, 0], [0, 0, 1]],
        [0, 0],
    )
    part = cs.InlinePart(subgraph, [propagator])
    in_tensor = cs.Tensor([800, 800], "uint8")
    out_tensor = cs.Tensor([400, 400], "uint8")
    part.set_input(0, in_tensor)
    part.set_output(out_tensor)
    in_tensor.add_consumer(part)
    out_tensor.add_producer(part)

    configs = _generate_output_stripe_configs(
        part, stripe_factors, enable_striping=False, multi_dimensional=False
    )
    assert len(configs) == 1
def test_generate_output_stripe_configs_single_dimension():
    """Single-dimensional striping should stripe along one axis at a time only."""
    stripe_factors = 3
    subgraph = cs.TESubgraph([], None)
    propagator = cs.Propagator(
        [[2, 0, 0], [0, 2, 0], [0, 0, 1]],
        [0, 0],
    )
    part = cs.InlinePart(subgraph, [propagator])
    in_tensor = cs.Tensor([800, 800], "uint8")
    out_tensor = cs.Tensor([400, 400], "uint8")
    part.set_input(0, in_tensor)
    part.set_output(out_tensor)
    in_tensor.add_consumer(part)
    out_tensor.add_producer(part)

    # Expected: stripes along either axis plus the unstriped config.
    expected_stripe_configs = {
        cs.StripeConfig([400, 1], [400, 400], [400, 1], [2, 1], [1, 400], [0, 0]),
        cs.StripeConfig([400, 200], [400, 400], [400, 200], [2, 1], [1, 2], [0, 0]),
        cs.StripeConfig([1, 400], [400, 400], [1, 400], [1, 2], [400, 1], [0, 0]),
        cs.StripeConfig([200, 400], [400, 400], [200, 400], [1, 2], [2, 1], [0, 0]),
        cs.StripeConfig([400, 400], [400, 400], [400, 400], [1, 2], [1, 1], [0, 0]),
    }

    actual = _generate_output_stripe_configs(
        part=part,
        stripe_factors=stripe_factors,
        enable_striping=True,
        multi_dimensional=False,
    )
    assert len(actual) == len(expected_stripe_configs)
    assert set(actual) == expected_stripe_configs
def test_small_graph():
    """Build a two-Part graph and check connectivity and the graph traversal order."""
    subgraph = cs.TESubgraph([], None)
    part_a = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
            cs.Propagator(
                [[0, 1, 0], [1, 0, 0], [0, 0, 1]],
                [-1, -1],
            ),
        ],
    )
    part_b = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    t1 = cs.Tensor([10, 10], "uint8")
    t2 = cs.Tensor([9, 9], "uint8")
    t3 = cs.Tensor([10, 10], "uint8")
    t4 = cs.Tensor([10, 10], "uint8")

    # part_a: (t1, t2) -> t3
    part_a.set_input(0, t1)
    part_a.set_input(1, t2)
    part_a.set_output(t3)
    t1.add_consumer(part_a)
    t2.add_consumer(part_a)
    t3.add_producer(part_a)
    # part_b: t3 -> t4
    part_b.set_input(0, t3)
    part_b.set_output(t4)
    t3.add_consumer(part_b)
    t4.add_producer(part_b)

    assert part_a.input_tensors == [t1, t2]
    assert part_a.output_tensor == t3
    assert part_b.input_tensors == [t3]
    assert part_b.output_tensor == t4
    # Producer/consumer relationships for every tensor.
    for tensor, producers, consumers in [
        (t1, [], [part_a]),
        (t2, [], [part_a]),
        (t3, [part_a], [part_b]),
        (t4, [part_b], []),
    ]:
        assert tensor.producers == producers
        assert tensor.consumers == consumers

    graph = cs.CascaderGraph([t1, t2], [t4])
    assert graph.input_tensors == [t1, t2]
    assert graph.output_tensors == [t4]
    assert graph.part_order == [part_b, part_a]
    for idx, part in enumerate(graph.part_order):
        assert graph.get_part_id(part) == idx
def test_ethosu_part():
    """Smoke-test EthosuPart construction, align hints, and the performance model."""
    te_subgraph = cs.TESubgraph([], None)
    output_quantum = [1, 2, 2, 8]
    propagator = cs.Propagator(
        [[1, 0, 0, 0, 2], [0, 1, 0, 0, 2], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
        [0, 0, 0, 0],
    )
    stripe_config = cs.StripeConfig(
        [1, 4, 4, 16],
        [1, 64, 72, 96],
        [1, 4, 4, 16],
        [1, 2, 3, 4],
        [1, 16, 13, 6],
        [0, 0, 0, 0],
    )
    valid_block_configs = [cs.BlockConfig([1, 2, 4, 16], [1, 2, 4, 16], 15000, 7500)]
    subkernels = 3
    part = EthosuPart(
        te_subgraph,
        [propagator],
        output_quantum,
        subkernels,
        valid_block_configs,
        1,
    )
    part.set_input(0, cs.Tensor(shape=[1, 66, 74, 16], dtype="int8"))
    part.set_output(cs.Tensor(shape=[1, 66, 74, 16], dtype="int8"))

    assert part.get_stripe_align_hint() == output_quantum
    # Exercise the performance model in both buffer modes; output is not verified.
    part.get_performance_info(stripe_config, BufferMode.ROLLING)
    part.get_performance_info(stripe_config, BufferMode.RECOMPUTE)
def test_generate_single_plans(SRAM, DRAM):
    """Single-Part plans should live in the cascade region and cover all tensors.

    BUG FIX: ``_generate_output_stripe_configs`` was called with only two
    arguments, but other callers in this file pass ``enable_striping`` and
    ``multi_dimensional`` explicitly, indicating the function requires them;
    the two-argument call would raise ``TypeError``. They are now supplied.
    """
    subgraph = cs.TESubgraph([], None)
    part_1 = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[2, 0, 0], [0, 2, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    tensor_1 = cs.Tensor([800, 800], "int8")
    tensor_2 = cs.Tensor([400, 400], "int8")
    part_1.set_input(0, tensor_1)
    part_1.set_output(tensor_2)
    tensor_1.add_consumer(part_1)
    tensor_2.add_producer(part_1)
    # Where each tensor is allowed to live.
    home_map = {
        tensor_1: [SRAM, DRAM],
        tensor_2: [SRAM],
    }
    options = make_options(cascade_region=SRAM, stripe_factors=1)
    output_stripe_configs = _generate_output_stripe_configs(
        part_1, options.stripe_factors, enable_striping=True, multi_dimensional=True
    )
    plans = _generate_single_plans(part_1, output_stripe_configs, home_map, options)
    for plan in plans:
        assert plan.interior_region == SRAM
        assert plan.part_group == frozenset([part_1])
        assert set(plan.tensor_configs.keys()) == set([tensor_1, tensor_2])
        # Open (boundary) configs of a single-part plan must be interior state.
        for open_config in plan.open_configs:
            assert open_config.state == cs.TensorConfigState.INTERIOR
def test_generate_graph_plans(SRAM, DRAM):
    """Plan generation over a two-Part graph should yield one plan per part group."""
    num_part_groups = 3
    stripe_factors = 4
    max_plan_size = 10

    subgraph = cs.TESubgraph([], None)
    part_a = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
            cs.Propagator(
                [[0, 1, 0], [1, 0, 0], [0, 0, 1]],
                [-1, -1],
            ),
        ],
    )
    part_b = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    t1 = cs.Tensor([10, 10], "int8")
    t2 = cs.Tensor([9, 9], "int8")
    t3 = cs.Tensor([10, 10], "int8")
    t4 = cs.Tensor([10, 10], "int8")

    # part_a: (t1, t2) -> t3, part_b: t3 -> t4
    part_a.set_input(0, t1)
    part_a.set_input(1, t2)
    part_a.set_output(t3)
    t1.add_consumer(part_a)
    t2.add_consumer(part_a)
    t3.add_producer(part_a)
    part_b.set_input(0, t3)
    part_b.set_output(t4)
    t3.add_consumer(part_b)
    t4.add_producer(part_b)

    graph = cs.CascaderGraph([t1, t2], [t4])
    home_map = {
        t1: [SRAM, DRAM],
        t2: [SRAM],
        t3: [SRAM],
        t4: [SRAM, DRAM],
    }
    options = make_options(
        cascade_region=SRAM,
        stripe_factors=stripe_factors,
        max_plan_size=max_plan_size,
    )

    closed_plans = _generate_graph_plans(graph, home_map, options)
    assert len(closed_plans) == num_part_groups
def test_tensor():
    """Tensor attributes should round-trip the constructor arguments."""
    tensor = cs.Tensor([1, 2, 3], "uint8", True, 0.5)
    assert tensor.shape == [1, 2, 3]
    assert tensor.dtype == "uint8"
    assert tensor.is_constant == True
    assert tensor.compression_ratio == 0.5
    # 1 * 2 * 3 elements of a one-byte dtype.
    assert tensor.size == 6
def test_generate_output_stripe_configs():
    """Multi-dimensional striping with stripe_factors=3 should give 13 configs.

    BUG FIX: ``_generate_output_stripe_configs`` was called with only two
    arguments, but other callers in this file pass ``enable_striping`` and
    ``multi_dimensional`` explicitly, indicating the function requires them;
    the two-argument call would raise ``TypeError``. They are now supplied.
    """
    stripe_factors = 3
    expected_configs = 13
    subgraph = cs.TESubgraph([], None)
    part_1 = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[2, 0, 0], [0, 2, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    tensor_1 = cs.Tensor([800, 800], "uint8")
    tensor_2 = cs.Tensor([400, 400], "uint8")
    part_1.set_input(0, tensor_1)
    part_1.set_output(tensor_2)
    tensor_1.add_consumer(part_1)
    tensor_2.add_producer(part_1)

    stripe_configs = _generate_output_stripe_configs(
        part_1, stripe_factors, enable_striping=True, multi_dimensional=True
    )
    assert len(stripe_configs) == expected_configs
def test_conv_performance(
    accelerator,
    op_type,
    activation,
    kernel,
    stride,
    dilation,
    padding,
    in_shape,
    out_shape,
    block_shape,
    input_block_shape,
    expected,
):
    """Check the modelled convolution compute cycles against a reference within 10%."""
    ifm_channels = in_shape[3]
    ifm_matrix, ifm_offset, weight_matrix, weight_offset, _, _ = make_matrices(
        op_type,
        kernel,
        stride,
        padding,
        "NHWC",
        "NHWC",
        dilation,
        ifm_channels,
    )
    propagator = cs.Propagator(ifm_matrix, ifm_offset)
    weight_propagator = cs.Propagator(weight_matrix, weight_offset)
    subkernels = ((kernel[0] + 7) // 8) * ((kernel[1] + 7) // 8)

    device_config = cs.EthosuDeviceConfig(accelerator)
    output_cycles = device_config._get_output_cycles(op_type, "", "int8", "int8", activation)
    # Scale per-element output cycles by the number of elements in the block.
    output_cycles *= reduce(lambda a, b: a * b, block_shape, 1)
    is_partkernel = device_config.is_partkernel(
        op_type, ifm_channels, "int8", kernel[0] * kernel[1]
    )
    cycles_per_block = device_config._estimate_compute_cycles_per_block(
        op_type,
        _Shape(block_shape),
        _Shape(input_block_shape),
        kernel[0],
        kernel[1],
        ifm_channels,
        "int8",
        is_partkernel,
    )
    block_configs = [
        cs.BlockConfig(input_block_shape, block_shape, cycles_per_block, int(output_cycles))
    ]

    output_quantum = [1, 1, 2, 8]
    te_subgraph = cs.TESubgraph([], None)
    part = cs.EthosuPart(
        te_subgraph,
        [propagator, weight_propagator],
        output_quantum,
        subkernels,
        block_configs,
        1,
    )
    part.set_input(0, cs.Tensor(in_shape, "int8"))
    part.set_input(1, cs.Tensor([ifm_channels, kernel[0], kernel[1], out_shape[-1]], "int8"))
    part.set_output(cs.Tensor(out_shape, "int8"))

    # One stripe covering the whole output.
    stripe_config = cs.StripeConfig(
        out_shape,
        out_shape,
        out_shape,
        [1, 2, 3, 4],
        [1] * len(output_quantum),
        [0] * len(output_quantum),
    )
    compute_cycles = part.get_performance_info(
        stripe_config, cs.BufferMode.ROLLING
    ).compute_cycles
    tolerance = expected * 0.1
    assert expected - tolerance <= compute_cycles <= expected + tolerance
def test_ethosu_conv2d_block_config_from_matcher(
    ifm_layout, ofm_layout, ifm_channels, expected_cycles
):
    """Match a conv2d TE graph and verify the chosen block config's cycle count."""
    ofm_channels = 10
    ifm_height = 123
    ifm_width = 155
    if ifm_layout == "NHWC":
        ifm_shape = (1, ifm_height, ifm_width, ifm_channels)
    else:
        ifm_shape = (1, ifm_height, 1 + ((ifm_channels - 1) // 16), ifm_width, 16)
    weight_shape = (ofm_channels, 3, 3, ifm_channels)
    scale_bias_shape = (ofm_channels, 10)

    out = conv2d_compute(
        ifm=te.placeholder(ifm_shape, dtype="int8"),
        weight=te.placeholder(weight_shape, dtype="int8"),
        scale_bias=te.placeholder(scale_bias_shape, dtype="uint8"),
        lut=te.placeholder((), dtype="uint8"),
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        weight_zero_point=0,
        strides=(1, 1),
        padding=(0, 0, 0, 0),
        dilation=(1, 1),
        activation="NONE",
        clip_min=0,
        clip_max=0,
        upscale="NONE",
        rounding_mode="TFL",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_conv2d(out, device_config)
    ofm_shape = [int(i) for i in part.subgraph.output_tensor.shape]

    # Wire concrete tensors onto the matched part.
    part.set_input(0, cs.Tensor(ifm_shape, "int8"))
    part.set_input(1, cs.Tensor(weight_shape, "int8"))
    part.set_input(2, cs.Tensor(scale_bias_shape, "int8"))
    part.set_output(cs.Tensor(ofm_shape, "int8"))

    # One stripe covering the whole output tensor.
    order = [1, 2, 3, 4] if ofm_layout == "NHWC" else [1, 2, 4, 3, 0]
    stripe_config = cs.StripeConfig(
        ofm_shape, ofm_shape, ofm_shape, order, [1] * len(order), [0] * len(order)
    )
    block = part.get_block_config(stripe_config)

    # The variables passed to get_valid_block_configs inside the matcher are
    # opaque here, so the best available check is the compute cycle count,
    # which the channel count strongly influences.
    assert block.compute_cycles == expected_cycles
def test_best_block_config(
    test_id,
    op_type,
    activation,
    kernel,
    stride,
    dilation,
    padding,
    in_shape,
    out_shape,
    layouts,
    acc_config,
    expected_block_configs,
):
    """Check that the block config chosen for a whole-tensor stripe is expected.

    BUG FIX: the ``order`` conditional was inverted — it selected the
    4-element order for the 5D NHCWB16 output layout and the 5-element order
    for the 4D NHWC layout, contradicting the convention used elsewhere in
    this file and the ``stripes``/``offset`` lengths derived from
    ``output_quantum``.
    """
    ofm_channels = out_shape[3]
    ifm_channels = in_shape[3]
    nhwc_to_nhcwb16, _ = get_layout_transform_matrices(ofm_channels)
    ifm_matrix, ifm_offset, weight_matrix, weight_offset, _, _ = make_matrices(
        op_type,
        kernel,
        stride,
        padding,
        layouts[0],
        layouts[1],
        dilation,
        ifm_channels,
        ofm_channels,
    )
    # Convert NHWC shapes into the brick (NHCWB16) layout where requested.
    if layouts[0] == "NHCWB16":
        in_shape = [
            int(math.ceil(n)) for n in np.matmul(nhwc_to_nhcwb16, in_shape + (1, )).tolist()[:-1]
        ]
    if layouts[1] == "NHCWB16":
        out_shape = [
            int(math.ceil(n)) for n in np.matmul(nhwc_to_nhcwb16, out_shape + (1, )).tolist()[:-1]
        ]
    propagator = cs.Propagator(ifm_matrix, ifm_offset)
    weight_propagator = cs.Propagator(weight_matrix, weight_offset)
    subkernels = ((kernel[0] + 7) // 8) * ((kernel[1] + 7) // 8)
    op_attrs = {
        "op": op_type,
        "activation": activation,
        "stride_h": stride[0],
        "stride_w": stride[1],
        "dilation_h": dilation[0],
        "dilation_w": dilation[1],
    }
    device_config = cs.EthosuDeviceConfig(acc_config)
    block_configs = device_config.get_valid_block_configs(
        propagator,
        op_attrs,
        out_shape,
        ofm_channels,
        ifm_channels,
        layouts[1],
        layouts[0],
        "int8",
        "int8",
        kernel[0],
        kernel[1],
    )
    # NHCWB16 output is 5D, so the quantum gains a brick dimension.
    output_quantum = [1, 1, 2, 8]
    if layouts[1] == "NHCWB16":
        output_quantum = [1, 1, 1, 2, 8]

    # Create EthosuPart
    te_subgraph = cs.TESubgraph([], None)
    part = cs.EthosuPart(
        te_subgraph,
        [propagator, weight_propagator],
        output_quantum,
        subkernels,
        block_configs,
        1,
    )

    # Add tensors
    input_tensor = cs.Tensor(in_shape, "int8")
    part.set_input(0, input_tensor)
    if op_type == "ethosu_conv2d":
        weight_tensor = cs.Tensor([ofm_channels, kernel[0], kernel[1], ifm_channels], "int8")
        part.set_input(1, weight_tensor)
    elif op_type == "ethosu_depthwise_conv2d":
        weight_tensor = cs.Tensor([ofm_channels, kernel[0], kernel[1], 1], "int8")
        part.set_input(1, weight_tensor)
    output_tensor = cs.Tensor(out_shape, "int8")
    part.set_output(output_tensor)

    # One stripe covering the whole output; the fixed condition matches the
    # dimensionality of out_shape after any NHCWB16 conversion above.
    order = [1, 2, 3, 4] if layouts[1] == "NHWC" else [1, 2, 4, 3, 0]
    stripes = [1] * len(output_quantum)
    offset = [0] * len(output_quantum)
    stripe_config = cs.StripeConfig(out_shape, out_shape, out_shape, order, stripes, offset)

    block = part.get_block_config(stripe_config)
    block_shape = tuple(int(a) for a in block.output_shape)
    assert block_shape in expected_block_configs[test_id]