def test_global_avgpool(self):
    """Global average pooling forward/backward over two batch sizes."""
    in_channels = 2

    # batch_size == 2: exercise both explicit pooling modes
    coords, feats, labels = data_loader(in_channels, batch_size=2)
    feats = feats.double()
    feats.requires_grad_()
    sinput = SparseTensor(feats, coords=coords)
    pool = MinkowskiGlobalPooling()
    print(pool(sinput))

    # Check backward through the raw autograd function for each mode
    fn = MinkowskiGlobalPoolingFunction()
    for mode in (GlobalPoolingMode.INDEX_SELECT, GlobalPoolingMode.SPARSE):
        self.assertTrue(
            gradcheck(fn, (sinput.F, True, mode, sinput.coords_key, None,
                           sinput.coords_man)))

    # batch_size == 1: let the implementation pick the mode (AUTO)
    coords, feats, labels = data_loader(in_channels, batch_size=1)
    feats = feats.double()
    feats.requires_grad_()
    sinput = SparseTensor(feats, coords=coords)
    pool = MinkowskiGlobalPooling()
    print(pool(sinput))

    # Check backward
    fn = MinkowskiGlobalPoolingFunction()
    self.assertTrue(
        gradcheck(fn, (sinput.F, True, GlobalPoolingMode.AUTO,
                       sinput.coords_key, None, sinput.coords_man)))
def test_broadcast(self):
    """Broadcast addition/multiplication forward pass and gradcheck."""
    in_channels, D = 2, 2
    coords, feats, labels = data_loader(in_channels)
    coords, feats_glob, labels = data_loader(in_channels)
    feats = feats.double()
    feats_glob = feats_glob.double()
    sinput = SparseTensor(feats, coords=coords)

    # Pool to one row per batch, then broadcast back onto the input
    pool = MinkowskiGlobalPooling(dimension=D)
    sglob = pool(sinput)
    sglob.F.requires_grad_()
    broadcast = MinkowskiBroadcastAddition(D)
    broadcast_mul = MinkowskiBroadcastMultiplication(D)
    print(broadcast(sinput, sglob))
    print(broadcast_mul(sinput, sglob))

    # Check backward for both operation types
    fn = MinkowskiBroadcastFunction()
    for op in (OperationType.ADDITION, OperationType.MULTIPLICATION):
        self.assertTrue(
            gradcheck(fn, (sinput.F, sglob.F, op, sinput.coords_key,
                           sglob.coords_key, sinput.coords_man)))
def test(self):
    """Strided conv followed by transposed conv; gradcheck the transpose."""
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)

    # Initialize context: downsample, then upsample back
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2,
        has_bias=True, dimension=D).double()
    conv_tr = MinkowskiConvolutionTranspose(
        out_channels, in_channels, kernel_size=2, stride=2,
        has_bias=True, dimension=D).double()
    x = conv(x)
    print(conv_tr(x))

    # Check backward through the raw autograd function
    fn = MinkowskiConvolutionTransposeFunction()
    self.assertTrue(
        gradcheck(fn, (x.F, conv_tr.kernel, x.tensor_stride, conv_tr.stride,
                       conv_tr.kernel_size, conv_tr.dilation,
                       conv_tr.region_type_, conv_tr.region_offset_,
                       x.coords_key, None, x.coords_man)))
def test(self):
    """Channelwise convolution forward/backward smoke test."""
    print(f"{self.__class__.__name__}: test")
    in_channels, D = 3, 2
    coords, feats, labels = data_loader(in_channels, batch_size=2)
    # Create random coordinates with tensor stride == 2
    # (kept for parity with the GPU test; not consumed below)
    out_coords, tensor_stride = get_random_coords()
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)
    conv = MinkowskiChannelwiseConvolution(
        in_channels, kernel_size=3, stride=1, bias=False,
        dimension=D).double()
    print('Initial input: ', x)
    y = conv(x)
    print('Conv output: ', y)
    # Backprop a scalar through the sparse output
    y.F.sum().backward()
    print(x.F.grad)
def test(self):
    """Densify a sparse convolution output and backprop through it."""
    print(f"{self.__class__.__name__}: test_dense")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    sparse_in = SparseTensor(feats, coords=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2,
        has_bias=True, dimension=D).double()
    sparse_out = conv(sparse_in)
    print(sparse_in.C, sparse_out.C)

    # Convert to a dense tensor: once with default bounds, once with an
    # explicit coordinate window.
    dense_output, min_coord, tensor_stride = sparse_out.dense()
    dense_output, min_coord, tensor_stride = sparse_out.dense(
        min_coords=torch.IntTensor([-2, -2]),
        max_coords=torch.IntTensor([4, 4]))
    print(dense_output)
    print(min_coord)
    print(tensor_stride)

    print(feats.grad)  # None until backward runs
    dense_output.sum().backward()
    print(feats.grad)
def test_gpu(self):
    """Channelwise convolution forward pass on the GPU."""
    print(f"{self.__class__.__name__}: test_gpu")
    if not torch.cuda.is_available():
        return
    device = torch.device('cuda')
    in_channels, D = 3, 2
    coords, feats, labels = data_loader(in_channels, batch_size=2)
    # Create random coordinates with tensor stride == 2
    # (kept for parity with the CPU test; not consumed below)
    out_coords, tensor_stride = get_random_coords()
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords).to(device)
    conv = MinkowskiChannelwiseConvolution(
        in_channels, kernel_size=3, stride=1, bias=False,
        dimension=D).double().to(device)
    print('Initial input: ', x)
    print('Conv output: ', conv(x))
def test_unpool(self):
    """Pooling transpose (unpooling) after a strided conv; gradcheck."""
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    x = SparseTensor(feats, coords=coords)
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2,
        dimension=D).double()
    unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
    x = conv(x)
    print(unpool(x))

    # Check backward through the raw autograd function
    fn = MinkowskiPoolingTransposeFunction()
    self.assertTrue(
        gradcheck(fn, (x.F, x.tensor_stride, unpool.stride,
                       unpool.kernel_size, unpool.dilation,
                       unpool.region_type_, unpool.region_offset_, False,
                       x.coords_key, None, x.coords_man)))
def test_avgpooling_gpu(self):
    """Average pooling on CPU then GPU; gradcheck the GPU path."""
    if not torch.cuda.is_available():
        return
    in_channels, D = 2, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)
    pool = MinkowskiAvgPooling(kernel_size=3, stride=2, dimension=D)
    print(pool(x))

    # Repeat on the GPU
    device = torch.device('cuda')
    with torch.cuda.device(0):
        x = x.to(device)
        pool = pool.to(device)
        print(pool(x))

    # Check backward through the raw autograd function
    fn = MinkowskiAvgPoolingFunction()
    self.assertTrue(
        gradcheck(
            fn, (x.F, x.tensor_stride, pool.stride, pool.kernel_size,
                 pool.dilation, pool.region_type_, pool.region_offset_,
                 True, x.coords_key, None, x.coords_man)))
def test_tr(self):
    """Transposed convolution onto user-specified output coordinates."""
    print(f"{self.__class__.__name__}: test_tr")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels, batch_size=2)
    # tensor stride must be at least 2 for convolution transpose with stride 2
    coords[:, :2] *= 2

    # Build random integer output coordinates (last column = batch index)
    out_coords = torch.rand(10, 3)
    out_coords[:, :2] *= 10  # random coords
    out_coords[:, 2] *= 2  # random batch index
    out_coords = out_coords.floor().int()

    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords, tensor_stride=2)
    cm = x.coords_man
    print(cm._get_coords_key(2))
    conv_tr = MinkowskiConvolutionTranspose(
        in_channels, out_channels, kernel_size=3, stride=2,
        bias=False, dimension=D).double()
    print('Initial input: ', x)
    print('Specified output coords: ', out_coords)
    y = conv_tr(x, out_coords)
    print('Conv output: ', y)
    y.F.sum().backward()
    print(x.F.grad)
def test_coords_map(self):
    """Exercise CoordsManager initialization, striding, and coords maps."""
    coords, _, _ = data_loader(1)
    key = CoordsKey(D=2)
    key.setTensorStride(1)

    # Initialize map
    cm = CoordsManager(D=2)
    mapping, inverse_mapping = cm.initialize(
        coords, key, force_remap=True, allow_duplicate_coords=False)
    print(mapping, len(mapping))
    cm.print_diagnostics(key)
    print(cm)
    print(cm.get_batch_size())
    print(cm.get_batch_indices())

    # Create a strided map
    stride_key = cm.stride(key, [2, 2])
    print('Stride: ', cm.get_coords(stride_key))
    cm.print_diagnostics(key)
    print(cm)

    # Dump the in -> out correspondence between strides 1 and 2
    ins, outs = cm.get_coords_map(1, 2)
    inc = cm.get_coords(1)
    outc = cm.get_coords(2)
    for i, o in zip(ins, outs):
        print(f"{i}: ({inc[i]}) -> {o}: ({outc[o]})")
def test_duplicate_coords(self):
    """Duplicate-coordinate handling with and without feature averaging."""
    print(f"{self.__class__.__name__}: test_duplicate_coords")
    coords, feats, labels = data_loader(nchannel=2)
    # create duplicate coords
    coords[0] = coords[1]
    coords[2] = coords[3]
    sinput = SparseTensor(feats, coords=coords, allow_duplicate_coords=True)
    # two pairs were merged, so two rows disappear
    self.assertTrue(len(sinput) == len(coords) - 2)
    print(coords)
    print(sinput)

    # UNWEIGHTED_AVERAGE merges duplicates by averaging their features
    sinput = SparseTensor(
        feats,
        coords=coords,
        quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)
    self.assertTrue(len(coords) == 16)
    self.assertTrue(len(sinput) == 14)

    # 1D
    coords = torch.IntTensor([[0, 1], [0, 1], [0, 2], [0, 2], [1, 0],
                              [1, 0], [1, 1]])
    feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
    # expected merged features: 0.5, 2.5, 5.5, 7
    sinput = SparseTensor(
        coords=coords,
        feats=feats,
        quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)
    self.assertTrue(len(sinput) == 4)
    self.assertTrue(0.5 in sinput.feats)
    self.assertTrue(2.5 in sinput.feats)
    self.assertTrue(5.5 in sinput.feats)
    self.assertTrue(7 in sinput.feats)
    # slicing back restores one row per original coordinate
    self.assertTrue(len(sinput.slice(sinput)) == len(coords))
def test_kernelmap(self):
    """Print the kernel map of a strided convolution and check its size."""
    print(f"{self.__class__.__name__}: test_kernelmap")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)
    cm = x.coords_man
    ikey = cm.get_coords_key(1)
    print('Input coords: ')
    cm.print_diagnostics(ikey)
    print('Convolution: ')

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2,
        has_bias=True, dimension=D).double()
    y = conv(x)
    iC = x.C.numpy()
    oC = y.C.numpy()
    print(iC)
    print(oC)

    # Each row is (kernel offset index, input row, output row)
    kernel_map = y.coords_man.get_kernel_map(1, 2, stride=2, kernel_size=3)
    for row in kernel_map:
        k, i, o = row
        print(k.item(), iC[i], oC[o])
    self.assertTrue(len(kernel_map) == 26)
def test_pruning(self):
    """Prune features with a boolean mask; gradcheck on CPU and GPU."""
    in_channels, D = 2, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)
    use_feat = torch.rand(feats.size(0)) < 0.5
    pruning = MinkowskiPruning(D)
    output = pruning(input, use_feat)
    print(use_feat, output)

    # Check backward
    fn = MinkowskiPruningFunction()
    self.assertTrue(
        gradcheck(fn, (input.F, use_feat, input.coords_key,
                       output.coords_key, input.coords_man)))

    # BUGFIX: the GPU path ran unconditionally and crashed on CPU-only
    # machines; skip it when CUDA is unavailable (matches sibling tests).
    if not torch.cuda.is_available():
        return
    device = torch.device('cuda')
    with torch.cuda.device(0):
        input = input.to(device)
        output = pruning(input, use_feat)
        print(output)
        self.assertTrue(
            gradcheck(fn, (input.F, use_feat, input.coords_key,
                           output.coords_key, input.coords_man)))
def test_union(self):
    """Union of two sparse tensors that share one coords manager."""
    in_channels = 2
    coords, feats, labels = data_loader(in_channels)
    N = len(coords)
    input1 = SparseTensor(
        torch.rand(N, in_channels, dtype=torch.double), coords=coords)
    input2 = SparseTensor(
        torch.rand(N, in_channels, dtype=torch.double),
        coords=coords + 1,
        coords_manager=input1.coords_man,  # Must use same coords manager
        force_creation=True  # The tensor stride [1, 1] already exists.
    )
    input1.F.requires_grad_()
    input2.F.requires_grad_()
    inputs = [input1, input2]

    union = MinkowskiUnion()
    output = union(input1, input2)
    print(output)
    output.F.sum().backward()

    # BUGFIX: the GPU path ran unconditionally and crashed on CPU-only
    # machines; skip it when CUDA is unavailable (matches sibling tests).
    if not torch.cuda.is_available():
        return
    device = torch.device('cuda')
    with torch.cuda.device(0):
        inputs = [input.to(device) for input in inputs]
        output = union(*inputs)
        output.F.sum().backward()
        print(output)
def test(self):
    """Convolution onto externally specified output coordinates."""
    print(f"{self.__class__.__name__}: test")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels, batch_size=2)
    # Create random coordinates with tensor stride == 2
    out_coords, tensor_stride = get_random_coords()

    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)
    cm = x.coords_man
    print(cm._get_coords_key(1))

    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=1,
        bias=False, dimension=D).double()
    print('Initial input: ', x)
    print('Specified output coords: ', out_coords)
    y = conv(x, out_coords)

    # To specify the tensor stride, pass a coords key instead of raw coords
    out_coords_key = cm.create_coords_key(out_coords, tensor_stride=2)
    y = conv(x, out_coords_key)
    print('Conv output: ', y)
    y.F.sum().backward()
    print(x.F.grad)
def test(self):
    """Strided convolution forward, kernel map dump, and gradcheck."""
    print(f"{self.__class__.__name__}: test")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2,
        has_bias=True, dimension=D).double()
    y = conv(x)
    print(y)

    kernel_map = x.coords_man.get_kernel_map(1, 2, stride=2, kernel_size=3)
    print(kernel_map)

    # Check backward through the raw autograd function
    fn = MinkowskiConvolutionFunction()
    self.assertTrue(
        gradcheck(fn, (x.F, conv.kernel, x.tensor_stride, conv.stride,
                       conv.kernel_size, conv.dilation, conv.region_type_,
                       conv.region_offset_, x.coords_key, None,
                       x.coords_man)))
def test_broadcast_gpu(self):
    """Compare CPU and GPU broadcast ops; gradcheck the GPU path."""
    # BUGFIX: skip when CUDA is unavailable instead of crashing
    # (matches sibling GPU tests).
    if not torch.cuda.is_available():
        return
    in_channels, D = 2, 2
    coords, feats, labels = data_loader(in_channels)
    coords, feats_glob, labels = data_loader(in_channels)
    feats = feats.double()
    feats_glob = feats_glob.double()
    input = SparseTensor(feats, coords=coords)
    pool = MinkowskiGlobalPooling()
    input_glob = pool(input)
    input_glob.F.requires_grad_()
    broadcast_add = MinkowskiBroadcastAddition()
    broadcast_mul = MinkowskiBroadcastMultiplication()
    broadcast_cat = MinkowskiBroadcastConcatenation()
    cpu_add = broadcast_add(input, input_glob)
    cpu_mul = broadcast_mul(input, input_glob)
    cpu_cat = broadcast_cat(input, input_glob)

    # Check backward
    fn = MinkowskiBroadcastFunction()
    device = torch.device('cuda')
    input = input.to(device)
    input_glob = input_glob.to(device)
    gpu_add = broadcast_add(input, input_glob)
    gpu_mul = broadcast_mul(input, input_glob)
    gpu_cat = broadcast_cat(input, input_glob)

    # BUGFIX: the original asserted `diff < 1e-5` without abs(), which
    # is satisfied by arbitrarily large *negative* differences; compare
    # the absolute difference instead.
    self.assertTrue(
        torch.prod(torch.abs(gpu_add.F.cpu() - cpu_add.F) < 1e-5).item() == 1)
    self.assertTrue(
        torch.prod(torch.abs(gpu_mul.F.cpu() - cpu_mul.F) < 1e-5).item() == 1)
    self.assertTrue(
        torch.prod(torch.abs(gpu_cat.F.cpu() - cpu_cat.F) < 1e-5).item() == 1)
    self.assertTrue(
        gradcheck(
            fn, (input.F, input_glob.F, OperationType.ADDITION,
                 input.coords_key, input_glob.coords_key,
                 input.coords_man)))
    self.assertTrue(
        gradcheck(
            fn, (input.F, input_glob.F, OperationType.MULTIPLICATION,
                 input.coords_key, input_glob.coords_key,
                 input.coords_man)))
def test_duplicate_coords(self):
    """Duplicate coordinates collapse when allow_duplicate_coords is set."""
    print(f"{self.__class__.__name__}: test_duplicate_coords")
    coords, feats, labels = data_loader(nchannel=2)
    # create duplicate coords
    coords[0] = coords[1]
    coords[2] = coords[3]
    sinput = SparseTensor(feats, coords=coords, allow_duplicate_coords=True)
    # two pairs were merged, so two rows disappear
    self.assertTrue(len(sinput) == len(coords) - 2)
    print(coords)
    print(sinput)
def test_force_creation(self):
    """force_creation allows a second tensor at an already-used stride."""
    print(f"{self.__class__.__name__}: test_force_creation")
    coords, feats, labels = data_loader(nchannel=2)
    input1 = SparseTensor(feats, coords=coords, tensor_stride=1)
    # Re-register the same coordinates in input1's manager
    input2 = SparseTensor(
        feats,
        coords=coords,
        tensor_stride=1,
        coords_manager=input1.coords_man,
        force_creation=True)
    print(input2)
def test_with_convtr(self):
    """Interleave transposed convs, convs, and pruning; check gradients."""
    channels, D = [2, 3, 4], 2
    coords, feats, labels = data_loader(channels[0], batch_size=1)
    feats = feats.double()
    feats.requires_grad_()

    # Create a sparse tensor with large tensor strides for upsampling
    start_tensor_stride = 4
    x = SparseTensor(
        feats,
        coords=coords * start_tensor_stride,
        tensor_stride=start_tensor_stride)

    conv_tr1 = MinkowskiConvolutionTranspose(
        channels[0], channels[1], kernel_size=3, stride=2,
        generate_new_coords=True, dimension=D).double()
    conv1 = MinkowskiConvolution(
        channels[1], channels[1], kernel_size=3, dimension=D).double()
    conv_tr2 = MinkowskiConvolutionTranspose(
        channels[1], channels[2], kernel_size=3, stride=2,
        generate_new_coords=True, dimension=D).double()
    conv2 = MinkowskiConvolution(
        channels[2], channels[2], kernel_size=3, dimension=D).double()
    pruning = MinkowskiPruning()

    # First upsample/conv/prune stage; generated features must be nonzero
    out1 = conv_tr1(x)
    self.assertTrue(torch.prod(torch.abs(out1.F) > 0).item() == 1)
    out1 = conv1(out1)
    use_feat = torch.rand(len(out1)) < 0.5
    out1 = pruning(out1, use_feat)

    # Second stage
    out2 = conv_tr2(out1)
    self.assertTrue(torch.prod(torch.abs(out2.F) > 0).item() == 1)
    use_feat = torch.rand(len(out2)) < 0.5
    out2 = pruning(out2, use_feat)
    out2 = conv2(out2)
    print(out2)

    out2.F.sum().backward()
    # Check gradient flow
    print(x.F.grad)
def test_global_maxpool(self):
    """Global max pooling forward pass and gradcheck."""
    in_channels = 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)
    pool = MinkowskiGlobalMaxPooling()
    print(pool(x))
    # Check backward through the raw autograd function
    fn = MinkowskiGlobalMaxPoolingFunction()
    self.assertTrue(
        gradcheck(fn, (x.F, x.coords_key, None, x.coords_man)))
def test_inst_norm(self):
    """Instance normalization forward pass and gradcheck on CPU."""
    in_channels, D = 2, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    x = SparseTensor(feats, coords=coords)
    x.F.requires_grad_()
    norm = MinkowskiInstanceNorm(
        num_features=in_channels, dimension=D).double()
    print(norm(x))
    # Check backward through the raw autograd function
    fn = MinkowskiInstanceNormFunction()
    self.assertTrue(
        gradcheck(fn, (x.F, x.coords_key, None, x.coords_man)))
def test_maxpooling(self):
    """Max pooling forward/backward on CPU and (if available) GPU."""
    in_channels, D = 2, 2
    coords, feats, labels = data_loader(in_channels, batch_size=2)
    # BUGFIX: cast to double *before* flagging requires_grad so `feats`
    # stays a leaf tensor. The original called requires_grad_() first and
    # then rebound `feats` to a fresh .double() copy, handing gradcheck a
    # non-leaf input. This also matches every sibling test.
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)
    pool = MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=D)
    print(pool)
    output = pool(input)
    print(input)
    print(output)

    C = output.coords_man
    print(C.get_coords(2))
    region_type, _, _ = pool.kernel_generator.cache[(1, 1)]
    print(
        C.get_kernel_map(
            1, 2, stride=2, kernel_size=2, region_type=region_type,
            is_pool=True))

    # Check backward
    fn = MinkowskiMaxPoolingFunction()
    # Even numbered kernel_size error!
    self.assertTrue(
        gradcheck(
            fn, (input.F, input.tensor_stride, pool.stride,
                 pool.kernel_size, pool.dilation, pool.region_type_,
                 pool.region_offset_, input.coords_key, None,
                 input.coords_man)))

    if not torch.cuda.is_available():
        return
    device = torch.device('cuda')
    input = input.to(device)
    output = pool(input)
    print(output)
    # Check backward
    self.assertTrue(
        gradcheck(
            fn, (input.F, input.tensor_stride, pool.stride,
                 pool.kernel_size, pool.dilation, pool.region_type_,
                 pool.region_offset_, input.coords_key, None,
                 input.coords_man)))
def test_inst_norm_gpu(self):
    """Instance normalization forward pass and gradcheck on GPU."""
    # BUGFIX: skip when CUDA is unavailable instead of crashing
    # (matches sibling GPU tests).
    if not torch.cuda.is_available():
        return
    in_channels = 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    device = torch.device('cuda')
    input = SparseTensor(feats, coords=coords).to(device)
    input.F.requires_grad_()
    norm = MinkowskiInstanceNorm(
        num_features=in_channels).to(device).double()
    out = norm(input)
    print(out)
    # Check backward through the raw autograd function
    fn = MinkowskiInstanceNormFunction()
    self.assertTrue(
        gradcheck(fn, (input.F, GlobalPoolingMode.AUTO, input.coords_key,
                       None, input.coords_man)))
def test_kernelmap(self):
    """Print the kernel map produced by a strided convolution."""
    print(f"{self.__class__.__name__}: test_kernelmap")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)
    # Initialize context
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2,
        has_bias=True, dimension=D).double()
    y = conv(x)
    print(x.C, y.C)
    print(y.coords_man.get_kernel_map(1, 2, stride=2, kernel_size=3))
def conv_on_coords():
    """Demonstrate conv transpose onto various output coordinate specs."""
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels, batch_size=1)
    # Create input with tensor stride == 4
    strided_coords4, tensor_stride4 = get_random_coords(tensor_stride=4)
    strided_coords2, tensor_stride2 = get_random_coords(tensor_stride=2)
    input = ME.SparseTensor(
        feats=torch.rand(len(strided_coords4), in_channels),  #
        coords=strided_coords4,
        tensor_stride=tensor_stride4)
    cm = input.coords_man

    # Convolution transpose and generate new coordinates
    conv_tr = ME.MinkowskiConvolutionTranspose(in_channels,
                                               out_channels,
                                               kernel_size=3,
                                               stride=2,
                                               has_bias=False,
                                               dimension=D)
    # BUGFIX: MinkowskiPoolingTranspose takes no channel or bias
    # arguments (cf. its other uses in this file); the original passed
    # in_channels/out_channels/has_bias, which raised a TypeError at
    # construction.
    pool_tr = ME.MinkowskiPoolingTranspose(kernel_size=2,
                                           stride=2,
                                           dimension=D)

    # If the there is no coordinates defined for the tensor stride, it will create one
    # tensor stride 4 -> conv_tr with stride 2 -> tensor stride 2
    output1 = conv_tr(input)
    # output1 = pool_tr(input)

    # convolution on the specified coords
    output2 = conv_tr(input, coords)
    # output2 = pool_tr(input, coords)

    # convolution on the specified coords with tensor stride == 2
    coords_key = cm.create_coords_key(strided_coords2, tensor_stride=2)
    output3 = conv_tr(input, coords_key)
    # output3 = pool_tr(input, coords_key)

    # convolution on the coordinates of a sparse tensor
    output4 = conv_tr(input, output1)
def test_empty(self):
    """Pruning with an all-false mask yields an empty tensor."""
    in_channels = 2
    coords, feats, labels = data_loader(in_channels, batch_size=1)
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)
    keep = torch.BoolTensor(len(x))
    keep.zero_()  # drop every coordinate
    pruning = MinkowskiPruning()
    y = pruning(x, keep)
    print(x)
    print(keep)
    print(y)
    # Check backward through the raw autograd function
    fn = MinkowskiPruningFunction()
    self.assertTrue(
        gradcheck(fn, (x.F, keep, x.coords_key, y.coords_key,
                       x.coords_man)))
def conv():
    """Run a convolution and a transposed convolution and print results."""
    in_channels, out_channels, D = 2, 3, 2
    # BUGFIX: removed the leftover `bp()` debugger breakpoints that
    # halted the script on every run.
    coords, feats, labels = data_loader(in_channels, batch_size=1)

    # Convolution
    input = ME.SparseTensor(feats=feats, coords=coords)
    conv = ME.MinkowskiConvolution(in_channels,
                                   out_channels,
                                   kernel_size=3,
                                   stride=2,
                                   has_bias=False,
                                   dimension=D)
    output = conv(input)
    print('Input:')
    print_sparse_tensor(input)
    print('Output:')
    print_sparse_tensor(output)

    # Convolution transpose and generate new coordinates
    strided_coords, tensor_stride = get_random_coords()
    input = ME.SparseTensor(
        feats=torch.rand(len(strided_coords), in_channels),  #
        coords=strided_coords,
        tensor_stride=tensor_stride)
    conv_tr = ME.MinkowskiConvolutionTranspose(in_channels,
                                               out_channels,
                                               kernel_size=3,
                                               stride=2,
                                               has_bias=False,
                                               dimension=D)
    output = conv_tr(input)
    print('\nInput:')
    print_sparse_tensor(input)
    print('Convolution Transpose Output:')
    print_sparse_tensor(output)
def test_global_maxpool(self):
    """Global max pooling on CPU; compare against the GPU result."""
    in_channels = 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)
    pool = MinkowskiGlobalMaxPooling()
    output = pool(input)
    print(output)

    # Check backward
    fn = MinkowskiGlobalMaxPoolingFunction()
    self.assertTrue(
        gradcheck(fn, (input.F, input.coords_key, None, input.coords_man)))

    if torch.cuda.is_available():
        input_cuda = input.to(torch.device(0))
        # BUGFIX: pool the CUDA tensor. The original called `pool(input)`
        # on the CPU tensor again, so the GPU path was never exercised
        # and the comparison below was vacuously true.
        output_cuda = pool(input_cuda)
        self.assertTrue(torch.allclose(output_cuda.F.cpu(), output.F))
def test_kernelmap_gpu(self):
    """Dump the GPU kernel map of a strided convolution and check its size."""
    print(f"{self.__class__.__name__}: test_kernelmap_gpu")
    if not torch.cuda.is_available():
        return
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    x = SparseTensor(feats, coords=coords)
    cm = x.coords_man
    ikey = cm._get_coords_key(1)
    print('Input coords: ')
    cm.print_diagnostics(ikey)
    print('Convolution: ')

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2,
        has_bias=True, dimension=D).double()
    y = conv(x)
    iC = x.C.numpy()
    oC = y.C.numpy()
    print(iC)
    print(oC)

    # One (in_map, out_map) pair per kernel offset
    in_maps, out_maps = y.coords_man.get_kernel_map(
        1, 2, stride=2, kernel_size=3, on_gpu=True)
    kernel_index = 0
    for in_map, out_map in zip(in_maps, out_maps):
        for i, o in zip(in_map, out_map):
            print(kernel_index, iC[i], '->', oC[o])
        kernel_index += 1
    self.assertTrue(sum(len(in_map) for in_map in in_maps) == 26)