def extract_convolution(self, input_name, output_name, scope_id,
                        num_output, kernel_size, stride, padding,
                        data_format="NCHW", weight_format="NHWC",
                        axis=1, dilation=1, groups=1,
                        layer_names=None):
    """Emit a Caffe Convolution layer from weights found at *scope_id*.

    Loads the (kernel, bias) pair via ``self.get_weights``, converts the
    kernel from TensorFlow's NHWC weight layout to the NCHW-style layout
    Caffe expects, registers the layer on ``self.caffe_model``, and — when
    input data is being traced in ``self.data_dict`` — also computes the
    layer's reference output with ``Operators.convolution``.

    Args:
        input_name: Name of the bottom blob feeding this layer.
        output_name: Name used for both the layer and its top blob.
        scope_id: Scope identifier passed to ``self.get_weights``.
        num_output: Number of output channels.
        kernel_size, stride, padding, dilation, groups: Convolution
            hyper-parameters, forwarded to ``layer.convolution_param``.
        data_format: Activation layout; only "NCHW" is supported here.
        weight_format: Source weight layout; transposition is applied
            only for "NHWC".
        axis: Channel axis, forwarded to the NCHWC8→NCHW preprocessing.
        layer_names: Optional [layer, kernel, bias] name triple for
            ``self.get_weights``; defaults to
            ["convolution", "kernel", "bias"].

    Returns:
        output_name, for chaining.
    """
    # None-sentinel instead of a mutable default list: a literal list
    # default would be shared across every call of this method.
    if layer_names is None:
        layer_names = ["convolution", "kernel", "bias"]
    kernel, bias = self.get_weights(scope_id, layer_names)
    if weight_format == "NHWC":
        if len(kernel.shape) == 3:
            # 3-D (1-D conv) kernel: reverse axes, then append a trailing
            # singleton spatial dimension to obtain a 4-D Caffe kernel.
            kernel = kernel.transpose([2, 1, 0])
            kernel = np.expand_dims(kernel, -1)
        elif len(kernel.shape) == 4:
            # HWIO (TensorFlow) -> OIHW (Caffe) weight layout.
            kernel = kernel.transpose([3, 2, 0, 1])
        else:
            print("[ERROR] unsupported convolution kernel size")
            exit(1)
    layer = caffe_net.LayerParameter(name=output_name, type='Convolution',
                                     bottom=[input_name], top=[output_name])
    if bias is None:
        layer.add_data(kernel)
        bias_term = False
    else:
        layer.add_data(kernel, bias)
        bias_term = True
    layer.convolution_param(num_output, kernel_size, stride, padding,
                            bias_term, dilation, groups)
    self.caffe_model.add_layer(layer)
    # Only NCHW activations are handled by the simulation path below.
    assert (data_format == "NCHW")
    if (self.data_dict[input_name] is not None):
        # input_shape / inv_transpose_dims are only needed by the
        # (currently disabled) postprocess step below.
        input_data, input_shape, inv_transpose_dims = \
            self.preprocess_nchwc8_nchw_input(input_name, axis)
        output_data = Operators.convolution(input_data, kernel, bias,
                                            num_output, kernel_size,
                                            stride, padding,
                                            dilation, groups,
                                            output_name)
        # NOTE(review): the NCHWC8 postprocess was deliberately disabled
        # upstream; the raw operator output is stored instead:
        #self.postprocess_nchwc8_nchw_output(output_data, input_shape, inv_transpose_dims)
        self.data_dict[output_name] = output_data
    else:
        # No traced input data: propagate None so downstream layers skip
        # their simulation step too.
        self.data_dict[output_name] = None
    return output_name