def extract(cls, node):
    proto_layer, model_layer = node.pb, node.model_pb

    if not proto_layer:
        raise Error('Protobuf layer can not be empty')

    deconv_param = proto_layer.convolution_param

    params = conv_set_params(deconv_param, 'Deconv2D')
    attrs = conv_create_attrs(params)
    attrs.update({
        'type': 'Deconvolution',
        'op': 'Deconv2D',
        'get_group': lambda node: node.group,
        'get_output_feature_dim': lambda node: node.output,
        'input_feature_channel': 0,
        'output_feature_channel': 1,
    })

    # Embed weights and biases as attributes.
    # They will be moved to separate nodes in a special pass.
    attrs.update(weights_biases(deconv_param.bias_term, model_layer))
    attrs.update(layout_attrs())

    # update the attributes of the node
    Convolution.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node):
    final_pads = get_pads(node.module)

    # Extract strides attribute
    strides = node.module.stride
    final_strides = np.array([1, 1, *strides], dtype=np.int64)

    # Extract dilations attribute
    dilations = node.module.dilation
    if isinstance(dilations, int):
        dilations = [dilations, dilations]
    final_dilations = np.array([1, 1, *dilations], dtype=np.int64)

    attrs = {
        'op': 'Conv2D',  # Note: the expected value is 'Conv2D', not 'Conv2d'
        'pad': final_pads,
        'stride': final_strides,
        'dilation': final_dilations,
        'group': 1,
        'kernel_spatial': np.array(node.module.kernel_size, dtype=np.int64),
        'input_feature_channel': 1,
        'output_feature_channel': 0,
        'channel_dims': np.array([1], dtype=np.int64),
        'batch_dims': np.array([0], dtype=np.int64),
        'layout': 'NCHW',
    }

    # update the attributes of the node
    Convolution.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node):
    proto_layer, model_layer = node.pb, node.model_pb

    if not proto_layer:
        raise Error('Protobuf layer can not be empty')

    conv_param = proto_layer.convolution_param
    conv_type = 'ConvND' if len(proto_layer.bottom) > 1 else 'Conv2D'

    params = conv_set_params(conv_param, conv_type)
    attrs = conv_create_attrs(params)
    attrs.update({
        'op': conv_type,
        'get_group': lambda node: node.group,
        'get_output_feature_dim': lambda node: node.output,
        'weights_index': 1 if conv_type == 'Conv2D' else 2
    })

    # Embed weights and biases as attributes.
    # They will be moved to separate nodes in a special pass.
    attrs.update(
        weights_biases(conv_param.bias_term, model_layer, start_index=len(proto_layer.bottom), proto=conv_param))
    attrs.update(layout_attrs())

    # update the attributes of the node
    Convolution.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node: Node) -> bool:
    """
    Extract conv parameters from node.parameters.
    node.parameters is a file-descriptor-like object.
    :param node: Convolution node
    :return:
    """
    pb = node.parameters
    kernel = read_token_value(pb, b'<PatchDim>')
    stride = read_token_value(pb, b'<PatchStep>')
    patch_stride = read_token_value(pb, b'<PatchStride>')

    read_learning_info(pb)

    collect_until_whitespace(pb)
    weights, weights_shape = read_binary_matrix(pb)

    collect_until_whitespace(pb)
    biases = read_binary_vector(pb)

    if (patch_stride - kernel) % stride != 0:
        raise Error(
            'Kernel size and stride does not correspond to `patch_stride` attribute of Convolution layer. ' +
            refer_to_faq_msg(93))

    output = biases.shape[0]
    if weights_shape[0] != output:
        raise Error(
            'Weights shape does not correspond to the `output` attribute of Convolution layer. ' +
            refer_to_faq_msg(93))

    mapping_rule = {
        'output': output,
        'patch_stride': patch_stride,
        'bias_term': None,
        'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
        'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
        'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
        'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
        'stride': np.array([1, 1, 1, stride], dtype=np.int64),
        'kernel_spatial': np.array([1, kernel], dtype=np.int64),
        'input_feature_channel': 1,
        'output_feature_channel': 0,
        'kernel_spatial_idx': [2, 3],
        'group': 1,
        'reshape_kernel': True,
    }

    mapping_rule.update(layout_attrs())
    embed_input(mapping_rule, 1, 'weights', weights)
    embed_input(mapping_rule, 2, 'biases', biases)

    mapping_rule['bias_addable'] = len(biases) > 0

    Convolution.update_node_stat(node, mapping_rule)
    return cls.enabled
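# A minimal standalone sketch (not part of the extractor above) illustrating the divisibility
# check on `patch_stride`, `kernel` and `stride`: the patch window of size `kernel` is stepped
# by `stride` across a span of `patch_stride` values, so (patch_stride - kernel) has to be a
# multiple of stride for the patches to tile exactly. The helper name and the numeric values
# are made up for illustration.
def patches_fit(patch_stride: int, kernel: int, stride: int) -> bool:
    return (patch_stride - kernel) % stride == 0


assert patches_fit(patch_stride=40, kernel=8, stride=4)      # (40 - 8) % 4 == 0 -> accepted
assert not patches_fit(patch_stride=40, kernel=8, stride=5)  # (40 - 8) % 5 != 0 -> Error is raised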
def extract(node):
    # Extract pads attribute
    # If pads is not specified, the default value will be set in the infer function
    pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
    assert pads is None or len(pads) % 2 == 0
    final_pad = None
    if pads is not None:
        pads = pads.reshape([2, -1])
        pads = np.transpose(pads)
        final_pad = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)

    # Extract dilations attribute
    # If dilations is not specified, the default value (1) will be set in the infer function
    dilations = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
    final_dilations = np.array([1, 1, *dilations], dtype=np.int64) if dilations is not None else None

    # Extract strides attribute
    # If strides is not specified, the default value (1) will be set in the infer function
    strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
    final_strides = np.array([1, 1, *strides], dtype=np.int64) if strides is not None else None

    kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None)
    auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)
    group = onnx_attr(node, 'group', 'i', default=1, dst_type=lambda x: np.array(x, dtype=np.int64))

    attrs = {
        'op': __class__.op,
        'auto_pad': auto_pad,
        'bias_addable': True,
        'bias_term': None,
        'pad': final_pad,
        'pad_spatial_shape': np.array(pads, dtype=np.int64) if pads is not None else None,
        'dilation': final_dilations,
        'output_spatial_shape': None,
        'output_shape': None,
        'stride': final_strides,
        'group': group,
        'output': None,
        'kernel_spatial': np.array(kernel_shape, dtype=np.int64) if kernel_shape is not None else None,
        'input_feature_channel': 1,
        'output_feature_channel': 0,
        'kernel_spatial_idx': None,  # Will be calculated in infer function (np.array([2, 3]))
        'spatial_dims': None,  # Will be calculated in infer function
        'channel_dims': np.array([1], dtype=np.int64),
        'batch_dims': np.array([0], dtype=np.int64),
        'layout': 'NCHW'
    }

    # update the attributes of the node
    Convolution.update_node_stat(node, attrs)
    return __class__.enabled
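# A minimal sketch (illustration only, values made up) of the pads reshuffling used above.
# ONNX stores pads as [x1_begin, x2_begin, ..., x1_end, x2_end]; reshape([2, -1]) followed by
# transpose yields one [begin, end] pair per spatial axis, which is then prepended with zero
# pads for the batch and channel dimensions.
import numpy as np

pads = np.array([1, 2, 3, 4], dtype=np.int64)       # H_begin, W_begin, H_end, W_end
pads = np.transpose(pads.reshape([2, -1]))          # -> [[1, 3], [2, 4]]
final_pad = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)
print(final_pad)                                    # [[0 0] [0 0] [1 3] [2 4]]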
def extract(cls, node):
    attrs = get_mxnet_layer_attrs(node.symbol_dict)

    scale = attrs.int("scale", 1)
    num_filter = attrs.int("num_filter", 0)
    mode = attrs.str("sample_type", None)

    if mode == 'nearest':
        node_attrs = {
            'factor': attrs.int("scale", 1),
            'mode': mode,
            'antialias': 0,
            'axes': int64_array([2, 3]),
        }
        Interpolate.update_node_stat(node, node_attrs)
    elif mode == 'bilinear':
        """
        Bilinear UpSampling uses the deconvolution algorithm under the hood.
        In MXNet, the bilinear UpSampling op is just a wrapper over the Deconvolution op.
        Inputs data:
            input1 - input data
            input2 - deconvolution weight
        """
        kernel = 2 * scale - scale % 2
        stride = scale
        pad = math.ceil((scale - 1) / 2)
        num_group = num_filter

        node_attrs = {
            'op': __class__.op,
            'type': 'Deconvolution',
            'bias_addable': True,
            'bias_term': False,
            'pad': int64_array([[0, 0], [0, 0], [pad, pad], [pad, pad]]),
            'pad_spatial_shape': int64_array([[pad, pad], [pad, pad]]),
            'dilation': None,
            'output_spatial_shape': None,
            'output_shape': None,
            'stride': int64_array([1, 1, stride, stride]),
            'group': num_group,
            'output': num_filter,
            'kernel_spatial': int64_array([kernel, kernel]),
            'input_feature_channel': 0,
            'output_feature_channel': 1,
            'kernel_spatial_idx': None,
            'reshape_kernel': True,
            'spatial_dims': None,
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW',
            'get_pad': DeconvFrontExtractor.get_pad,
        }
        Convolution.update_node_stat(node, node_attrs)
    return cls.enabled
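# A small illustration (not part of the extractor above) of the deconvolution parameters
# derived for bilinear UpSampling: kernel = 2 * scale - scale % 2, stride = scale,
# pad = ceil((scale - 1) / 2). The loop below just evaluates these formulas for a few
# example scale factors.
import math

for scale in (2, 3, 4):
    kernel = 2 * scale - scale % 2
    stride = scale
    pad = math.ceil((scale - 1) / 2)
    print(scale, kernel, stride, pad)
# scale=2 -> kernel=4, stride=2, pad=1
# scale=3 -> kernel=5, stride=3, pad=1
# scale=4 -> kernel=8, stride=4, pad=2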
def extract(cls, node):
    attr = get_mxnet_layer_attrs(node.symbol_dict)

    kernel = attr.tuple("kernel", int, None)
    stride = attr.tuple("stride", int, tuple(np.ones(len(kernel), dtype=np.int64)))
    padding = attr.tuple("pad", int, tuple(np.zeros(len(kernel), dtype=np.int64)))
    dilate = attr.tuple("dilate", int, tuple(np.ones(len(kernel), dtype=np.int64)))
    group = attr.int("num_group", 1)
    output = attr.int("num_filter", None)
    bias_term = not attr.bool("no_bias", True)
    target_shape = attr.tuple("target_shape", int, None)
    if target_shape:
        target_shape = np.array(target_shape, dtype=np.int64)

    final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None
    node_attrs = {
        'op': __class__.op,
        'type': 'Deconvolution',
        'bias_addable': True,
        'bias_term': bias_term,
        'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),
        'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),
        'dilation': final_dilations,
        'output_spatial_shape': target_shape,
        'original_output_spatial_shape': target_shape,
        'output_shape': None,
        'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),
        'group': group,
        'output': output,
        'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),
        'input_feature_channel': 1,
        'output_feature_channel': 0,
        'kernel_spatial_idx': None,
        'reshape_kernel': True,
        'spatial_dims': None,
        'channel_dims': np.array([1], dtype=np.int64),
        'batch_dims': np.array([0], dtype=np.int64),
        'layout': 'NCHW',
        'get_pad': DeconvFrontExtractor.get_pad,
    }

    output_padding = attr.tuple("adj", int, None)
    if target_shape is None and output_padding:
        node_attrs["output_padding"] = np.array([0, 0, *[s for s in output_padding]], dtype=np.int64)

    # update the attributes of the node
    Convolution.update_node_stat(node, node_attrs)
    return cls.enabled
def extract(cls, node):
    attrs = tf_create_attrs(node, 3, 4)
    attrs.update({
        'op': __class__.op,
        'get_group': lambda node: 1,
        'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel],
        'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([4, 3, 0, 1, 2]),
                                                        inv=int64_array([2, 3, 4, 1, 0]))
    })

    # update the attributes of the node
    Convolution.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node):
    attrs = tf_create_attrs(node, 2, 2)
    attrs.update({
        'op': __class__.op,
        'kernel_spatial_idx': np.array([0, 1], dtype=np.int64),
        'get_group': lambda node: node.kernel_shape[node.output_feature_channel],
        'get_output_feature_dim': lambda node: node.kernel_shape[-1] * node.kernel_shape[-2],
        'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([2, 3, 0, 1]),
                                                        inv=int64_array([2, 3, 0, 1]))
    })

    # update the attributes of the node
    Convolution.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node):
    attrs = tf_create_attrs(node, 2, 3)
    attrs.update({
        'op': __class__.op,
        'get_group': lambda node: node.group if 'group' in node and node.group is not None else
            node.in_node(0).shape[node.channel_dims] // node.kernel_shape[node.input_feature_channel],
        'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel],
        'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([3, 2, 0, 1]),
                                                        inv=int64_array([2, 3, 1, 0]))
    })

    # update the attributes of the node
    Convolution.update_node_stat(node, attrs)
    return cls.enabled
def extract(node):
    attr = get_mxnet_layer_attrs(node.symbol_dict)

    kernel = attr.tuple("kernel", int, None)
    stride = attr.tuple("stride", int, tuple(np.ones(len(kernel), dtype=np.int64)))
    padding = attr.tuple("pad", int, tuple(np.zeros(len(kernel), dtype=np.int64)))
    dilate = attr.tuple("dilate", int, tuple(np.ones(len(kernel), dtype=np.int64)))
    group = attr.int("num_group", 1)
    output = attr.int("num_filter", None)
    bias_term = attr.str("no_bias", 'False') == 'False'

    final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None

    node_attrs = {
        'op': __class__.op,
        'bias_addable': True,
        'bias_term': bias_term,
        'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),
        'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),
        'dilation': final_dilations,
        'output_spatial_shape': None,
        'output_shape': None,
        'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),
        'group': group,
        'output': output,
        'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),
        'input_feature_channel': 1,
        'output_feature_channel': 0,
        'kernel_spatial_idx': None,
        'reshape_kernel': True,
        'spatial_dims': None,
        'channel_dims': np.array([1], dtype=np.int64),
        'batch_dims': np.array([0], dtype=np.int64),
        'layout': 'NCHW',
    }

    # update the attributes of the node
    Convolution.update_node_stat(node, node_attrs)
    return __class__.enabled
def extract(node):
    int64array = lambda x: np.array(x, dtype=np.int64)

    pads = onnx_attr(node, 'pads', 'ints', dst_type=int64array)
    auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)

    if pads is None:
        pads = np.array([0, 0, 0, 0], dtype=np.int64)

    if len(pads) % 2 != 0:
        raise Error(
            'ConvTranspose node {} specifies pads = {} which has odd number of elements. The model is not correct.',
            node.soft_get('name'),
            pads
        )

    pads = pads.reshape([2, -1])
    pads = np.transpose(pads)

    dilations = int64array(onnx_attr(node, 'dilations', 'ints', default=[1, 1]))
    strides = int64array(onnx_attr(node, 'strides', 'ints', default=[1, 1]))
    kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', dst_type=int64array)

    if kernel_shape is None:
        raise Error(
            'ConvTranspose node {} doesn\'t have explicitly defined kernel_shape. It is not supported.',
            node.soft_get('name')
        )

    output_padding = onnx_attr(node, 'output_padding', 'ints', default=[0, 0])
    output_shape = onnx_attr(node, 'output_shape', 'ints', default=None, dst_type=int64array)

    attrs = {
        'type': 'Deconvolution',
        'op': 'Deconv2D',
        'auto_pad': auto_pad,
        'bias_addable': True,
        'bias_term': None,  # will be deduced later; not really needed
        'pad': int64array([[0, 0], [0, 0], pads[0], pads[1]]),
        'pad_spatial_shape': int64array([pads[0], pads[1]]),
        'dilation': int64array([1, 1, dilations[0], dilations[1]]),
        'output_spatial_shape': output_shape,
        'output_shape': None,
        'output_padding': int64array([0, 0, output_padding[0], output_padding[1]]),
        'stride': int64array([1, 1, strides[0], strides[1]]),
        'group': onnx_attr(node, 'group', 'i', default=1),
        'output': None,
        'spatial_dims': int64array([2, 3]),
        'channel_dims': int64array([1]),
        'batch_dims': int64array([0]),
        'kernel_spatial': int64array([kernel_shape[0], kernel_shape[1]]),  # TODO WARNING Don't misuse X/Y
        'input_feature_channel': 0,
        'output_feature_channel': 1,
        'kernel_spatial_idx': np.array([2, 3]),
        'get_pad': ConvTransposeFrontExtractor.get_pad
    }
    attrs.update(layout_attrs())

    # update the attributes of the node
    Convolution.update_node_stat(node, attrs)
    return __class__.enabled
def extract(cls, node):
    pads = onnx_attr(node, 'pads', 'ints', dst_type=int64_array)
    auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)

    if pads is not None:
        if len(pads) % 2 != 0:
            raise Error(
                'ConvTranspose node {} specifies pads = {} which has odd number of elements. The model is not correct.',
                node.soft_get('name'), pads)
        pads = pads.reshape([2, -1])
        pads = np.transpose(pads)

    final_pads = int64_array([[0, 0], [0, 0], *pads]) if pads is not None else None

    dilations = onnx_attr(node, 'dilations', 'ints', default=None)
    final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None

    strides = onnx_attr(node, 'strides', 'ints', default=None)
    final_strides = int64_array([1, 1, *strides]) if strides is not None else None

    kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', dst_type=int64_array)
    if kernel_shape is None:
        raise Error(
            'ConvTranspose node {} doesn\'t have explicitly defined kernel_shape. It is not supported.',
            node.soft_get('name'))

    output_padding = onnx_attr(node, 'output_padding', 'ints', default=None)
    final_output_padding = int64_array([0, 0, *output_padding]) if output_padding is not None else None

    output_shape = onnx_attr(node, 'output_shape', 'ints', default=None, dst_type=int64_array)

    attrs = {
        'type': 'Deconvolution',
        'op': 'Deconv2D',
        'auto_pad': auto_pad,
        'bias_addable': True,
        'bias_term': None,  # will be deduced later; not really needed
        'pad': final_pads,
        'dilation': final_dilations,
        'output_spatial_shape': output_shape,
        'output_shape': None,
        'output_padding': final_output_padding,
        'stride': final_strides,
        'group': onnx_attr(node, 'group', 'i', default=1),
        'output': None,
        'spatial_dims': None,  # Will be calculated in infer function
        'channel_dims': int64_array([1]),
        'batch_dims': int64_array([0]),
        'layout': 'NCHW',
        'input_feature_channel': 0,
        'output_feature_channel': 1,
        'get_pad': ConvTransposeFrontExtractor.get_pad,
        'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel] * node.group,
    }

    # update the attributes of the node
    Convolution.update_node_stat(node, attrs)
    return cls.enabled