@classmethod
def extract(cls, node):
    proto_layer, model_layer = node.pb, node.model_pb

    if not proto_layer:
        raise Error('Protobuf layer cannot be empty')

    conv_param = proto_layer.convolution_param
    conv_type = 'ConvND' if len(proto_layer.bottom) > 1 else 'Conv2D'

    params = conv_set_params(conv_param, conv_type)
    attrs = conv_create_attrs(params)
    attrs.update({
        'op': conv_type,
        'get_group': lambda node: node.group,
        'get_output_feature_dim': lambda node: node.output,
        'weights_index': 1 if conv_type == 'Conv2D' else 2
    })

    # Embed weights and biases as attributes of the node.
    # They will be moved to separate nodes in a dedicated pass.
    attrs.update(
        weights_biases(conv_param.bias_term, model_layer,
                       start_index=len(proto_layer.bottom), proto=conv_param))
    attrs.update(layout_attrs())

    # Update the attributes of the node.
    Convolution.update_node_stat(node, attrs)
    return cls.enabled

def scale_ext(pl, ml):
    param = pl.scale_param
    attrs = {
        'op': 'ScaleShift',
        'type': 'ScaleShift',
        'axis': param.axis,
        'infer': copy_shape_infer
    }
    if ml is None and len(pl.bottom) == 1:
        # Default weights and biases for the Scale layer when the caffemodel
        # file doesn't contain them.
        ml = NamedAttrsClass({
            'blobs': np.array([
                NamedAttrsClass({'data': np.array([1])}),
                NamedAttrsClass({'data': np.array([0])})
            ])
        })
    # Scale with 1 input and 1 or 2 blobs.
    if ml and len(ml.blobs) != 0 and len(pl.bottom) == 1:
        attrs.update(weights_biases(param.bias_term, ml))
    # 2 inputs + bias.
    elif len(pl.bottom) == 2 and param.bias_term:
        if ml is None or len(ml.blobs) == 0:
            # Default bias for the Scale layer with 2 inputs when the
            # caffemodel file doesn't contain it.
            ml = NamedAttrsClass({
                'blobs': np.array([NamedAttrsClass({'data': np.array([0])})])
            })
        embed_input(attrs, 1, 'biases', ml.blobs[0].data)
    return attrs

@classmethod
def extract(cls, node):
    proto_layer, model_layer = node.pb, node.model_pb

    if not proto_layer:
        raise Error('Protobuf layer cannot be empty')

    deconv_param = proto_layer.convolution_param

    params = conv_set_params(deconv_param, 'Deconv2D')
    attrs = conv_create_attrs(params)
    attrs.update({
        'type': 'Deconvolution',
        'op': 'Deconv2D',
        'get_group': lambda node: node.group,
        'get_output_feature_dim': lambda node: node.output,
        'input_feature_channel': 0,
        'output_feature_channel': 1,
    })

    # Embed weights and biases as attributes of the node.
    # They will be moved to separate nodes in a dedicated pass.
    attrs.update(weights_biases(deconv_param.bias_term, model_layer))
    attrs.update(layout_attrs())

    # Update the attributes of the node.
    Convolution.update_node_stat(node, attrs)
    return cls.enabled

@classmethod
def extract(cls, node):
    pb = node.pb
    model = node.model_pb
    param = pb.scale_param
    attrs = {
        'axis': param.axis,
    }
    if model is None and len(pb.bottom) == 1:
        # Default weights and biases for the Scale layer when the caffemodel
        # file doesn't contain them.
        model = NamedAttrsClass({
            'blobs': np.array([
                NamedAttrsClass({'data': np.array([1])}),
                NamedAttrsClass({'data': np.array([0])})
            ])
        })
    # Scale with 1 input and 1 or 2 blobs.
    if model and len(model.blobs) != 0 and len(pb.bottom) == 1:
        attrs.update(weights_biases(param.bias_term, model))
    # 2 inputs + bias.
    elif len(pb.bottom) == 2 and param.bias_term:
        if model is None or len(model.blobs) == 0:
            # Default bias for the Scale layer with 2 inputs when the
            # caffemodel file doesn't contain it.
            model = NamedAttrsClass({
                'blobs': np.array([NamedAttrsClass({'data': np.array([0])})])
            })
        embed_input(attrs, 1, 'biases', model.blobs[0].data)
    ScaleShiftOp.update_node_stat(node, attrs)
    return cls.enabled

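# NamedAttrsClass, used above to synthesize default blobs, is assumed to be a
# thin wrapper exposing dict keys as attributes, so the fabricated defaults
# can stand in for real caffemodel blobs. A minimal sketch under that
# assumption (NamedAttrsClassSketch is a hypothetical name, not the project's
# actual class):
class NamedAttrsClassSketch:
    def __init__(self, attrs: dict):
        # Expose every dict key as an instance attribute.
        self.__dict__.update(attrs)

# Usage mirroring the default-bias synthesis above:
#   model = NamedAttrsClassSketch({'blobs': [NamedAttrsClassSketch({'data': [0]})]})
#   model.blobs[0].data  ->  [0]
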
@classmethod
def extract(cls, node):
    proto_layer = node.pb
    pb_model = node.model_pb
    param = proto_layer.prelu_param

    update_attrs = {
        'channel_shared': int(param.channel_shared)
    }

    variance_norm_caffe_map = {
        0: 'caffe.FillerParameter.FAN_IN',
        1: 'caffe.FillerParameter.FAN_OUT',
        2: 'caffe.FillerParameter.AVERAGE'
    }

    if hasattr(param, 'filler'):
        update_attrs.update({
            'filler_type': param.filler.type,
            'filler_value': int(param.filler.value),
            'min': int(param.filler.min),
            'max': int(param.filler.max),
            'mean': int(param.filler.mean),
            'std': int(param.filler.std),
            'sparse': param.filler.sparse,
            'variance_norm': variance_norm_caffe_map[param.filler.variance_norm]
        })

    mapping_rule = merge_attrs(param, update_attrs)
    mapping_rule.update(weights_biases(False, pb_model))
    mapping_rule.update(layout_attrs())

    # Update the attributes of the node.
    PReLU.update_node_stat(node, mapping_rule)
    return cls.enabled

@staticmethod
def extract(node):
    proto_layer = node.pb
    param = proto_layer.norm_param
    attrs = collect_attributes(param, enable_flattening_nested_params=True)
    attrs.update(weights_biases(False, node.model_pb))

    # Update the attributes of the node.
    Op.get_op_class_by_name(__class__.op).update_node_stat(node, attrs)
    return __class__.enabled

@classmethod
def extract(cls, node):
    proto_layer = node.pb
    param = proto_layer.norm_param
    attrs = collect_attributes(param, enable_flattening_nested_params=True)
    attrs.update(weights_biases(False, node.model_pb))

    # Update the attributes of the node.
    NormalizeOp.update_node_stat(node, attrs)
    return cls.enabled

def inner_product_ext(pb_layer, pb_model):
    param = pb_layer.inner_product_param
    attrs = {
        'type': 'FullyConnected',
        'out-size': param.num_output,
        'layout': 'NCHW',
        'infer': caffe_inner_product
    }
    attrs.update(weights_biases(param.bias_term, pb_model))
    return attrs

@classmethod
def extract(cls, node):
    param = node.pb.inner_product_param
    pb_model = node.model_pb
    attrs = {
        'out-size': param.num_output,
        'transpose_weights': not param.transpose,
    }
    attrs.update(weights_biases(param.bias_term, pb_model))
    FullyConnected.update_node_stat(node, attrs)
    return cls.enabled

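# For context: the extract(cls, node) methods above read like classmethods on
# per-layer extractor classes carrying `op` and `enabled` class attributes
# (hence `return cls.enabled`). A hedged sketch of such a wrapper; the class
# name, the attribute values, and any base class or registration mechanics are
# assumptions here, not taken from the code above:
class InnerProductFrontExtractorSketch:
    op = 'InnerProduct'  # Caffe layer type this extractor handles (assumed)
    enabled = True       # whether the extractor participates in conversion (assumed)

    @classmethod
    def extract(cls, node):
        # Body as in the FullyConnected extractor above.
        return cls.enabled
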
def test_weights_biases_layer_bias(self, embed_input_mock):
    weights_biases(True, FakeModelLayer([[1, 2], [3, 4]]))
    calls = [call({}, 1, 'weights', [1, 2]), call({}, 2, 'biases', [3, 4])]
    embed_input_mock.assert_has_calls(calls)

def test_weights_biases_layer_no_bias(self, embed_input_mock):
    weights_biases(False, FakeModelLayer([[1, 2]]))
    calls = [call({}, 1, 'weights', [1, 2])]
    embed_input_mock.assert_has_calls(calls)

def test_weights_biases_no_layer_no_bias(self):
    res = weights_biases(False, None)
    self.assertEqual(res, {})

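# The tests above pin down the weights_biases contract: given a model layer,
# blob 0 is embedded as 'weights' on port 1 and, when bias_term is set, blob 1
# as 'biases' on port 2; with no model layer it returns an empty dict. Below is
# a minimal sketch consistent with those tests and with the Conv extractor's
# call site (which passes start_index and proto). The _sketch-suffixed names
# are hypothetical; embed_input_sketch only illustrates the assumed contract
# of the real embed_input helper used throughout this section.
def embed_input_sketch(attrs, port, name, blob):
    # Hypothetical stand-in: record the blob under `name` at the given port.
    attrs[name] = {'port': port, 'value': blob}

def weights_biases_sketch(bias_term, model_layer, start_index=1, proto=None):
    attrs = {}
    if not model_layer:
        # No caffemodel layer: nothing to embed.
        return attrs
    blobs = model_layer.blobs
    embed_input_sketch(attrs, start_index, 'weights', blobs[0].data)
    if bias_term:
        embed_input_sketch(attrs, start_index + 1, 'biases', blobs[1].data)
    return attrs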