def load_caffe_proto_model(caffe_pb2, proto_path: str, model_path: [str, None] = None):
    """Parse a Caffe topology (.prototxt) and, optionally, its binary weights (.caffemodel).

    :param caffe_pb2: generated protobuf module holding the Caffe schema (NetParameter etc.)
    :param proto_path: path to the text-format .prototxt topology file
    :param model_path: optional path to the binary .caffemodel weights file
    :return: tuple (proto, model) of NetParameter messages; model is None when
             model_path is not given
    :raises FrameworkError: when either file cannot be parsed
    """
    # 1. python protobuf is used
    if api_implementation._implementation_type == 'python':
        message = 'Please expect that Model Optimizer conversion might be slow. ' \
                  'You are currently using Python protobuf library implementation. \n'
        try:
            from google.protobuf.pyext import cpp_message
            # Check os windows and env variable PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION
            if os.name == 'nt' and os.environ.get('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', default='') != 'cpp':
                # 2. cpp implementation is available but not used
                message += 'However, cpp implementation is available, you can boost ' \
                           'model conversion by setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION env variable to cpp. \n' \
                           'Run: set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp \n'
        except ImportError:
            # 3. cpp implementation is not available
            # (fixed: a space was missing between 'toolkit' and 'or', gluing the words together)
            message += 'However you can use the C++ protobuf implementation that is supplied with the OpenVINO toolkit ' \
                       'or build protobuf library from sources. \n' \
                       'Navigate to "install_prerequisites" folder and run: ' \
                       'python -m easy_install protobuf-3.5.1-py($your_python_version)-win-amd64.egg \n' \
                       'set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp'
        print(message + '\n\n' + refer_to_faq_msg(80))

    # Read proto layers
    try:
        proto = caffe_pb2.NetParameter()
        with open(proto_path, "r") as file:
            text_format.Merge(str(file.read()), proto)
    except Exception as e:
        log.error('Exception message: {}\n\n'.format(e) +
                  ' Possible reasons:\n' +
                  ' 1. {} does not exist\n'.format(proto_path) +
                  ' 2. {} does not have a valid structure, for example, it was downloaded as html\n'.format(
                      proto_path) +
                  ' 3. {} contains custom layers or attributes that are not supported\n'.format(proto_path) +
                  ' in Model Optimizer by default.\n\n' +
                  ' After you made sure that {} has a valid structure and still see this issue, then\n'.format(
                      proto_path) +
                  ' you need to generate a python parser for caffe.proto that was used when the model\n' +
                  ' was created.\n' +
                  ' Run "python3 generate_caffe_pb2.py --input_proto ${PATH_TO_CAFFE}/src/caffe/proto/caffe.proto"' +
                  refer_to_faq_msg(1) + '\n\n',
                  extra={'framework_error': True})
        raise FrameworkError('Model Optimizer is not able to parse {}'.format(proto_path)) from e

    # Read model layer if exists
    model = None
    try:
        if model_path:
            model = caffe_pb2.NetParameter()
            with open(model_path, "rb") as infile:
                # Memory-map the weights file instead of copying it into a bytes
                # object; close the map explicitly (the original bound it to
                # 'map', shadowing the builtin, and leaked the mapping).
                mapped_weights = mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)
                try:
                    model.MergeFromString(mapped_weights)
                finally:
                    mapped_weights.close()
    except Exception as e:
        log.error('Exception message: {}\n\n'.format(e) +
                  ' Possible reasons:\n' +
                  ' 1. {} does not exist\n'.format(model_path) +
                  ' 2. {} does not have a valid structure\n'.format(model_path),
                  extra={'framework_error': True})
        raise FrameworkError('Model Optimizer is not able to parse {}'.format(model_path)) from e

    return proto, model
def test_caffe_same_name_layer(self):
    # Parse a net with two inputs plus two same-named convolution layers.
    proto = caffe_pb2.NetParameter()
    text_format.Merge(proto_str_multi_input + proto_same_name_layers, proto)
    graph, input_shapes = caffe_pb_to_nx(proto, None)
    # 4 nodes because: 2 inputs + 2 convolutions
    # (the original comment said "6 nodes", which contradicts both the
    # enumeration and the assertion below)
    np.testing.assert_equal(len(graph.nodes()), 4)
def test_caffe_same_name_layer(self):
    """Two layers sharing one name must still yield separate graph nodes."""
    net_proto = caffe_pb2.NetParameter()
    text_format.Merge(proto_str_multi_input + proto_same_name_layers, net_proto)
    result_graph = Graph()
    caffe_pb_to_nx(result_graph, net_proto, None)
    # 6 nodes because: 2 inputs + 2 convolutions + 2 identity nodes used as fake outputs
    node_count = len(result_graph.nodes())
    np.testing.assert_equal(node_count, 6)
def test_caffe_pb_to_nx_one_input(self):
    """A single-input proto must report the declared input shape."""
    net = caffe_pb2.NetParameter()
    text_format.Merge(proto_str_one_input, net)
    shapes = caffe_pb_to_nx(Graph(), net, None)
    expected = {'Input0': np.array([1, 3, 224, 224])}
    for name, shape in expected.items():
        np.testing.assert_array_equal(shapes[name], shape)
def test_caffe_pb_to_standart_input(self):
    """A standard 'data' input layer must produce the declared shape."""
    net = caffe_pb2.NetParameter()
    text_format.Merge(proto_str_input + layer_proto_str, net)
    # The graph itself is not inspected here, only the extracted shapes.
    _, shapes = caffe_pb_to_nx(net, None)
    expected = {'data': np.array([1, 3, 224, 224])}
    for name, shape in expected.items():
        np.testing.assert_array_equal(shapes[name], shape)
def test_caffe_pb_to_nx_old_styled_multi_input(self):
    """Old-style multi-input declarations must be rejected with Error."""
    net = caffe_pb2.NetParameter()
    text_format.Merge(proto_str_old_styled_multi_input + layer_proto_str, net)
    with self.assertRaises(Error):
        caffe_pb_to_nx(Graph(), net, None)
def get_layers(proto):
    """Return the repeated layer container of a Caffe NetParameter.

    Newer Caffe protos store layers in the 'layer' field, older ones in
    'layers'; whichever is populated is returned.

    :param proto: a parsed NetParameter message
    :return: the populated repeated-layer field
    :raises ValueError: when neither field is populated (the original
        printed the message and fell through to an implicit None return,
        which later surfaced as a confusing TypeError at the caller)
    """
    if len(proto.layer):
        return proto.layer
    if len(proto.layers):
        return proto.layers
    raise ValueError(
        'Invalid proto file: there is neither "layer" nor "layers" top-level messages. '
    )


def main():
    """Print a per-layer / per-blob summary of the VGG-16 caffemodel."""
    # the model is downloaded from
    # http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel
    caffemodel_filename = './vgg16/VGG_ILSVRC_16_layers.caffemodel'
    model = caffe_pb2.NetParameter()
    # Use a context manager so the file is closed even if parsing fails.
    with open(caffemodel_filename, 'rb') as f:
        model.ParseFromString(f.read())
    # print(str(model))
    proto_layers = get_layers(model)
    for i, layer in enumerate(proto_layers):
        print('----------- i=' + str(i))
        # print(str(layer))
        print(layer.name)
        print(layer.type)
        for j, node in enumerate(layer.blobs):
            print(' ----------- j=' + str(j))
            print(node.num)
            print(node.channels)


# Guard the script body so importing this module has no side effects.
if __name__ == "__main__":
    main()