Example #1
import numpy as np
from collections import OrderedDict

import caffe_pb2  # assumed: generated from Caffe's caffe.proto; the actual import path is project-specific


def parse_caffemodel(model_path):
    """Parse a .caffemodel protobuf and collect per-layer parameters."""
    caffemodel = caffe_pb2.NetParameter()
    with open(model_path, 'rb') as f:
        caffemodel.MergeFromString(f.read())

    # Check trainable layers
    print(*set([(layer.type, len(layer.blobs)) for layer in caffemodel.layer]), sep='\n')

    params = OrderedDict()
    previous_layer_type = None
    for layer in caffemodel.layer:
        print('{} ({}): {}'.format(layer.name, layer.type, len(layer.blobs)))

        # Skip the shared branch
        if 'res075' in layer.name or 'res05' in layer.name:
            continue

        # Convolution or Dilated Convolution
        if 'Convolution' in layer.type:
            params[layer.name] = {}
            params[layer.name]['kernel_size'] = layer.convolution_param.kernel_size[0]
            params[layer.name]['weight'] = list(layer.blobs[0].data)
            if len(layer.blobs) == 2:
                params[layer.name]['bias'] = list(layer.blobs[1].data)
            if len(layer.convolution_param.stride) == 1:  # or []
                params[layer.name]['stride'] = layer.convolution_param.stride[0]
            else:
                params[layer.name]['stride'] = 1
            if len(layer.convolution_param.pad) == 1:  # or []
                params[layer.name]['padding'] = layer.convolution_param.pad[0]
            else:
                params[layer.name]['padding'] = 0
            if isinstance(layer.convolution_param.dilation, int):
                params[layer.name]['dilation'] = layer.convolution_param.dilation
            elif len(layer.convolution_param.dilation) == 1:
                params[layer.name]['dilation'] = layer.convolution_param.dilation[0]
            else:
                params[layer.name]['dilation'] = 1
        # Batch Normalization
        elif 'BatchNorm' in layer.type:
            params[layer.name] = {}
            params[layer.name]['running_mean'] = np.array(layer.blobs[0].data) / layer.blobs[2].data[0]  # NOQA
            params[layer.name]['running_var'] = np.array(layer.blobs[1].data) / layer.blobs[2].data[0]  # NOQA
            params[layer.name]['eps'] = layer.batch_norm_param.eps
            params[layer.name]['momentum'] = layer.batch_norm_param.moving_average_fraction
            batch_norm_layer = layer.name
        # Scale
        elif 'Scale' in layer.type:
            assert previous_layer_type == 'BatchNorm'
            params[batch_norm_layer]['weight'] = list(layer.blobs[0].data)
            params[batch_norm_layer]['bias'] = list(layer.blobs[1].data)

        previous_layer_type = layer.type

    return params
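A minimal usage sketch for the parser above, assuming PyTorch is the conversion target; the model path, layer name, and channel counts below are hypothetical placeholders, not values from the original project:

import torch
import torch.nn as nn

params = parse_caffemodel('deeplab.caffemodel')  # hypothetical path

# Rebuild one parsed convolution; the channel counts are assumed for illustration.
p = params['conv1_1']  # hypothetical layer name
conv = nn.Conv2d(
    in_channels=3,
    out_channels=64,
    kernel_size=p['kernel_size'],
    stride=p['stride'],
    padding=p['padding'],
    dilation=p['dilation'],
    bias='bias' in p,
)
conv.weight.data.copy_(torch.tensor(p['weight']).view_as(conv.weight))
if 'bias' in p:
    conv.bias.data.copy_(torch.tensor(p['bias']))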
Example #2
from collections import OrderedDict

import caffe_pb2  # assumed: generated from Caffe's caffe.proto; the actual import path is project-specific


def parse_caffemodel(model_path):
    caffemodel = caffe_pb2.NetParameter()
    with open(model_path, 'rb') as f:
        caffemodel.MergeFromString(f.read())

    # Check trainable layers
    print(set([(layer.type, len(layer.blobs)) for layer in caffemodel.layer]))

    params = OrderedDict()
    for layer in caffemodel.layer:
        print('{} ({}): {}'.format(layer.name, layer.type, len(layer.blobs)))

        # Convolution or Dilated Convolution
        if 'Convolution' in layer.type:
            params[layer.name] = {}
            params[layer.name]['kernel_size'] = layer.convolution_param.kernel_size[0]
            params[layer.name]['stride'] = layer.convolution_param.stride[0]
            params[layer.name]['weight'] = list(layer.blobs[0].data)
            if len(layer.blobs) == 2:
                params[layer.name]['bias'] = list(layer.blobs[1].data)
            if len(layer.convolution_param.pad) == 1:  # or []
                params[layer.name]['padding'] = layer.convolution_param.pad[0]
            else:
                params[layer.name]['padding'] = 0
            if isinstance(layer.convolution_param.dilation, int):  # or []
                params[layer.name]['dilation'] = layer.convolution_param.dilation
            else:
                params[layer.name]['dilation'] = 1

        # Batch Normalization
        elif 'BN' in layer.type:
            params[layer.name] = {}
            params[layer.name]['weight'] = list(layer.blobs[0].data)
            params[layer.name]['bias'] = list(layer.blobs[1].data)
            params[layer.name]['running_mean'] = list(layer.blobs[2].data)
            params[layer.name]['running_var'] = list(layer.blobs[3].data)
            params[layer.name]['eps'] = layer.bn_param.eps
            params[layer.name]['momentum'] = layer.bn_param.momentum

    return params
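The 'BN' branch above exports weight, bias, running_mean, and running_var as four separate blobs. A minimal sketch of loading one such entry into a PyTorch nn.BatchNorm2d, assuming PyTorch is the target framework; the model path and layer name are hypothetical:

import torch
import torch.nn as nn

params = parse_caffemodel('model.caffemodel')  # hypothetical path
p = params['bn1_1']  # hypothetical 'BN' layer name
num_features = len(p['weight'])  # one scale/shift pair per channel

# Caffe and PyTorch momentum conventions may differ; verify before fine-tuning.
bn = nn.BatchNorm2d(num_features, eps=p['eps'], momentum=p['momentum'])
bn.weight.data.copy_(torch.tensor(p['weight']))
bn.bias.data.copy_(torch.tensor(p['bias']))
bn.running_mean.copy_(torch.tensor(p['running_mean']))
bn.running_var.copy_(torch.tensor(p['running_var']))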
Example #3
from collections import OrderedDict

import caffe_pb2  # assumed: generated from Caffe's caffe.proto; the actual import path is project-specific


def parse_caffemodel(model_path):
    caffemodel = caffe_pb2.NetParameter()
    with open(model_path, "rb") as f:
        caffemodel.MergeFromString(f.read())

    # Check trainable layers
    print(set([(layer.type, len(layer.blobs)) for layer in caffemodel.layer]))

    params = OrderedDict()
    for layer in caffemodel.layer:
        print("{} ({}): {}".format(layer.name, layer.type, len(layer.blobs)))

        # Convolution or Dilated Convolution
        if "Convolution" in layer.type:
            params[layer.name] = {}
            params[layer.name]["kernel_size"] = layer.convolution_param.kernel_size[0]
            params[layer.name]["stride"] = layer.convolution_param.stride[0]
            params[layer.name]["weight"] = list(layer.blobs[0].data)
            if len(layer.blobs) == 2:
                params[layer.name]["bias"] = list(layer.blobs[1].data)
            if len(layer.convolution_param.pad) == 1:  # or []
                params[layer.name]["padding"] = layer.convolution_param.pad[0]
            else:
                params[layer.name]["padding"] = 0
            if isinstance(layer.convolution_param.dilation, int):  # or []
                params[layer.name]["dilation"] = layer.convolution_param.dilation
            else:
                params[layer.name]["dilation"] = 1

        # Batch Normalization
        elif "BN" in layer.type:
            params[layer.name] = {}
            params[layer.name]["weight"] = list(layer.blobs[0].data)
            params[layer.name]["bias"] = list(layer.blobs[1].data)
            params[layer.name]["running_mean"] = list(layer.blobs[2].data)
            params[layer.name]["running_var"] = list(layer.blobs[3].data)
            params[layer.name]["eps"] = layer.bn_param.eps
            params[layer.name]["momentum"] = layer.bn_param.momentum

    return params
Example #4
File: convert.py  Project: z0322/CONTA
import numpy as np
from collections import Counter, OrderedDict

import caffe_pb2  # assumed: generated from Caffe's caffe.proto; the actual import path is project-specific


def parse_caffemodel(model_path):
    caffemodel = caffe_pb2.NetParameter()
    with open(model_path, "rb") as f:
        caffemodel.MergeFromString(f.read())

    # Check trainable layers
    print(
        *Counter([(layer.type, len(layer.blobs))
                  for layer in caffemodel.layer]).most_common(),
        sep="\n",
    )

    params = OrderedDict()
    previous_layer_type = None
    for layer in caffemodel.layer:
        # Skip the shared branch
        if "res075" in layer.name or "res05" in layer.name:
            continue

        print(
            "\033[34m[Caffe]\033[00m",
            "{} ({}): {}".format(layer.name, layer.type, len(layer.blobs)),
        )

        # Convolution or Dilated Convolution
        if "Convolution" in layer.type:
            params[layer.name] = {}
            params[layer.name]["kernel_size"] = layer.convolution_param.kernel_size[0]
            params[layer.name]["weight"] = list(layer.blobs[0].data)
            if len(layer.blobs) == 2:
                params[layer.name]["bias"] = list(layer.blobs[1].data)
            if len(layer.convolution_param.stride) == 1:  # or []
                params[layer.name]["stride"] = layer.convolution_param.stride[0]
            else:
                params[layer.name]["stride"] = 1
            if len(layer.convolution_param.pad) == 1:  # or []
                params[layer.name]["padding"] = layer.convolution_param.pad[0]
            else:
                params[layer.name]["padding"] = 0
            if isinstance(layer.convolution_param.dilation, int):
                params[layer.name]["dilation"] = layer.convolution_param.dilation
            elif len(layer.convolution_param.dilation) == 1:
                params[layer.name]["dilation"] = layer.convolution_param.dilation[0]
            else:
                params[layer.name]["dilation"] = 1
        # Fully-connected
        elif "InnerProduct" in layer.type:
            params[layer.name] = {}
            params[layer.name]["weight"] = list(layer.blobs[0].data)
            if len(layer.blobs) == 2:
                params[layer.name]["bias"] = list(layer.blobs[1].data)
        # Batch Normalization
        elif "BatchNorm" in layer.type:
            params[layer.name] = {}
            params[layer.name]["running_mean"] = (
                np.array(layer.blobs[0].data) / layer.blobs[2].data[0])
            params[layer.name]["running_var"] = (
                np.array(layer.blobs[1].data) / layer.blobs[2].data[0])
            params[layer.name]["eps"] = layer.batch_norm_param.eps
            params[layer.name]["momentum"] = layer.batch_norm_param.moving_average_fraction
            params[layer.name]["num_batches_tracked"] = np.array(0)
            batch_norm_layer = layer.name
        # Scale
        elif "Scale" in layer.type:
            assert previous_layer_type == "BatchNorm"
            params[batch_norm_layer]["weight"] = list(layer.blobs[0].data)
            params[batch_norm_layer]["bias"] = list(layer.blobs[1].data)
        elif "Pooling" in layer.type:
            params[layer.name] = {}
            params[layer.name]["kernel_size"] = layer.pooling_param.kernel_size
            params[layer.name]["stride"] = layer.pooling_param.stride
            params[layer.name]["padding"] = layer.pooling_param.pad

        previous_layer_type = layer.type

    return params
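Examples #1 and #4 fold Caffe's BatchNorm/Scale pair into a single entry: the BatchNorm layer contributes the moving statistics (rescaled by the factor stored in blobs[2]), and the following Scale layer contributes the affine weight and bias under the BatchNorm layer's name. A minimal sketch of loading a merged entry produced by Example #4 into a PyTorch nn.BatchNorm2d; the model path and layer name are hypothetical:

import torch
import torch.nn as nn

params = parse_caffemodel("deeplab.caffemodel")  # hypothetical path
p = params["bn_conv1"]  # hypothetical BatchNorm layer name
num_features = len(p["weight"])  # affine terms contributed by the Scale layer

# momentum is left at PyTorch's default; Caffe's moving_average_fraction follows a
# different update convention, so map it explicitly if the model will be fine-tuned.
bn = nn.BatchNorm2d(num_features, eps=p["eps"])
bn.load_state_dict({
    "weight": torch.tensor(p["weight"]),
    "bias": torch.tensor(p["bias"]),
    "running_mean": torch.tensor(p["running_mean"]),
    "running_var": torch.tensor(p["running_var"]),
    "num_batches_tracked": torch.tensor(p["num_batches_tracked"]),
})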