Example #1
def make_channel_pruning_model(input_file, output_file, bottom_layer, rank):
    with open(input_file, 'r') as fp:
        net = NetParameter()
        text_format.Parse(fp.read(), net)

    def _create_new(name, src_layer):
        # Copy src_layer, rename it, and clear num_output so it can be reset.
        layer_copy = LayerParameter()
        layer_copy.CopyFrom(src_layer)
        layer_copy.name = name
        layer_copy.convolution_param.ClearField('num_output')
        return layer_copy

    new_layers = []
    for layer in net.layer:
        if layer.name != bottom_layer:
            new_layers.append(layer)
        else:
            new_conv = _create_new(bottom_layer, layer)
            new_conv.convolution_param.num_output = rank
            new_layers.append(new_conv)

    new_net = NetParameter()
    new_net.CopyFrom(net)
    del new_net.layer[:]
    new_net.layer.extend(new_layers)
    with open(output_file, 'w') as fp:
        fp.write(text_format.MessageToString(new_net))
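
A minimal usage sketch, assuming the imports the function relies on; the paths, layer name, and rank are placeholders:

from caffe.proto.caffe_pb2 import NetParameter, LayerParameter
from google.protobuf import text_format

# Shrink 'conv3' to 64 output channels in a copy of the network definition.
make_channel_pruning_model('deploy.prototxt', 'deploy_pruned.prototxt',
                           bottom_layer='conv3', rank=64)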
Example #2
def resize_network(netdef, name2num, verbose=True):
    """Change number of channels in convolutions
    netdef: network params
    name2num: maps from channel name to new number of channels
    verbose: if True, display changes
    """
    new_layers = []
    for l in netdef.layer:
        newl = LayerParameter()
        newl.CopyFrom(l)
        if l.name in name2num:
            if l.type == 'Convolution':
                if verbose:
                    print(l.name + ': \t' + 'Changing num_output from ' +
                          str(l.convolution_param.num_output) + ' to ' +
                          str(name2num[l.name]))
                newl.convolution_param.num_output = name2num[l.name]
                if newl.convolution_param.group > 1:
                    newl.convolution_param.group = name2num[l.name]
            else:
                if verbose:
                    print('Layer ' + l.name + ' is not convolution, skipping')
        new_layers.append(newl)
    new_pnet = NetParameter()
    new_pnet.CopyFrom(netdef)
    del new_pnet.layer[:]
    new_pnet.layer.extend(new_layers)
    return new_pnet
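
A hedged usage sketch for resize_network; the path and the layer-to-channel mapping are placeholders:

from caffe.proto.caffe_pb2 import NetParameter, LayerParameter
from google.protobuf import text_format

# Parse a deploy prototxt, shrink two conv layers, write the result back out.
netdef = NetParameter()
with open('deploy.prototxt', 'r') as fp:
    text_format.Parse(fp.read(), netdef)
resized = resize_network(netdef, {'conv1': 32, 'conv2': 64})
with open('deploy_resized.prototxt', 'w') as fp:
    fp.write(text_format.MessageToString(resized))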
Example #3
 def deploy_head(self):
     net = NetParameter()
     net.name = self.name
     net.input.append("data")
     inshape = net.input_shape.add()
     inshape.dim.append(self.batch_size)
     inshape.dim.append(self.channels)
     inshape.dim.append(self.infmt.crop_size)
     inshape.dim.append(self.infmt.crop_size)
     return net
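
For reference, a hedged sketch of what this head serializes to; 'builder' stands in for an instance of the class this method belongs to, and the dimensions are assumed:

from google.protobuf import text_format

# With batch_size=1, channels=3, crop_size=227 the head prints roughly as:
#   input: "data"
#   input_shape { dim: 1 dim: 3 dim: 227 dim: 227 }
print(text_format.MessageToString(builder.deploy_head()))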
Example #4
    def test_copy_msg(self):

        x = NetParameter()
        assert_is_not_none(x)
        y = pu.copy_msg(x, NetParameter)
        assert_is_not(x, y)
        assert_is_not_none(y)
Example #5
def make_lowrank_model(input_file, conf, output_file):
    with open(input_file, 'r') as fp:
        net = NetParameter()
        pb.text_format.Parse(fp.read(), net)
    new_layers = []
    for layer in net.layer:
        if layer.name not in conf:
            new_layers.append(layer)
            continue
        v, h = vh_decompose(layer, conf[layer.name])
        new_layers.extend([v, h])
    new_net = NetParameter()
    new_net.CopyFrom(net)
    del new_net.layer[:]
    new_net.layer.extend(new_layers)
    with open(output_file, 'w') as fp:
        fp.write(pb.text_format.MessageToString(new_net))
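
A usage sketch, assuming vh_decompose is in scope and conf maps layer names to decomposition ranks; the paths, names, and ranks are placeholders:

make_lowrank_model('deploy.prototxt',
                   conf={'conv2': 16, 'conv3': 32},
                   output_file='deploy_lowrank.prototxt')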
Example #6
def create_approx_netdef(input_file, output_file, btd_config):
    with open(input_file, 'r') as fp:
        net = NetParameter()
        txtf.Merge(fp.read(), net)
    new_layers = []
    for layer in net.layer:
        if layer.name not in btd_config:
            new_layers.append(layer)
            continue
        s, t, r = btd_config[layer.name]
        a, b, c = decompose2abc(layer, s, t, r)
        new_layers.extend([a, b, c])
    new_net = NetParameter()
    new_net.CopyFrom(net)
    del new_net.layer[:]
    new_net.layer.extend(new_layers)
    with open(output_file, 'w') as fp:
        fp.write(txtf.MessageToString(new_net))
Example #7
def main():
    # Use first line of file docstring as description if a file docstring exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--bitmask', type=str, required=True)
    parser.add_argument('--output', type=str, required=True)
    parser.add_argument('--snapshot_prefix',
                        default='{output}/models/alexnet_{bitmask}')
    parser.add_argument('--solver',
                        default='models/bvlc_alexnet/solver.prototxt')
    parser.add_argument('--trainval',
                        default='models/bvlc_alexnet/train_val.prototxt')
    # Do we need the deploy?
    parser.add_argument('--deploy',
                        default='models/bvlc_alexnet/deploy.prototxt')

    args = parser.parse_args()
    args.snapshot_prefix = args.snapshot_prefix.format(output=args.output,
                                                       bitmask=args.bitmask)

    bitmask = args.bitmask
    assert len(bitmask) == len(LAYERS), (
        'Expected {} bits in bitmask, received {}.'.format(
            len(LAYERS), len(bitmask)))
    if not set(bitmask) <= {'0', '1'}:
        raise ValueError('Invalid bitmask: {}'.format(bitmask))

    zeroed_layers = set(layer for i, layer in enumerate(LAYERS)
                        if bitmask[i] == '0')

    # Parse solver
    solver = SolverParameter()
    parse_prototxt(solver, args.solver)
    solver.snapshot_prefix = args.snapshot_prefix
    solver.net = '{}/train_val.prototxt'.format(args.output)

    # Parse trainval NetParameter
    trainval = NetParameter()
    parse_prototxt(trainval, args.trainval)

    for layer in trainval.layer:
        if layer.name in zeroed_layers:
            print('Zeroing {}'.format(layer.name))
            for param in layer.param:
                param.lr_mult = 0
                param.decay_mult = 0

    mkdir_p(args.output)
    write_prototxt(trainval, '{}/train_val.prototxt'.format(args.output))
    write_prototxt(solver, '{}/solver.prototxt'.format(args.output))
    copyfile(args.deploy, '{}/deploy.prototxt'.format(args.output))
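
main() relies on parse_prototxt, write_prototxt, and mkdir_p helpers (plus a LAYERS list and shutil's copyfile) that the snippet does not show; below is a plausible minimal sketch of the three helpers, not the original code:

import errno
import os

from google.protobuf import text_format


def parse_prototxt(message, path):
    # Fill `message` in place from a text-format prototxt file.
    with open(path, 'r') as f:
        text_format.Parse(f.read(), message)


def write_prototxt(message, path):
    # Serialize `message` back to text format.
    with open(path, 'w') as f:
        f.write(text_format.MessageToString(message))


def mkdir_p(path):
    # Behave like `mkdir -p`: ignore the error if the directory exists.
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise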
Example #8
def write_model_file(model_file, template_file, test_file, root_folder):
    param = NetParameter()
    with open(template_file, 'r') as f:
        prototxt.Merge(f.read(), param)
    for layer in param.layer:
        if layer.molgrid_data_param.source == 'TESTFILE':
            layer.molgrid_data_param.source = test_file
        if layer.molgrid_data_param.root_folder == 'DATA_ROOT':
            layer.molgrid_data_param.root_folder = root_folder
    with open(model_file, 'w') as f:
        f.write(str(param))
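
A usage sketch with placeholder paths; the template is expected to contain the TESTFILE and DATA_ROOT markers this function substitutes:

write_model_file('model.prototxt', 'template.model.prototxt',
                 test_file='test0.types', root_folder='/data/structures')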
Example #9
 def __init__(self, net_file):
     self.node_id = 0
     self.nodes_list = []
     self.bottom_nodes_dict = {}
     self.top_nodes_dict = {}
     self.layer_id_to_node_dict = {}
     self.in_node_num_dict_up = {}
     self.in_node_num_dict_down = {}
     self.channel_dict = {}
     with open(net_file, 'r') as fp:
         self.net = NetParameter()
         pb.text_format.Parse(fp.read(), self.net)
Example #10
def parse_prototxt(model_file, layer_name):
    with open(model_file) as fp:
        net = NetParameter()
        text_format.Parse(fp.read(), net)
    for i, layer in enumerate(net.layer):
        if layer.name != layer_name: continue
        blob = layer.top[0]
        for j in range(i + 1, len(net.layer)):
            if blob in net.layer[j].bottom:
                next_layer = net.layer[j].name
                return blob, next_layer
    raise ValueError(
        "Cannot find layer {} or its next layer".format(layer_name))
Example #11
def write_model_file(model_file,
                     template_file,
                     train_file,
                     test_file,
                     root_folder,
                     avg_rotations=False,
                     train_file2=None,
                     ratio=None,
                     root_folder2=None,
                     test_root_folder=None):
    '''Writes a model prototxt file based on a provided template file
    with certain placeholders replaced in each MolGridDataLayer.
    For the source parameter, "TRAINFILE" is replaced with train_file
    and "TESTFILE" is replaced with test_file.
    For the root_folder parameter, "DATA_ROOT" is replaced with root_folder,
    unless the layer is TEST phase and test_root_folder is provided,
    then it is replaced with test_root_folder.
    For the source2 parameter, "TRAINFILE2" is replaced with train_file2,
    and in the same layer the source_ratio parameter is set to ratio.
    For the root_folder2 parameter, "DATA_ROOT2" is replaced with root_folder2.
    If the avg_rotations argument is set and the layer is TEST phase,
    the rotate parameter is set to 24.'''
    netparam = NetParameter()
    with open(template_file, 'r') as f:
        prototxt.Merge(f.read(), netparam)
    for layer in netparam.layer:
        if layer.type == "NDimData":
            param = layer.ndim_data_param
        elif layer.type == "MolGridData":
            param = layer.molgrid_data_param
        else:
            continue
        if param.source == 'TRAINFILE':
            param.source = train_file
        if param.source == 'TESTFILE':
            param.source = test_file
        if param.root_folder == 'DATA_ROOT':
            if test_root_folder and 'TEST' in str(layer):
                param.root_folder = test_root_folder
            else:
                param.root_folder = root_folder
        if train_file2 and param.source2 == 'TRAINFILE2':
            param.source2 = train_file2
            param.source_ratio = ratio
        if root_folder2 and param.root_folder2 == 'DATA_ROOT2':
            param.root_folder2 = root_folder2
        if avg_rotations and 'TEST' in str(layer):
            param.rotate = 24  #TODO axial rotations aren't working
            #layer.molgrid_data_param.random_rotation = True
    with open(model_file, 'w') as f:
        f.write(str(netparam))
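
A usage sketch; every path below is a placeholder:

write_model_file('train_test.prototxt', 'template.model.prototxt',
                 train_file='train0.types', test_file='test0.types',
                 root_folder='/data/structures', avg_rotations=True)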
Example #12
def get_layer_params(input_file, layer_name):

    with open(input_file, 'r') as fp:
        net_file = NetParameter()
        text_format.Parse(fp.read(), net_file)

    pad = 0
    k_w = 0
    k_h = 0

    for layer in net_file.layer:
        if layer.name == layer_name:
            conv_param = layer.convolution_param
            # pad and kernel_size are repeated fields in recent caffe.proto,
            # so take the first entry (a square kernel is assumed).
            pad = conv_param.pad[0] if len(conv_param.pad) else 0
            k_w = conv_param.kernel_size[0] if len(conv_param.kernel_size) else 0
            k_h = k_w
    return pad, k_w, k_h
Example #13
def get_layer_params(input_file, layer_name):
    with open(input_file, 'r') as fp:
        net_file = NetParameter()
        text_format.Parse(fp.read(), net_file)

    pad_w = 0
    pad_h = 0
    k_w = 0
    k_h = 0
    for layer in net_file.layer:
        if layer.name == layer_name:
            pad_w = layer.convolution_param.pad_w
            pad_h = layer.convolution_param.pad_h
            k_w = layer.convolution_param.kernel_w
            k_h = layer.convolution_param.kernel_h

    return pad_h, pad_w, k_h, k_w
Example #14
import sys
sys.path.insert(0,'/home/dingzhonggan/workspace/scene/caffe-optimize/python')
import caffe
import google.protobuf as pb
from caffe.proto.caffe_pb2 import NetParameter, LayerParameter

caffe_config_filename = 'deploy_darknet_nobn.prototxt'
modelname = 'scene_darknet_nobn.caffemodel'
origin_caffe_config_filename = 'deploy_darknet.prototxt'
origin_modelname = 'scene_darknet_gooddata_iter_6000.caffemodel'


net = NetParameter()
with open(origin_caffe_config_filename, 'r') as fp:
    pb.text_format.Parse(fp.read(), net)

net_model = caffe.Net(caffe_config_filename, caffe.TEST)

origin_net_model = caffe.Net(origin_caffe_config_filename, origin_modelname,
                             caffe.TEST)

for layer in net.layer:
    if layer.type == 'Scale':
        bn_layer_name = layer.name.replace('scale', 'bn')
        # Caffe BatchNorm blobs: [0] running mean, [1] running variance,
        # [2] the moving-average scale factor both statistics are stored under.
        mean = origin_net_model.params[bn_layer_name][0].data
        var = origin_net_model.params[bn_layer_name][1].data
        value = origin_net_model.params[bn_layer_name][2].data
        mean = mean / value
        var = var / value
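
The snippet is cut off before the folded statistics are consumed. A hedged sketch of the conventional next step, absorbing the BatchNorm statistics into the matching Scale layer of the no-BN model (the eps value is an assumption and must match the prototxt):

import numpy as np

def fold_bn_into_scale(scale_params, mean, var, eps=1e-5):
    # scale_params: the Scale layer's [gamma, beta] blobs in the no-BN model.
    gamma = scale_params[0].data
    beta = scale_params[1].data
    std = np.sqrt(var + eps)
    scale_params[0].data[...] = gamma / std
    scale_params[1].data[...] = beta - gamma * mean / std

Inside the loop above this would be called as fold_bn_into_scale(net_model.params[layer.name], mean, var), followed by net_model.save(modelname) once all Scale layers are folded.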
Example #15
def make_decomp_file(input_file, conf, output_file, sim_type, weight,
                     output_weight, args):
    with open(input_file, 'r') as fp:
        net = NetParameter()
        pbt.Parse(fp.read(), net)

    filename = '../../base_models/{}/eigenvalue.conf'.format(args.net_type)
    f = open(filename, 'w')
    f.write('{\n\t"layer":{\n')

    i = 1
    idx = 0
    new_layers = []
    layer_dic = {}
    for layer in net.layer:
        if layer.name in conf["vbmf"]["cv"]:
            print('layer.name = {}'.format(layer.name))
            layer_dic[layer.name] = idx
            bottom = layer.bottom[0]
            k = conf["vbmf"]["cv"][layer.name]
            if bottom == 'data':
                a, b = cv_channel(layer, k)
                new_layers.extend([a, b])
            else:
                g = layer.convolution_param.group
                a, b = cv_spatial(layer, k * g)
                new_layers.extend([a, b])
            idx += 1
        elif layer.name in conf["vbmf"]["fc"]:
            layer_dic[layer.name] = idx
            k = conf["vbmf"]["fc"][layer.name]
            a, b = fc_decomp_2d(layer, k)
            new_layers.extend([a, b])
        else:
            new_layers.append(layer)

    new_net = NetParameter()
    new_net.CopyFrom(net)
    del new_net.layer[:]
    new_net.layer.extend(new_layers)

    # File Write - eigenvalue.conf
    sorted_dic = sorted(layer_dic.items(), key=operator.itemgetter(1))
    for j in range(len(sorted_dic) - 1):
        f.write('"{}": {},\n'.format(sorted_dic[j][0], sorted_dic[j][1]))
    f.write('"{}": {}\n {}'.format(sorted_dic[len(sorted_dic) - 1][0],
                                   sorted_dic[len(sorted_dic) - 1][1], '},'))
    f.close()

    # File Write - prototxt
    out = os.path.splitext(
        os.path.basename(output_file))[0] + '_{}.prototxt'.format(i)
    out_dir = os.path.dirname(output_file)
    filename = '{}/{}'.format(out_dir, out)
    with open(filename, 'w') as fp:
        fp.write(pb.text_format.MessageToString(new_net))
    print('[Total] Wrote compressed prototxt to: {:s}'.format(filename))

    if sim_type == 'weight':
        out = os.path.splitext(
            os.path.basename(output_weight))[0] + '_{}.caffemodel'.format(i)
        out_dir = os.path.dirname(output_weight)
        out_weight = '{}/{}'.format(out_dir, out)
        decomp_weights(net.layer, input_file, weight, conf, filename,
                       out_weight, i, args)
Example #16
import argparse

import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format

from caffe.proto.caffe_pb2 import NetParameter
#from caffe_pb2 import NetParameter

tf.disable_eager_execution()

parser = argparse.ArgumentParser()
parser.add_argument('--input_model', default='liteflownet.caffemodel')
parser.add_argument('--output_model', default='model')

args = parser.parse_args()

caffe_weights = {}

with open(args.input_model, 'rb') as fp:
    net = NetParameter()
    net.ParseFromString(fp.read())
    #text_format.Merge(fp.read(), net)

for idx, layer in enumerate(net.layer):
    #print layer.type
    if layer.type == 'Convolution' or layer.type == 'Deconvolution':
        conv_param = layer.convolution_param
        weights = net.layer[idx].blobs[0].data
        #print net.layer[idx].blobs[0].shape.dim
        weights = np.reshape(weights, net.layer[idx].blobs[0].shape.dim)
        caffe_weights[layer.name + '/weight'] = np.array(weights)

        if len(net.layer[idx].blobs) > 1:
            bias = net.layer[idx].blobs[1].data
            caffe_weights[layer.name + '/bias'] = np.array(bias)
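
A plausible final step (a sketch): persist the collected arrays so downstream TensorFlow code can load them from disk.

# Write one .npz archive keyed by the '<layer>/weight' and '<layer>/bias' names.
np.savez(args.output_model + '.npz', **caffe_weights)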
Example #17
'''
                      [3,1,1,512,1,0],
                      [3,1,1,512,1,0],
                      [2,2,0,0,1,1],
                      [3,1,1,512,1,0],
                      [3,1,1,512,1,0],
                      [3,1,1,512,1,0],
                      [2,2,0,0,1,1],
                      [0,0,0,4096,1,4],
                      [0,0,0,4096,1,4],
                      [0,0,0,1000,1,4]
])
'''
net_name = 'res_152'
net_file = r'\\GCR\scratch\B99\v-guoxie\proto\residual_net\res_net_152.prototxt'
with open('record.txt', 'a+') as fid_out:
    net_param = NetParameter()
    with open(net_file, 'r') as fid:
        text_format.Merge(str(fid.read()), net_param)
    layer_dict = OrderedDict()

    for idx, layer in enumerate(net_param.layer):
        layer_dict[layer.name] = idx

    net = caffe.Net(net_file, caffe.TEST)

    total_mem = np.double(0)
    total_top_vec = np.double(0)
    total_param_mem = np.double(0)
    # input_data memory
    input_size = np.array(net.blobs['data'].data.shape)
    input_size[0] = 256
Example #18
def decompose_layer_fc(txt_idx, orig_net, priv_conf, net, priv_net, orig_file,
                       orig_weight, output_file, output_weight, i, log,
                       net_type, check_set, layer_list):

    filename = 'stage1_buf_{}.txt'.format(txt_idx)
    new_conf = priv_conf
    acc = {}
    loss = {}

    for num in range(len(check_set)):
        new_layers = []
        buf_f = open(filename, 'a+')

        for layer in net.layer:

            if layer.name in priv_conf["vbmf"]["cv"]:
                idx = layer_list.index(layer.name)
                g = layer.convolution_param.group
                bottom = layer.bottom[0]
                weight_a = priv_net.params[layer.name + '_v'][0].data
                weight_b = priv_net.params[layer.name + '_h'][0].data
                k = check_set[num][idx] * g
                if bottom == 'data':
                    a, b = cv_channel(layer, k)
                else:
                    a, b = cv_spatial(layer, k)
                new_layers.extend([a, b])
                new_conf["vbmf"]["cv"][layer.name] = k

            elif layer.name in priv_conf["vbmf"]["fc"]:
                idx = layer_list.index(layer.name)
                weight_a = priv_net.params[layer.name + '_v'][0].data
                weight_b = priv_net.params[layer.name + '_h'][0].data
                k = check_set[num][idx]
                a, b = fc_decomp_2d(layer, k)
                new_layers.extend([a, b])
                new_conf["vbmf"]["fc"][layer.name] = k

            else:
                new_layers.append(layer)
                continue

        new_net = NetParameter()
        new_net.CopyFrom(net)
        del new_net.layer[:]
        new_net.layer.extend(new_layers)

        # File Write
        out = os.path.splitext(
            os.path.basename(output_file))[0] + '_{}_{}.prototxt'.format(
                i, num)
        out_dir = os.path.dirname(output_file)
        comp_file = '{}/{}'.format(out_dir, out)
        with open(comp_file, 'w') as fp:
            fp.write(pb.text_format.MessageToString(new_net))
        #print '[Layer] Wrote compressed prototxt to: {:s}'.format(comp_file)

        out = os.path.splitext(
            os.path.basename(output_weight))[0] + '_{}_{}.caffemodel'.format(
                i, num)
        out_dir = os.path.dirname(output_weight)
        comp_weight = '{}/{}'.format(out_dir, out)

        # Decomposition
        decomp_weights(orig_net, num, net.layer, orig_file, orig_weight,
                       new_conf, comp_file, comp_weight, priv_net, check_set,
                       layer_list)

        # Accuracy & Loss test
        out = os.path.splitext(os.path.basename(log))[0] + '_{}_{}.log'.format(
            i, num)
        out_dir = os.path.dirname(log)
        comp_log = '{}/{}'.format(out_dir, out)

        tmp_acc_loss = call_caffe_test(comp_file, comp_weight, comp_log,
                                       txt_idx, net_type)

        acc[num] = tmp_acc_loss[0][0]
        loss[num] = tmp_acc_loss[0][1]

        buf_f.write('{} {} {} {}\n'.format(i, num, acc[num], loss[num]))
        buf_f.close()
        os.system('./remove_model.sh {} {}'.format(txt_idx + 1, net_type))

    return acc, loss
Example #19
            False,
        ] * n_old_classes * (6 - 2)),
        "conv17_2_mbox_conf":
        np.array(class_mask * 2 + [
            False,
        ] * n_old_classes * (6 - 2))
    }

    #reference network (bigger)
    ref_net = caffe.Net('models/ssd_voc/deploy.prototxt',
                        'models/ssd_voc/MobileNetSSD_deploy.caffemodel',
                        caffe.TEST)

    #reference network parameters
    with open('models/ssd_voc/deploy.prototxt', 'r') as f:
        ref_par = NetParameter()
        txtf.Merge(f.read(), ref_par)

    #new network parameters: train,test,deploy
    with open('models/ssd_face/ssd_face_train.prototxt', 'r') as f:
        train_par = NetParameter()
        txtf.Merge(f.read(), train_par)
    with open('models/ssd_face/ssd_face_test.prototxt', 'r') as f:
        test_par = NetParameter()
        txtf.Merge(f.read(), test_par)
    with open('models/ssd_face/ssd_face_deploy.prototxt', 'r') as f:
        dep_par = NetParameter()
        txtf.Merge(f.read(), dep_par)

    #get faces collage and compute layer responses
    faces = cv2.imread('images/faces.png')
Example #20
 def __init__(self, net_file, net_params):
     self.caffe_net = caffe.Net(net_file, net_params, caffe.TEST)
     with open(net_file, 'r') as fp:
         self.net = NetParameter()
         pb.text_format.Parse(fp.read(), self.net)
Example #21
def main():
    # parse supplied arguments
    parser, args = parse_args()

    # set log level based on '--verbose/-v' argument
    if args.verbose:
        logging.basicConfig(format='%(levelname)s: %(message)s',
                            level=logging.DEBUG,
                            stream=sys.stdout)
    else:
        logging.basicConfig(format='%(levelname)s: %(message)s',
                            level=logging.WARNING,
                            stream=sys.stdout)

    # flag error for invalid file names
    if not args.input_filename[0].endswith(
            '.prototxt') or not args.output_filename[0].endswith('.prototxt'):
        parser.error(Msg.ERROR_MSG_FILE_NAME)

    # read SW-Net configuration file
    with open(args.input_filename[0], 'r') as f:
        sw_net_config_text = f.read()
    # create object representing SW-Net config; protobuf messages have no
    # ParseFromText, so go through google.protobuf.text_format instead
    sw_net = NetParameter()
    text_format.Parse(sw_net_config_text, sw_net)
    # create object representing SK-Net config
    sk_net = NetParameter()
    text_format.Parse(sw_net_config_text, sk_net)

    global f_init
    # fetch f_init & input patch dimension (height or width) if available
    if len(sw_net.input_dim) >= 4:
        f_init = sw_net.input_dim[1]
        sw_initial_input_size = sw_net.input_dim[3]
    else:
        # assign default value if not available in SW-Net
        sw_initial_input_size = 108

    sk_initial_input_size = sw_initial_input_size
    # if available, use supplied initial input size for SK-Net
    if args.sk_dim is not None:
        sk_initial_input_size = args.sk_dim

    transformer = SKTransformer(sw_net, sk_net, sw_initial_input_size,
                                sk_initial_input_size)
    # call to convert SW-Net into SK-Net
    transformer.transform()
    # Check the final kernel-stride value: the supplied SW-Net configuration
    # is inconsistent if the kernel stride does not equal 1.
    if transformer.d_temp == 1:
        print('\n' * 2 + '>' * 68)
        print(' ' * 18 + Msg.SUCCESS_MSG)
        print('<' * 68 + '\n')
        # in DEBUG mode print SW-Net and SK-Net configuration in table format
        if logger.getEffectiveLevel() == logging.DEBUG:
            print_sw_net(transformer)
            print_sk_net(transformer)

        # save converted SK-Net into the supplied output file
        with open(args.output_filename[0], 'w') as output_file:
            output_file.write(text_format.MessageToString(transformer.sk_net))
    else:
        logging.error(Msg.ERROR_MSG_INNER_PRODUCT_LAYER)
Example #22
    def test_duplicate(self):

        fpath = os.path.join(os.path.dirname(ROOT_PKG_PATH),
                             TEST_DATA_DIRNAME, TEST_NET_FILENAME)

        n1 = Parser().from_net_params_file(fpath)
        n2 = Parser().from_net_params_file(fpath)

        n1_tmp = NetParameter()
        n1_tmp.CopyFrom(n1)
        n2_tmp = NetParameter()
        n2_tmp.CopyFrom(n2)
        s = mrg.merge_indep_net_spec([n1_tmp, n2_tmp])

        assert_is_not_none(s)
        assert_is_instance(s, str)
        assert_greater(len(s), 0)

        n = NetParameter()
        text_format.Merge(s, n)
        assert_is_not_none(n)

        # Data Layer from first network
        for l in n.layer:
            if l.type.lower() == 'data':
                for l1 in n1.layer:
                    if l1.type.lower() == 'data':

                        dat_phase = [x.phase for x in l.include]
                        # compare test with test and train with train
                        if dat_phase == [x.phase for x in l1.include]:

                            assert_is_not(l.top, l1.top)
                            assert_list_equal(list(l.top), list(l1.top))
                            assert_equal(l.data_param.source, l1.data_param.source)
                            assert_equal(l.data_param.backend, l1.data_param.backend)
                            assert_equal(l.data_param.batch_size, l1.data_param.batch_size)
                            assert_equal(l.transform_param.scale, l1.transform_param.scale)
        # For non-data layers: verify that every layer of each source net
        # reappears in the merged net (modulo the '_nidx' name suffixes).
        for ni in [n1, n2]:
            for l1 in ni.layer:
                found = False
                if l1.type.lower() != 'data':

                    for l in n.layer:
                        if l.type.lower() == l1.type.lower() and \
                           [t.split('_nidx')[0] for t in l.top] == list(l1.top) and \
                           [b.split('_nidx')[0] for b in l.bottom] == list(l1.bottom):

                            assert_true(l.name.startswith(l1.name))

                            fnames1 = [f.name for f in l1.DESCRIPTOR.fields]
                            fnames = [f.name for f in l.DESCRIPTOR.fields]
                            assert_list_equal(fnames, fnames1)

                            l.ClearField('name')
                            l.ClearField('top')
                            l.ClearField('bottom')
                            l1.ClearField('name')
                            l1.ClearField('top')
                            l1.ClearField('bottom')

                            assert_equal(text_format.MessageToString(l), text_format.MessageToString(l1))

                            found = True
                else:
                    continue  # skip for data layers
                assert_true(found, "Failed to find %s in merged network!" % (l1.name,))
Example #23
import caffe
from caffe.proto.caffe_pb2 import NetParameter, LayerParameter
import google.protobuf.text_format as txtf
import sys

pattern = sys.argv[1:]
print(pattern)

ref_net = caffe.Net('models/ssd_face/ssd_face_train.prototxt', 
                    'models/ssd_face/best_bn_full.caffemodel', caffe.TRAIN)
               
for mode in ['train','test','deploy']:
    with open('models/ssd_face/ssd_face_'+mode+'.prototxt', 'r') as f:
        net_par = NetParameter()
        txtf.Merge(f.read(), net_par)
        
    new_layers = []
    for l in net_par.layer:
        newl = LayerParameter()
        newl.CopyFrom(l)
        
        if l.name in {'mbox_loc','mbox_conf','mbox_priorbox'}:
            newbot = [e for e in l.bottom if (('14' not in e) and ('15' not in e) and 
                                                ('16' not in e) and ('17' not in e))]
            del(newl.bottom[:])
            newl.bottom.extend(newbot)
            new_layers.append(newl)
        elif (('14' not in l.name) and ('15' not in l.name) and 
                ('16' not in l.name) and ('17' not in l.name)):
            new_layers.append(newl)
        
Example #24
def load_file(input_file):
    with open(input_file, 'r') as fp:
        net = NetParameter()
        pbt.Parse(fp.read(), net)

    return net
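
A trivial usage sketch; the path is a placeholder:

net = load_file('deploy.prototxt')
print('{} layers'.format(len(net.layer)))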