Code example #1
def convert_caffemodel_to_prototxt(caffemodel_filename):

    model = caffe_pb2.NetParameter()
    # load model from binary file
    with open(caffemodel_filename, 'rb') as f:
        model.ParseFromString(f.read())

    layers = model.layer
    res = list()
    res.append('name: "%s"' % model.name)
    for layer in layers:

        # name
        res.append('layer {')
        res.append('  name: "%s"' % layer.name)

        # type
        res.append('  type: "%s"' % layer.type)

        # bottom
        for bottom in layer.bottom:
            res.append('  bottom: "%s"' % bottom)

        # top
        for top in layer.top:
            res.append('  top: "%s"' % top)

        # loss_weight
        for loss_weight in layer.loss_weight:
            res.append('  loss_weight: %0.3f' % loss_weight)

        # param
        for param in layer.param:
            param_res = list()
            if param.HasField('lr_mult'):
                param_res.append('    lr_mult: %0.3f' % param.lr_mult)
            if param.decay_mult != 1:
                param_res.append('    decay_mult: %0.3f' % param.decay_mult)
            if len(param_res) > 0:
                res.append('  param{')
                res.extend(param_res)
                res.append('  }')

        # lrn_param
        if layer.HasField('lrn_param'):
            lrn_res = list()
            if layer.lrn_param.local_size != 5:
                lrn_res.append('    local_size: %d' %
                               layer.lrn_param.local_size)
            if layer.lrn_param.alpha != 1:
                lrn_res.append('    alpha: %f' % layer.lrn_param.alpha)
            if layer.lrn_param.beta != 0.75:
                lrn_res.append('    beta: %f' % layer.lrn_param.beta)
            NormRegionMapper = {'0': 'ACROSS_CHANNELS', '1': 'WITHIN_CHANNEL'}
            if layer.lrn_param.norm_region != 0:
                lrn_res.append(
                    '    norm_region: %s' %
                    NormRegionMapper[str(layer.lrn_param.norm_region)])
            EngineMapper = {'0': 'DEFAULT', '1': 'CAFFE', '2': 'CUDNN'}
            if layer.lrn_param.engine != 0:
                lrn_res.append('    engine: %s' %
                               EngineMapper[str(layer.lrn_param.engine)])
            if len(lrn_res) > 0:
                res.append('  lrn_param{')
                res.extend(lrn_res)
                res.append('  }')

        # include
        if len(layer.include) > 0:
            include_res = list()
            includes = layer.include
            phase_mapper = {'0': 'TRAIN', '1': 'TEST'}

            for include in includes:
                if include.phase is not None:
                    include_res.append('    phase: %s' %
                                       phase_mapper[str(include.phase)])

            if len(include_res) > 0:
                res.append('  include {')
                res.extend(include_res)
                res.append('  }')

        # transform_param
        if layer.HasField('transform_param'):
            transform_param_res = list()
            if layer.transform_param.scale != 1:
                transform_param_res.append('    scale: %s' %
                                           layer.transform_param.scale)
            if layer.transform_param.mirror:
                transform_param_res.append('    mirror: true')
            if len(transform_param_res) > 0:
                res.append('  transform_param {')
                res.extend(transform_param_res)
                res.append('  }')

        # data_param
        if layer.HasField('data_param'):
            data_param_res = list()
            if layer.data_param.source != "":
                data_param_res.append('    source: "%s"' %
                                      layer.data_param.source)
            if layer.data_param.batch_size != 0:
                data_param_res.append('    batch_size: %d' %
                                      layer.data_param.batch_size)
            if layer.data_param.backend != 0:
                # DataParameter.DB enum: 0 = LEVELDB, 1 = LMDB
                BackendMapper = {'0': 'LEVELDB', '1': 'LMDB'}
                data_param_res.append('    backend: %s' %
                                      BackendMapper[str(layer.data_param.backend)])

            if len(data_param_res) > 0:
                res.append('  data_param {')
                res.extend(data_param_res)
                res.append('  }')

        # convolution_param
        if layer.HasField('convolution_param'):
            convolution_param_res = list()
            conv_param = layer.convolution_param
            if conv_param.num_output != 0:
                convolution_param_res.append('    num_output: %d' %
                                             conv_param.num_output)
            if len(conv_param.kernel_size) > 0:
                for kernel_size in conv_param.kernel_size:
                    convolution_param_res.append('    kernel_size: %d' %
                                                 kernel_size)
            if len(conv_param.pad) > 0:
                for pad in conv_param.pad:
                    convolution_param_res.append('    pad: %d' % pad)
            if len(conv_param.stride) > 0:
                for stride in conv_param.stride:
                    convolution_param_res.append('    stride: %d' % stride)
            if conv_param.HasField('weight_filler') and conv_param.weight_filler.type != 'constant':
                convolution_param_res.append('    weight_filler {')
                convolution_param_res.append('      type: "%s"' %
                                             conv_param.weight_filler.type)
                convolution_param_res.append('    }')
            if conv_param.HasField('bias_filler') and conv_param.bias_filler.type != 'constant':
                convolution_param_res.append('    bias_filler {')
                convolution_param_res.append('      type: "%s"' %
                                             conv_param.bias_filler.type)
                convolution_param_res.append('    }')

            if len(convolution_param_res) > 0:
                res.append('  convolution_param {')
                res.extend(convolution_param_res)
                res.append('  }')

        # pooling_param
        if layer.HasField('pooling_param'):
            pooling_param_res = list()
            if layer.pooling_param.kernel_size > 0:
                pooling_param_res.append('    kernel_size: %d' %
                                         layer.pooling_param.kernel_size)
                pooling_param_res.append('    stride: %d' %
                                         layer.pooling_param.stride)
                pooling_param_res.append('    pad: %d' %
                                         layer.pooling_param.pad)
                PoolMethodMapper = {'0': 'MAX', '1': 'AVE', '2': 'STOCHASTIC'}
                pooling_param_res.append(
                    '    pool: %s' %
                    PoolMethodMapper[str(layer.pooling_param.pool)])

            if len(pooling_param_res) > 0:
                res.append('  pooling_param {')
                res.extend(pooling_param_res)
                res.append('  }')

        # inner_product_param
        if layer.HasField('inner_product_param'):
            inner_product_param_res = list()
            if layer.inner_product_param.num_output != 0:
                inner_product_param_res.append(
                    '    num_output: %d' %
                    layer.inner_product_param.num_output)

            if len(inner_product_param_res) > 0:
                res.append('  inner_product_param {')
                res.extend(inner_product_param_res)
                res.append('  }')

        # drop_param
        if layer.HasField('dropout_param'):
            dropout_param_res = list()
            if layer.dropout_param.dropout_ratio != 0.5:  #or layer.dropout_param.scale_train!=True:
                dropout_param_res.append('    dropout_ratio: %f' %
                                         layer.dropout_param.dropout_ratio)
                #dropout_param_res.append('    scale_train: ' + str(layer.dropout_param.scale_train))

            if len(dropout_param_res) > 0:
                res.append('  dropout_param {')
                res.extend(dropout_param_res)
                res.append('  }')

        res.append('}')
    return '\n'.join(res) + '\n'
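
A minimal usage sketch for the function above, assuming pycaffe's caffe_pb2 module is importable; the file names are placeholders.

import caffe.proto.caffe_pb2 as caffe_pb2  # required by convert_caffemodel_to_prototxt

prototxt_text = convert_caffemodel_to_prototxt('lenet.caffemodel')
with open('lenet_recovered.prototxt', 'w') as f:
    f.write(prototxt_text)
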
Code example #2
def make_test_files(testable_net_path, train_weights_path, num_iterations,
                    in_h, in_w):
    # load the train net prototxt as a protobuf message
    with open(testable_net_path) as f:
        testable_str = f.read()
    testable_msg = caffe_pb2.NetParameter()
    text_format.Merge(testable_str, testable_msg)

    bn_layers = [l.name for l in testable_msg.layer if l.type == "BN"]
    bn_blobs = [l.top[0] for l in testable_msg.layer if l.type == "BN"]
    bn_means = [l.top[1] for l in testable_msg.layer if l.type == "BN"]
    bn_vars = [l.top[2] for l in testable_msg.layer if l.type == "BN"]

    net = caffe.Net(testable_net_path, train_weights_path, caffe.TEST)

    # init our blob stores with the first forward pass
    res = net.forward()
    bn_avg_mean = {
        bn_mean: np.squeeze(res[bn_mean]).copy()
        for bn_mean in bn_means
    }
    bn_avg_var = {bn_var: np.squeeze(res[bn_var]).copy() for bn_var in bn_vars}

    # iterate over the rest of the training set
    for i in xrange(1, num_iterations):
        res = net.forward()
        for bn_mean in bn_means:
            bn_avg_mean[bn_mean] += np.squeeze(res[bn_mean])
        for bn_var in bn_vars:
            bn_avg_var[bn_var] += np.squeeze(res[bn_var])
        print 'progress: {}/{}'.format(i, num_iterations)

    # compute average means and vars
    for bn_mean in bn_means:
        bn_avg_mean[bn_mean] /= num_iterations
    for bn_var in bn_vars:
        bn_avg_var[bn_var] /= num_iterations

    for bn_blob, bn_var in zip(bn_blobs, bn_vars):
        m = float(np.prod(net.blobs[bn_blob].data.shape)) / np.prod(
            bn_avg_var[bn_var].shape)
        bn_avg_var[bn_var] *= (m / (m - 1))  # unbiased (Bessel-corrected) variance

    # calculate the new scale and shift blobs for all the BN layers
    scale_data = {
        bn_layer: np.squeeze(net.params[bn_layer][0].data)
        for bn_layer in bn_layers
    }
    shift_data = {
        bn_layer: np.squeeze(net.params[bn_layer][1].data)
        for bn_layer in bn_layers
    }

    var_eps = 1e-9
    new_scale_data = {}
    new_shift_data = {}
    for bn_layer, bn_mean, bn_var in zip(bn_layers, bn_means, bn_vars):
        gamma = scale_data[bn_layer]
        beta = shift_data[bn_layer]
        Ex = bn_avg_mean[bn_mean]
        Varx = bn_avg_var[bn_var]
        new_gamma = gamma / np.sqrt(Varx + var_eps)
        new_beta = beta - (gamma * Ex / np.sqrt(Varx + var_eps))

        new_scale_data[bn_layer] = new_gamma
        new_shift_data[bn_layer] = new_beta
    print "New data:"
    print new_scale_data.keys()
    print new_shift_data.keys()

    # assign computed new scale and shift values to net.params
    for bn_layer in bn_layers:
        net.params[bn_layer][0].data[...] = new_scale_data[bn_layer].reshape(
            net.params[bn_layer][0].data.shape)
        net.params[bn_layer][1].data[...] = new_shift_data[bn_layer].reshape(
            net.params[bn_layer][1].data.shape)

    # build a test net prototxt
    test_msg = testable_msg
    # replace data layers with 'input' net param
    data_layers = [l for l in test_msg.layer if l.type.endswith("Data")]
    for data_layer in data_layers:
        test_msg.layer.remove(data_layer)
    test_msg.input.append("data")
    test_msg.input_dim.append(1)
    test_msg.input_dim.append(3)
    test_msg.input_dim.append(in_h)
    test_msg.input_dim.append(in_w)
    # Set BN layers to INFERENCE so they use the new stat blobs
    # and remove mean, var top blobs.
    for l in test_msg.layer:
        if l.type == "BN":
            if len(l.top) > 1:
                dead_tops = l.top[1:]
                for dl in dead_tops:
                    l.top.remove(dl)
            l.bn_param.bn_mode = caffe_pb2.BNParameter.INFERENCE
    # replace output loss, accuracy layers with a softmax
    dead_outputs = [
        l for l in test_msg.layer if l.type in ["SoftmaxWithLoss", "Accuracy"]
    ]
    out_bottom = dead_outputs[0].bottom[0]
    for dead in dead_outputs:
        test_msg.layer.remove(dead)
    test_msg.layer.add(name="prob",
                       type="Softmax",
                       bottom=[out_bottom],
                       top=['prob'])
    return net, test_msg
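
The rescaling above folds the accumulated batch statistics into the BN scale and shift: with new_gamma = gamma / sqrt(Var + eps) and new_beta = beta - gamma * E / sqrt(Var + eps), applying new_gamma * x + new_beta at inference reproduces the training-time gamma * (x - E) / sqrt(Var + eps) + beta. A small NumPy check of that identity, using made-up values rather than a real net:

import numpy as np

rng = np.random.RandomState(0)
x, gamma, beta = rng.randn(3, 8)       # fake activations and BN parameters
mean, var = rng.randn(8), rng.rand(8)  # fake accumulated statistics
eps = 1e-9

new_gamma = gamma / np.sqrt(var + eps)
new_beta = beta - gamma * mean / np.sqrt(var + eps)

assert np.allclose(gamma * (x - mean) / np.sqrt(var + eps) + beta,
                   new_gamma * x + new_beta)
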
Code example #3
def modify_net_prototxt(net_prototxt,
                        new_net_prototxt,
                        moving_average_fraction=0.9999):
    # Reads net parameters from prototxt
    net_param = caffe_pb2.NetParameter()
    with open(net_prototxt) as f:
        txtf.Merge(str(f.read()), net_param)

    flag = True
    for l in net_param.layer:
        # Comment / uncomment / adapt the following blocks as needed

        # if l.type == "Convolution" and l.convolution_param.kernel_size[0] > 1:
        #    l.param[0].lr_mult = 0.0
        #    l.param[1].lr_mult = 0.0

        # if l.type in ["Convolution", "InnerProduct"]: # and l.name != "mid_fc8":
        #    l.param[0].lr_mult = 1.0
        #    l.param[1].lr_mult = 1.0

        # if l.type == "Convolution":
        #    if l.convolution_param.group !=1:
        #        l.type = "DepthwiseConvolution"

        # if l.type in ["Convolution","DepthwiseConvolution"]:
        #    l.param[0].lr_mult = 1.0
        #    l.param[1].lr_mult = 1.0

        # if l.name == "conv5_4/sep":
        #    flag = False

        if l.type == "BatchReNorm":
            # l.batch_renorm_param.step_to_init = 48
            # l.batch_renorm_param.step_to_r_max = 96
            # l.batch_renorm_param.step_to_d_max = 96
            # l.batch_renorm_param.r_max = 3
            # l.batch_renorm_param.d_max = 5

            # l.batch_renorm_param.use_global_stats = flag
            l.batch_renorm_param.step_to_init = 0
            l.batch_renorm_param.step_to_r_max = 1
            l.batch_renorm_param.step_to_d_max = 1
            l.batch_renorm_param.moving_average_fraction = moving_average_fraction  # 0.99995

            l.batch_renorm_param.r_max = 1.25
            l.batch_renorm_param.d_max = 0.5

            # l.batch_renorm_param.r_max = 1.5
            # l.batch_renorm_param.d_max = 2.5

        # if l.type == "Scale":
        #    l.param[0].lr_mult = 0.0
        #    l.param[1].lr_mult = 0.0

        # if l.type == "Convolution" and l.convolution_param.kernel_size[0] > 1:
        #    l.param[0].lr_mult = 0.0
        # if l.type == "DepthwiseConvolution" and l.convolution_param.kernel_size[0] > 1:
        #    l.param[0].lr_mult = 0.0
        # if l.type == "Convolution" and l.convolution_param.kernel_size[0] == 1:
        #    l.param[0].lr_mult = 0.0

    with open(new_net_prototxt, 'w') as f:
        f.write(str(net_param))
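
A usage sketch, assuming the imports the function relies on (caffe_pb2 and google.protobuf.text_format aliased as txtf) are available; the prototxt names are placeholders.

from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf

modify_net_prototxt('train.prototxt', 'train_renorm.prototxt',
                    moving_average_fraction=0.999)
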
Code example #4
def __init__(self, name="network"):
    self.net = caffe_pb2.NetParameter()
    self.net.name = name
    self.bottom = None
    self.cur = None
    self.this = None
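
The constructor above only wraps an empty NetParameter; the rest of the original class is not shown, but the underlying protobuf can be grown and serialized directly. A hypothetical sketch (the layer name and shape are illustrative):

from caffe.proto import caffe_pb2
import google.protobuf.text_format as text_format

net = caffe_pb2.NetParameter()
net.name = "network"

# append an Input layer directly on the protobuf message
layer = net.layer.add()
layer.name = 'data'
layer.type = 'Input'
layer.top.append('data')
layer.input_param.shape.add().dim.extend([1, 3, 224, 224])

with open('builder_demo.prototxt', 'w') as f:
    f.write(text_format.MessageToString(net))
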
Code example #5
def caffe_to_sas(network_file, model_name, network_param=None,
                 phase=caffe.TEST, verbose=False):
    '''
    Generate a SAS deep learning model from Caffe definition

    Parameters
    ----------
    network_file : string
       Fully qualified file name of network definition file (*.prototxt).
    model_name : string
       Name for deep learning model.
    network_param : string, optional
       Fully qualified file name of network parameter file (*.caffemodel).
    phase : int, optional
       One of {caffe.TRAIN, caffe.TEST, None}.
    verbose : bool, optional
       To view all Caffe information messages, set to True.

    '''

    # open output file

    try:
        output_code = ''
        # initialize Caffe logging facility
        caffe.init_log(0, verbose)

        # instantiate a model and read network parameters
        if (network_param is None):
            model = caffe.Net(network_file, phase)
        else:
            model = caffe.Net(network_file, phase, weights=network_param)
        net = caffe_pb2.NetParameter()
        text_format.Merge(open(network_file + '.tmp').read(), net)

        # remove temporary file created
        if os.path.isfile(network_file + '.tmp'):
            os.remove(network_file + '.tmp')

        # identify common Caffe/SAS computation layers
        layer_list = []
        for layer in net.layer:
            include_layer = False
            if (len(layer.include) == 0):
                include_layer = True
            else:
                for layer_phase in layer.include:
                    if (caffe.TEST == layer_phase.phase):
                        include_layer = True

            # exclude layers not implemented (or implemented in a different fashion)
            if (layer.type.lower() not in common_layers):
                include_layer = False

            if include_layer:
                layer_list.append(make_composite_layer(layer))

        # associate activations with computation layers
        for layer in net.layer:
            layer_type = layer.type.lower()
            if (layer_type in ['relu', 'prelu', 'elu', 'sigmoid', 'tanh']):
                layer_index = None
                for ii in range(len(layer_list)):
                    if (layer.top[0] == layer_list[ii].layer_parm.top[0]):
                        layer_index = ii

                if layer_index is not None:
                    layer_list[layer_index].related_layers.append(layer)
                else:
                    raise CaffeParseError(
                        'Activation layer ' + layer.name +
                        ' is not associated with any computation layer.')

        # associate dropout with computation layers
        for layer in net.layer:
            layer_type = layer.type.lower()
            if (layer_type == 'dropout'):
                layer_index = None
                for ii in range(len(layer_list)):
                    if (layer.top[0] == layer_list[ii].layer_parm.top[0]):
                        layer_index = ii

                if layer_index is not None:
                    layer_list[layer_index].related_layers.append(layer)
                else:
                    raise CaffeParseError(
                        'Dropout layer ' + layer.name +
                        ' is not associated with any computation layer.')

        # associate softmax with a fully-connected layer
        for layer in net.layer:
            layer_type = layer.type.lower()
            if (layer_type in ['softmax', 'softmaxwithloss']):
                layer_index = None
                for ii in range(len(layer_list)):
                    for jj in range(len(layer.bottom)):
                        if (layer.bottom[jj] == layer_list[ii].layer_parm.top[0]):
                            layer_index = ii

                if layer_index is not None:
                    layer_list[layer_index].related_layers.append(layer)
                else:
                    raise CaffeParseError(
                        'Softmax layer ' + layer.name +
                        ' is not associated with any fully-connected layer.')

        # determine source layer(s) for computation layers
        for ii in range(len(layer_list)):
            for kk in range(len(layer_list[ii].layer_parm.bottom)):
                name = None
                for jj in range(ii):
                    if (layer_list[ii].layer_parm.bottom[kk] ==
                            layer_list[jj].layer_parm.top[0]):
                        name = layer_list[jj].layer_parm.name

                if name:
                    layer_list[ii].source_layer.append(name)

        # associate scale layer with batchnorm layer
        for layer in net.layer:
            if (layer.type.lower() == 'scale'):
                bn_found = False
                for ii in range(len(layer_list)):
                    if ((layer_list[ii].layer_parm.type.lower() == 'batchnorm') and
                            (layer_list[ii].layer_parm.top[0] == layer.top[0])):
                        layer_list[ii].related_layers.append(layer)
                        bn_found = True
                        break

                if not bn_found:
                    raise CaffeParseError(
                        'Scale layer ' + layer.name +
                        ' is not associated with a batch normalization layer')

        # loop over included layers
        for clayer in layer_list:
            layer_type = clayer.layer_parm.type.lower()
            if (layer_type == 'pooling'):  # average/max pooling
                sas_code = caffe_pooling_layer(clayer, model_name)
            elif (layer_type == 'convolution'):  # 2D convolution
                sas_code = caffe_convolution_layer(clayer, model_name)
            elif (layer_type == 'batchnorm'):  # batch normalization
                sas_code = caffe_batch_normalization_layer(clayer, model_name)
            elif (layer_type in ['data', 'memorydata']):  # input layer
                sas_code = caffe_input_layer(clayer, model_name)
            elif (layer_type == 'eltwise'):  # residual
                sas_code = caffe_residual_layer(clayer, model_name)
            elif (layer_type == 'innerproduct'):  # fully connected
                sas_code = caffe_full_connect_layer(clayer, model_name)
            else:
                raise CaffeParseError(layer_type +
                                      ' is an unsupported layer type')

            # write SAS code associated with Caffe layer
            if sas_code:
                output_code = output_code + sas_code + '\n\n'

            else:
                raise CaffeParseError(
                    'Unable to generate SAS definition for layer ' +
                    clayer.layer_parm.name)

        # convert weights from BINARYPROTO to HDF5 once all layers are written
        if network_param is not None:
            sas_hdf5 = os.path.join(os.getcwd(), '{}_weights.h5'.format(model_name))
            write_caffe_hdf5(model, layer_list, sas_hdf5)

        return output_code

    except CaffeParseError as err_msg:
        print(err_msg)
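
A usage sketch, assuming the deploy prototxt and caffemodel exist on disk (the paths are placeholders) and that the helper functions referenced above (caffe_pooling_layer, write_caffe_hdf5, and so on) are defined in the same module:

import caffe

sas_code = caffe_to_sas('deploy.prototxt', 'MyModel',
                        network_param='weights.caffemodel',
                        phase=caffe.TEST)
if sas_code:
    with open('my_model_sas.py', 'w') as f:
        f.write(sas_code)
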
Code example #6
def proto_decomposition(network_in, network_out, layers_ranks):
    proto_in = caffe_pb2.NetParameter()

    with open(network_in, 'r') as file:
        text_format.Merge(str(file.read()), proto_in)

    proto_out = caffe_pb2.NetParameter()
    proto_out.CopyFrom(proto_in)
    proto_out.ClearField('layer')
    channel_buffer = {}

    for layer in proto_in.layer:
        if layer.type != 'Convolution' and layer.type != 'InnerProduct':
            proto_out.layer.add()
            proto_out.layer[-1].CopyFrom(layer)

            if layer.type == 'Data':
                channel_buffer[layer.top[0]] = 3
                channel_buffer[layer.top[1]] = 1
            else:
                channel_buffer[layer.top[0]] = channel_buffer[layer.bottom[0]]
        elif layer.type == 'Convolution':
            channel_buffer[layer.top[0]] = layer.convolution_param.num_output
            layer.convolution_param.ClearField('weight_filler')
            layer.convolution_param.ClearField('bias_filler')

            if layer.name not in layers_ranks:
                proto_out.layer.add()
                proto_out.layer[-1].CopyFrom(layer)
            else:
                if layers_ranks[layer.name][0] != channel_buffer[
                        layer.bottom[0]]:
                    proto_out.layer.add()
                    lra_a_layer = proto_out.layer[-1]
                    lra_a_layer.CopyFrom(layer)
                    lra_a_layer.name += '_lra_a'
                    lra_a_layer.convolution_param.kernel_size[0] = 1
                    lra_a_layer.convolution_param.num_output = layers_ranks[
                        layer.name][0]
                    lra_a_layer.convolution_param.ClearField('pad')
                    lra_a_layer.convolution_param.ClearField('stride')
                    lra_a_layer.top[0] = layer.name + '_lra_a'
                    channel_buffer[lra_a_layer.top[0]] = layers_ranks[
                        layer.name][0]
                proto_out.layer.add()
                lra_b_layer = proto_out.layer[-1]
                lra_b_layer.CopyFrom(layer)
                lra_b_layer.name += '_lra_b'
                lra_b_layer.convolution_param.num_output = layers_ranks[
                    layer.name][1]

                if layer.name + '_lra_a' in channel_buffer:
                    lra_b_layer.bottom[0] = layer.name + '_lra_a'

                if layers_ranks[layer.name][1] != channel_buffer[layer.top[0]]:
                    lra_b_layer.top[0] = layer.name + '_lra_b'
                    proto_out.layer.add()
                    lra_c_layer = proto_out.layer[-1]
                    lra_c_layer.CopyFrom(layer)
                    lra_c_layer.name += '_lra_c'
                    lra_c_layer.convolution_param.kernel_size[0] = 1
                    lra_c_layer.convolution_param.ClearField('pad')
                    lra_c_layer.convolution_param.ClearField('stride')
                    lra_c_layer.bottom[0] = layer.name + '_lra_b'
                    channel_buffer[lra_c_layer.bottom[0]] = layers_ranks[
                        layer.name][1]
        elif layer.type == 'InnerProduct':
            channel_buffer[layer.top[0]] = layer.inner_product_param.num_output
            layer.inner_product_param.ClearField('weight_filler')
            layer.inner_product_param.ClearField('bias_filler')

            if layer.name not in layers_ranks:
                proto_out.layer.add()
                proto_out.layer[-1].CopyFrom(layer)
            else:
                proto_out.layer.add()
                svd_a_layer = proto_out.layer[-1]
                svd_a_layer.CopyFrom(layer)
                svd_a_layer.name += '_svd_a'
                svd_a_layer.inner_product_param.num_output = layers_ranks[
                    layer.name]

                if layers_ranks[layer.name] != channel_buffer[layer.top[0]]:
                    svd_a_layer.top[0] = layer.name + '_svd_a'
                    channel_buffer[svd_a_layer.top[0]] = layers_ranks[
                        layer.name]
                    proto_out.layer.add()
                    svd_b_layer = proto_out.layer[-1]
                    svd_b_layer.CopyFrom(layer)
                    svd_b_layer.name += '_svd_b'
                    svd_b_layer.bottom[0] = layer.name + '_svd_a'

    with open(network_out, 'w') as file:
        file.write(text_format.MessageToString(proto_out))
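
A usage sketch: layers_ranks maps a layer name to a pair of ranks for Convolution layers and to a single rank for InnerProduct layers, matching how the function indexes it above; the layer names and rank values are placeholders.

layers_ranks = {
    'conv2': (32, 64),  # Convolution: ranks for the '_lra_a' and '_lra_b' factors
    'fc6': 256,         # InnerProduct: hidden size of the SVD factorization
}
proto_decomposition('train_val.prototxt', 'train_val_lowrank.prototxt',
                    layers_ranks)
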
Code example #7
import caffe.proto.caffe_pb2 as caffe_pb2


#caffemodel_filename = 'D:/caffe-windows/examples/mnist/lenet.caffemodel'
caffemodel_filename = 'G:/py/tf2caffe/testBN/lenet.caffemodel'
model = caffe_pb2.NetParameter()
with open(caffemodel_filename, 'rb') as f:
    model.ParseFromString(f.read())

layers = model.layer
print('name: "%s"' % model.name)
layer_id = -1

fname = 'testBN/lenetmodel.prototxt'
with open(fname, 'w') as f:
    for layer in layers:
        f.write('layer{\n')
        f.write(str(layer))
        f.write('\n}\n')
    #f.write('test')


for layer in layers:
    if layer.name in ['data_bn']:
        print(layer)




for layer in layers:
    layer_id = layer_id + 1
Code example #8
def Inpt_OPT_New_Bias(original_prototxt_path, original_model_path,
                      optimized_prototxt_path, new_model_path, mean_vector,
                      scale, H, W, input_channel):
    net_param = caffe_pb2.NetParameter()
    with open(original_prototxt_path, 'rt') as f:
        Parse(f.read(), net_param)
    layer_num = len(net_param.layer)

    new_net_param = caffe_pb2.NetParameter()
    new_net_param.name = 'calc_new_bias'
    new_net_param.layer.add()
    new_net_param.layer[-1].name = "data"
    new_net_param.layer[-1].type = 'Input'
    new_net_param.layer[-1].top.append('data')
    new_net_param.layer[-1].input_param.shape.add()
    new_net_param.layer[-1].input_param.shape[-1].dim.append(1)
    new_net_param.layer[-1].input_param.shape[-1].dim.append(
        int(input_channel))
    new_net_param.layer[-1].input_param.shape[-1].dim.append(int(H))
    new_net_param.layer[-1].input_param.shape[-1].dim.append(int(W))

    target_blob_name = ''
    target_layer_name = ''
    input_layer_type = ['Data', 'Input', 'AnnotatedData']
    for layer_idx in range(0, layer_num):
        layer = net_param.layer[layer_idx]
        if layer.type not in input_layer_type:
            assert (layer.type == 'Convolution' or layer.type == 'InnerProduct'
                    ), "## ERROR : First Layer MUST BE CONV or IP. ##"
            new_net_param.layer.extend([layer])
            if layer.type == 'Convolution':
                try:
                    assert (
                        new_net_param.layer[-1].convolution_param.pad[0] == 0
                    ), '## ERROR : MEAN cannot be merged into CONV with padding > 0. ##'
                except IndexError:
                    # padding not set
                    pass
                target_blob_name = layer.top[0]
                target_layer_name = layer.name
            break

    new_proto_name = './tmpfile.prototxt'
    with open(new_proto_name, 'wt') as f:
        f.write(MessageToString(new_net_param))
    caffe.set_mode_cpu()
    net = caffe.Net(new_proto_name, str(original_model_path), caffe.TEST)

    mean_array = mean_vector * (-1.0) * scale
    mean_array = mean_array.reshape(input_channel, 1)
    mean_array = np.tile(mean_array,
                         (1, H * W)).reshape(1, input_channel, H, W)

    os.remove(new_proto_name)

    net.blobs['data'].data[...] = mean_array
    net.forward()
    mean_data = net.blobs[target_blob_name].data[...]
    mean_data = mean_data.reshape(mean_data.shape[1],
                                  mean_data.shape[2] * mean_data.shape[3])
    new_bias = np.mean(mean_data, 1)
    print "INPUT PREPROCESS (SUB MEAN) OPT : Calc New Bias Done."

    caffe.set_mode_cpu()
    net = caffe.Net(original_prototxt_path, str(original_model_path),
                    caffe.TEST)
    if len(net.params[target_layer_name]) == 2:
        # with bias
        net.params[target_layer_name][1].data[...] += new_bias[...]
        net.save(new_model_path)
        try:
            shutil.copyfile(original_prototxt_path, optimized_prototxt_path)
        except:
            # same file, no need to copy
            pass
        print "INPUT PREPROCESS (SUB MEAN) OPT : Merge Mean Done."
        print bcolors.OKGREEN + "INPUT PREPROCESS (SUB MEAN) OPT : Model at " + new_model_path + "." + bcolors.ENDC
        print bcolors.OKGREEN + "INPUT PREPROCESS (SUB MEAN) OPT : Prototxt at " + optimized_prototxt_path + "." + bcolors.ENDC
        print bcolors.WARNING + "INPUT PREPROCESS (SUB MEAN) OPT : ** WARNING ** Remember to set mean values to zero before test !!!" + bcolors.ENDC

    else:
        net_param = caffe_pb2.NetParameter()
        with open(original_prototxt_path, 'rt') as f:
            Parse(f.read(), net_param)
        layer_num = len(net_param.layer)
        for layer_idx in range(0, layer_num):
            layer = net_param.layer[layer_idx]
            if layer.name == target_layer_name:
                if layer.type == 'Convolution':
                    net_param.layer[
                        layer_idx].convolution_param.bias_term = True
                else:
                    net_param.layer[
                        layer_idx].inner_product_param.bias_term = True
                break
        with open(optimized_prototxt_path, 'wt') as f:
            f.write(MessageToString(net_param))

        new_net = caffe.Net(optimized_prototxt_path, caffe.TEST)
        for param_name in net.params.keys():
            for i in range(0, len(net.params[param_name])):
                new_net.params[param_name][i].data[
                    ...] = net.params[param_name][i].data[...]
        new_net.params[target_layer_name][1].data[...] = new_bias[...]
        new_net.save(new_model_path)
        print "INPUT PREPROCESS (SUB MEAN) OPT : Merge Mean Done."
        print bcolors.OKGREEN + "INPUT PREPROCESS (SUB MEAN) OPT : Model at " + new_model_path + "." + bcolors.ENDC
        print bcolors.OKGREEN + "INPUT PREPROCESS (SUB MEAN) OPT : Prototxt at " + optimized_prototxt_path + "." + bcolors.ENDC
        print bcolors.WARNING + "INPUT PREPROCESS (SUB MEAN) OPT : ** WARNING ** Remember to set mean values to zero before test !!!" + bcolors.ENDC
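
A usage sketch for the mean-folding routine above, with a BGR ImageNet-style mean and placeholder file names; mean_vector and scale must match the preprocessing the deploy pipeline would otherwise apply at the input:

import numpy as np

mean_vector = np.array([104.0, 117.0, 123.0])  # per-channel mean to fold into the first layer
Inpt_OPT_New_Bias('deploy.prototxt', 'model.caffemodel',
                  'deploy_no_mean.prototxt', 'model_no_mean.caffemodel',
                  mean_vector, scale=1.0, H=224, W=224, input_channel=3)
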
Code example #9
File: calibrator.py Project: zwh930712/caffe-1
def transform_convolutions(model_path,
                           compiled_model_path,
                           top_blobs_map,
                           bottom_blobs_map,
                           use_unsigned_range,
                           concat_use_fp32,
                           unify_concat_scales,
                           conv_algo,
                           enable_1st_conv=False):
    net = caffe_pb2.NetParameter()
    with open(model_path) as f:
        s = f.read()
        txtf.Merge(s, net)

    compiled_net = caffe_pb2.NetParameter()
    with open(compiled_model_path) as f:
        s = f.read()
        txtf.Merge(s, compiled_net)

    convs_output_with_relu = analyze_conv_output_with_relu(compiled_net)
    # extended convs output with relu is used for convs that cannot fuse with relu due to negative slope
    # extended_convs_output_with_relu = analyze_conv_output_with_relu_from_net(convs_output_with_relu, compiled_net, net)
    new_net = copy.deepcopy(net)

    convolution_layers = [(value, index)
                          for index, value in enumerate(net.layer)
                          if value.type in quantize_layers]
    compiled_convolution_layers = [
        (value, index) for index, value in enumerate(compiled_net.layer)
        if value.type in quantize_layers
    ]

    u8_max = 255
    s8_max = 127
    first_conv = True if enable_1st_conv else False
    for (l, index) in convolution_layers:
        for si in range(
                0, len(new_net.layer[index].quantization_param.scale_out)):
            if l.name in convs_output_with_relu:  # u8
                new_net.layer[index].quantization_param.scale_out[
                    si] = u8_max / new_net.layer[
                        index].quantization_param.scale_out[si]
            else:  # s8
                if use_unsigned_range:
                    new_net.layer[index].quantization_param.scale_out[
                        si] = u8_max / new_net.layer[
                            index].quantization_param.scale_out[si]
                else:
                    new_net.layer[index].quantization_param.scale_out[
                        si] = s8_max / new_net.layer[
                            index].quantization_param.scale_out[si]

        index_in_compiled_net = find_index_by_name(
            l.name, compiled_convolution_layers)
        assert (index_in_compiled_net >= 0)
        #conv_inputs = get_input_convolutions(l, compiled_net, index_in_compiled_net, ["Convolution"])
        #conv_input_u8 = analyze_conv_input_u8(conv_inputs, convs_output_with_relu)
        conv_input_u8 = is_convolution_input_u8(
            l, compiled_net, index_in_compiled_net, ["Convolution"],
            convs_output_with_relu)  # FIXME: extended_convs_output_with_relu
        for si in range(0,
                        len(new_net.layer[index].quantization_param.scale_in)):
            if conv_input_u8:  # u8
                if first_conv:
                    new_net.layer[index].quantization_param.scale_in[
                        si] = s8_max / new_net.layer[
                            index].quantization_param.scale_in[si]
                    new_net.layer[
                        index].quantization_param.is_negative_input = True
                    first_conv = False
                else:
                    new_net.layer[index].quantization_param.scale_in[
                        si] = u8_max / new_net.layer[
                            index].quantization_param.scale_in[si]
            else:
                new_net.layer[index].quantization_param.scale_in[
                    si] = s8_max / new_net.layer[
                        index].quantization_param.scale_in[si]
                new_net.layer[
                    index].quantization_param.is_negative_input = True

        for si in range(
                0, len(new_net.layer[index].quantization_param.scale_params)):
            if not isclose(
                    new_net.layer[index].quantization_param.scale_params[si],
                    0.0):
                new_scale_param = s8_max / new_net.layer[
                    index].quantization_param.scale_params[si]
                if np.isinf(new_scale_param):
                    new_scale_param = 0.0
                new_net.layer[index].quantization_param.scale_params[
                    si] = new_scale_param
            else:
                new_net.layer[index].quantization_param.scale_params[si] = 0.0

        if conv_algo:
            # inputs feeding this convolution; needed to propagate this layer's
            # scale_in back to the producing layers' scale_out
            conv_inputs = get_input_convolutions(l, compiled_net,
                                                 index_in_compiled_net,
                                                 ["Convolution"])
            for conv_input in conv_inputs:
                index_bottom_layer = find_index_by_name(
                    conv_input[1], convolution_layers)
                for si in range(
                        0,
                        len(new_net.layer[index_bottom_layer].
                            quantization_param.scale_out)):
                    new_net.layer[
                        index_bottom_layer].quantization_param.scale_out[
                            si] = new_net.layer[
                                index].quantization_param.scale_in[si]

    concat_layers = [(value, index) for index, value in enumerate(net.layer)
                     if value.type == 'Concat']
    if len(concat_layers) > 0:
        compiled_concat_layers = [
            (value, index) for index, value in enumerate(compiled_net.layer)
            if value.type == 'Concat'
        ]
        concat_layers.reverse()
        if unify_concat_scales:
            for (l, index) in concat_layers:
                index_in_compiled_net = find_index_by_name(
                    l.name, compiled_concat_layers)
                assert (index_in_compiled_net >= 0)
                conv_inputs = get_input_convolutions(l, compiled_net,
                                                     index_in_compiled_net,
                                                     ["Convolution"],
                                                     ["Concat"])
                # TODO: support resonable cross-levels concat scale unify
                min_concat_scale = sys.float_info.max
                concat_input_indexes = []
                for conv_input in conv_inputs:
                    index_in_net = find_index_by_name(conv_input[1],
                                                      convolution_layers)
                    assert (index_in_net >= 0)
                    concat_input_indexes.append(index_in_net)
                    if new_net.layer[index_in_net].quantization_param.scale_out[
                            0] < min_concat_scale:
                        min_concat_scale = new_net.layer[
                            index_in_net].quantization_param.scale_out[0]

                for concat_input_index in concat_input_indexes:
                    new_net.layer[
                        concat_input_index].quantization_param.scale_out[
                            0] = min_concat_scale
        else:
            if concat_use_fp32:
                for (l, index) in concat_layers:
                    index_in_compiled_net = find_index_by_name(
                        l.name, compiled_concat_layers)
                    assert (index_in_compiled_net >= 0)
                    conv_inputs = get_input_convolutions(
                        l, compiled_net, index_in_compiled_net,
                        ["Convolution"])
                    for conv_input in conv_inputs:
                        index_in_net = find_index_by_name(
                            conv_input[1], convolution_layers)
                        assert (index_in_net >= 0)
                        new_net.layer[
                            index_in_net].quantization_param.bw_layer_out = 32
                        new_net.layer[
                            index_in_net].quantization_param.scale_out[:] = [
                                1.0
                            ]

    with open(model_path, 'w') as f:
        f.write(str(new_net))
Code example #10
File: main.py Project: walkerning/compression-tool
    def run(self):
        logger = logging.getLogger('dan.svdtool')
        import caffe
        import caffe.proto.caffe_pb2 as caffepb2

        if self.ori_solver is None:
            validate_status = self.validate_conf()
            if not validate_status:
                return False

        new_solver = caffepb2.NetParameter()
        new_solver.CopyFrom(self.ori_solver)
        new_solver.ClearField('layer')

        layer_index_dict = {}
        layer_index = 0
        # build the first split prototxt
        for i in range(len(self.ori_solver.layer)):
            layer = self.ori_solver.layer[i]
            if layer.name in self.svd_spec_dict:
                mid_layer_name = get_svd_layer_name(layer.name)
                svd_bottom_layer = modify_message(
                    layer,
                    in_place=False,
                    **{
                        'top': [mid_layer_name],
                        'inner_product_param.bias_term': False,
                        'param': [layer.param[0]]
                    }
                )
                svd_mid_layer = modify_message(
                    layer,
                    in_place=False,
                    **{
                        'bottom': [mid_layer_name],
                        'name': mid_layer_name
                    }
                )

                new_solver.layer.extend([svd_bottom_layer, svd_mid_layer])

                layer_index_dict[svd_bottom_layer.name] = layer_index
                layer_index += 1
                layer_index_dict[svd_mid_layer.name] = layer_index
            else:
                new_solver.layer.extend([layer])
                layer_index_dict[layer.name] = layer_index
            layer_index += 1

        # write out the split prototxt
        with open(self.output_proto, 'w') as output_proto_file:
            logger.info('Writing temporary prototxt to "%s".', self._log_output_proto)
            output_proto_file.write(text_format.MessageToString(new_solver))

        # build a new net from it to make the computation easier
        new_net = caffe.Net(self.output_proto, caffe.TEST)

        final_solver = caffepb2.NetParameter()
        text_format.Merge(open(self.output_proto, 'r').read(), final_solver)

        final_param_dict = {}
        for layer_name, param in self.ori_net.params.iteritems():
            if layer_name not in self.svd_spec_dict:
                continue
            svd_spec = self.svd_spec_dict[layer_name]

            logger.info("Start calculating svd of layer < %s >. Strategy: %s. Argument < %s >: %s",
                        layer_name, svd_spec['method'].method_name,
                        svd_spec['method'].method_arg, str(svd_spec['argument']))
            hide_layer_size, new_param_list = svd_spec['method'](svd_spec['argument'],
                                                                 param[0].data, net=self.ori_net, new_net=new_net)
            logger.info("Finish calculating svd of layer < %s >.", layer_name)

            svd_hidelayer_name = get_svd_layer_name(layer_name)

            # Store the final data
            final_param_dict[layer_name] = (new_param_list[1],)
            modify_message(
                final_solver.layer[layer_index_dict[layer_name]],
                in_place=True,
                **{
                    'inner_product_param.num_output': hide_layer_size
                }
            )
            # the bias is assigned to the following layer
            final_param_dict[svd_hidelayer_name] = (new_param_list[0], param[1])

        with open(self.output_proto, 'w') as output_proto_file:
            logger.info('Writing proto to file "%s".', self._log_output_proto)
            output_proto_file.write(text_format.MessageToString(final_solver))

        new_net = caffe.Net(self.output_proto, caffe.TEST)
        # USE THIS, as caffe will insert some layer such as split
        # the `layer_index_dict` built above is of no use here! TODO: remove that code
        layer_index_dict = {name: i for i, name in enumerate(new_net._layer_names)}

        # read the new prototxt back in, assign blobs to every layer that needs them, then save
        for layer_name, param in self.ori_net.params.iteritems():
            if layer_name not in self.svd_spec_dict:
                # other layers keep their original blobs
                update_blob_vec(new_net.layers[layer_index_dict[layer_name]].blobs,
                                param)
            else:
                svd_hidelayer_name = get_svd_layer_name(layer_name)
                update_blob_vec(new_net.layers[layer_index_dict[layer_name]].blobs,
                                final_param_dict[layer_name])
                update_blob_vec(new_net.layers[layer_index_dict[svd_hidelayer_name]].blobs,
                                final_param_dict[svd_hidelayer_name])

        logger.info('Writing caffe model to file "%s".', self._log_output_caffemodel)
        new_net.save(self.output_caffemodel)
        logger.info('Finish processing svd of fc layer! Prototxt in file "%s"'
                    '. Caffemodel in file "%s".\n', self._log_output_proto,
                    self._log_output_caffemodel,
                    extra={
                        'important': True
                    })

        return True
Code example #11
def AFFine_OPT_Create_Caffemodel(original_prototxt_path, original_model_path,
                                 optimized_prototxt_path, new_model_path):
    net_param = caffe_pb2.NetParameter()
    with open(original_prototxt_path, 'rt') as f:
        Parse(f.read(), net_param)

    param_layer_type_list = [layer.type for layer in net_param.layer]
    param_layer_name_list = [layer.name for layer in net_param.layer]
    target_layer_type = ['Convolution', 'InnerProduct']
    merge_layer_type = ['Scale', 'BatchNorm']

    caffe.set_mode_cpu()
    net = caffe.Net(original_prototxt_path, original_model_path, caffe.TEST)
    new_net = caffe.Net(optimized_prototxt_path, caffe.TEST)
    for param_name in new_net.params.keys():
        param_layer_idx = param_layer_name_list.index(param_name)
        param_layer_type = param_layer_type_list[param_layer_idx]
        if param_layer_type not in target_layer_type:
            # OTHER LAYERS
            for i in range(0, len(net.params[param_name])):
                new_net.params[param_name][i].data[
                    ...] = net.params[param_name][i].data[...]
        else:
            kernel_num = net.params[param_name][0].num
            new_net.params[param_name][0].data[
                ...] = net.params[param_name][0].data[...]
            if len(net.params[param_name]) == 2:
                new_net.params[param_name][1].data[
                    ...] = net.params[param_name][1].data[...]
            #else:
            #    print new_net.params[param_name][1].data[...]
            if param_layer_idx + 1 < len(param_layer_type_list):
                for i in range(param_layer_idx + 1,
                               len(param_layer_type_list)):
                    # CHECK : CONV + BN +SCALE / CONV + BN / IP + ...
                    affine_layer_type = param_layer_type_list[i]
                    affine_layer_name = param_layer_name_list[i]
                    if affine_layer_type in merge_layer_type:
                        # MERGE BN/SCALE
                        if affine_layer_type == "Scale":
                            if len(net_param.layer[i].bottom) >= 2:
                                # NOT In-place Scale
                                try:
                                    for j in range(
                                            0,
                                            len(net.params[affine_layer_name])
                                    ):
                                        new_net.params[affine_layer_name][
                                            j].data[...] = net.params[
                                                affine_layer_name][j].data[...]
                                except:
                                    # no parameter
                                    break
                            else:
                                # In-place Scale
                                scale = net.params[affine_layer_name][0].data
                                if len(net.params[affine_layer_name]) == 2:
                                    bias = net.params[affine_layer_name][
                                        1].data
                                else:
                                    bias = 0.0 * scale
                                for k in range(0, kernel_num):
                                    new_net.params[param_name][0].data[
                                        k] = new_net.params[param_name][
                                            0].data[k] * scale[k]
                                    new_net.params[param_name][1].data[
                                        k] = new_net.params[param_name][
                                            1].data[k] * scale[k] + bias[k]
                        elif affine_layer_type == "BatchNorm":
                            scale = net.params[affine_layer_name][2].data[0]
                            if scale != 0:
                                mean = net.params[affine_layer_name][
                                    0].data / scale
                                std = np.sqrt(
                                    net.params[affine_layer_name][1].data /
                                    scale)
                            else:
                                mean = net.params[affine_layer_name][0].data
                                std = np.sqrt(
                                    net.params[affine_layer_name][1].data)
                            for k in range(0, kernel_num):
                                new_net.params[param_name][0].data[
                                    k] = new_net.params[param_name][0].data[
                                        k] / std[k]
                                new_net.params[param_name][1].data[k] = (
                                    new_net.params[param_name][1].data[k] -
                                    mean[k]) / std[k]
                        else:
                            # TODO: merging other layer types is not supported yet
                            raise NotImplementedError(
                                "## TODO ## : Other layers haven't been supported yet. ##")
                    else:
                        # NOT BN or SCALE, then BREAK
                        break
            else:
                # LAST LAYER, then BREAK
                break
    new_net.save(new_model_path)
    print bcolors.OKGREEN + "BN SCALE OPT : Model at " + new_model_path + "." + bcolors.ENDC
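
A usage sketch: the optimized prototxt is expected to already have the BatchNorm/Scale layers stripped and bias_term enabled on the absorbing Convolution/InnerProduct layers; the file names are placeholders.

AFFine_OPT_Create_Caffemodel('train_bn.prototxt', 'train_bn.caffemodel',
                             'deploy_folded.prototxt', 'deploy_folded.caffemodel')
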
Code example #12
def extract_cnn_features(img_filelist, img_root, prototxt, caffemodel, feat_name, output_path=None, output_type=None, caffe_path=None, mean_file=None, gpu_index=0, pool_size=8):   
    if caffe_path:
        sys.path.append(os.path.join(caffe_path, 'python'))
    import caffe
    from caffe.proto import caffe_pb2
    
    imagenet_mean = np.array([104, 117, 123])
    if mean_file:
        imagenet_mean = np.load(mean_file)
        net_parameter = caffe_pb2.NetParameter()
        text_format.Merge(open(prototxt, 'r').read(), net_parameter)
        print("input dim",net_parameter.input_dim, len(net_parameter.input_dim),'input_shape',net_parameter.input_shape,len(net_parameter.input_shape))
        if len(net_parameter.input_dim) != 0:
            input_shape = net_parameter.input_dim
        else:
            input_shape = net_parameter.input_shape[0].dim
        imagenet_mean = caffe.io.resize_image(imagenet_mean.transpose((1, 2, 0)), input_shape[2:]).transpose((2, 0, 1))
    
    # INIT NETWORK
    caffe.set_mode_gpu()
    caffe.set_device(gpu_index)
    net = caffe.Classifier(prototxt,caffemodel,
        mean=imagenet_mean,
        raw_scale=255,
        channel_swap=(2, 1, 0))
    
    img_filenames = [os.path.abspath(img_filelist)]
    if img_filelist.endswith('.txt'):
        img_filenames = [img_root+'/'+x.rstrip() for x in open(img_filelist, 'r')]
    N = len(img_filenames)
    
    if output_path is not None and os.path.exists(output_path):
        if os.path.isdir(output_path):
            shutil.rmtree(output_path)
        else:
            os.remove(output_path)
        
    ## BATCH FORWARD 
    batch_size = int(net.blobs['data'].data.shape[0])
    batch_num = int(math.ceil(N/float(batch_size)))
    print('batch_num:', batch_num)

    def compute_feat_array(batch_idx):    
        start_idx = batch_size * batch_idx
        end_idx = min(batch_size * (batch_idx+1), N)
        print(datetime.datetime.now().time(), '- batch: ', batch_idx, 'of', batch_num, 'idx range:[', start_idx, end_idx, ']')
    
        input_data = []
        for img_idx in range(start_idx, end_idx):
            im = caffe.io.load_image(img_filenames[img_idx])
            input_data.append(im)
        while len(input_data) < batch_size:
            input_data.append(input_data[0])
        net.predict(input_data, oversample=False)
        feat_array = net.blobs[feat_name].data
        return feat_array
                
    if output_type is None:
        feat_list = []
        for batch_idx in range(batch_num):
            start_idx = batch_size * batch_idx
            end_idx = min(batch_size * (batch_idx+1), N)
            batch_count = end_idx - start_idx
            feat_array = compute_feat_array(batch_idx)
            for n in range(batch_count):
                feat_list.append(feat_array[n, ...].copy())
        return feat_list
    elif output_type == 'txt':
        with open(output_path, 'w') as output_file:
            for batch_idx in range(batch_num):
                start_idx = batch_size * batch_idx
                end_idx = min(batch_size * (batch_idx+1), N)
                batch_count = end_idx - start_idx
                feat_array = compute_feat_array(batch_idx)
                for n in range(batch_count):
                    output_file.write(' '.join([str(x) for x in feat_array[n, ...].flat])+'\n')
    elif output_type == 'lmdb':
        import lmdb
        env = lmdb.open(output_path, map_size=int(1e12))
        pool = Pool(pool_size)
        for batch_idx in range(batch_num):
            feat_array = compute_feat_array(batch_idx)        
            start_idx = batch_size * batch_idx
            end_idx = min(batch_size * (batch_idx+1), N)
            array4d_idx = [(feat_array, idx, idx+start_idx) for idx in range(end_idx-start_idx)]
            datum_strings = pool.map(_array4d_idx_to_datum_string, array4d_idx)
            with env.begin(write=True) as txn:
                for idx in range(end_idx-start_idx):
                    txn.put('{:0>10d}'.format(start_idx+idx).encode('utf-8'), datum_strings[idx])
        env.close()
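
A usage sketch, assuming an image list file with one relative path per line and a feature blob named 'fc7' in the deploy prototxt; all paths are placeholders, and with output_type left as None the features come back as a list of NumPy arrays.

feats = extract_cnn_features(img_filelist='images.txt',
                             img_root='/data/images',
                             prototxt='deploy.prototxt',
                             caffemodel='model.caffemodel',
                             feat_name='fc7',
                             gpu_index=0)
print(len(feats), feats[0].shape)
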
Code example #13
File: caffe_net.py Project: wan-h/Brainpower
def __init__(self):
    self.net = pb.NetParameter()
Code example #14
File: views.py Project: wyvern92/DIGITS
def image_classification_model_create():
    form = ImageClassificationModelForm()
    form.dataset.choices = get_datasets()
    form.standard_networks.choices = get_standard_networks()
    form.standard_networks.default = get_default_standard_network()
    form.previous_networks.choices = get_previous_networks()

    prev_network_snapshots = get_previous_network_snapshots()

    if not form.validate_on_submit():
        return render_template(
            'models/images/classification/new.html',
            form=form,
            previous_network_snapshots=prev_network_snapshots), 400

    datasetJob = scheduler.get_job(form.dataset.data)
    if not datasetJob:
        return 'Unknown dataset job_id "%s"' % form.dataset.data, 500

    job = None
    try:
        job = ImageClassificationModelJob(
            name=form.model_name.data,
            dataset_id=datasetJob.id(),
        )

        network = caffe_pb2.NetParameter()
        pretrained_model = None
        if form.method.data == 'standard':
            found = False
            networks_dir = os.path.join(os.path.dirname(digits.__file__),
                                        'standard-networks')
            for filename in os.listdir(networks_dir):
                path = os.path.join(networks_dir, filename)
                if os.path.isfile(path):
                    match = re.match(
                        r'%s.prototxt' % form.standard_networks.data, filename)
                    if match:
                        with open(path) as infile:
                            text_format.Merge(infile.read(), network)
                        found = True
                        break
            if not found:
                raise Exception('Unknown standard model "%s"' %
                                form.standard_networks.data)
        elif form.method.data == 'previous':
            old_job = scheduler.get_job(form.previous_networks.data)
            if not old_job:
                raise Exception('Job not found: %s' %
                                form.previous_networks.data)
            network.CopyFrom(old_job.train_task().network)
            for i, choice in enumerate(form.previous_networks.choices):
                if choice[0] == form.previous_networks.data:
                    epoch = int(request.form['%s-snapshot' %
                                             form.previous_networks.data])
                    if epoch != 0:
                        for filename, e in old_job.train_task().snapshots:
                            if e == epoch:
                                pretrained_model = filename
                                break

                        if pretrained_model is None:
                            raise Exception(
                                "For job %s, the selected pretrained_model for epoch %d is invalid!"
                                % (form.previous_networks.data, epoch))
                        if not os.path.exists(pretrained_model):
                            raise Exception(
                                "The pretrained_model for the selected epoch doesn't exist. It may have been deleted by another user/process. Please restart the server to reload the pretrained_model details."
                            )
                    break

        elif form.method.data == 'custom':
            text_format.Merge(form.custom_network.data, network)
            pretrained_model = form.custom_network_snapshot.data.strip()
        else:
            raise Exception('Unrecognized method: "%s"' % form.method.data)

        policy = {'policy': form.lr_policy.data}
        if form.lr_policy.data == 'fixed':
            pass
        elif form.lr_policy.data == 'step':
            policy['stepsize'] = form.lr_step_size.data
            policy['gamma'] = form.lr_step_gamma.data
        elif form.lr_policy.data == 'multistep':
            policy['stepvalue'] = form.lr_multistep_values.data
            policy['gamma'] = form.lr_multistep_gamma.data
        elif form.lr_policy.data == 'exp':
            policy['gamma'] = form.lr_exp_gamma.data
        elif form.lr_policy.data == 'inv':
            policy['gamma'] = form.lr_inv_gamma.data
            policy['power'] = form.lr_inv_power.data
        elif form.lr_policy.data == 'poly':
            policy['power'] = form.lr_poly_power.data
        elif form.lr_policy.data == 'sigmoid':
            policy['stepsize'] = form.lr_sigmoid_step.data
            policy['gamma'] = form.lr_sigmoid_gamma.data
        else:
            return 'Invalid policy', 404

        job.tasks.append(
            tasks.CaffeTrainTask(
                job_dir=job.dir(),
                dataset=datasetJob,
                train_epochs=form.train_epochs.data,
                learning_rate=form.learning_rate.data,
                lr_policy=policy,
                batch_size=form.batch_size.data,
                val_interval=form.val_interval.data,
                pretrained_model=pretrained_model,
                crop_size=form.crop_size.data,
                use_mean=form.use_mean.data,
                network=network,
            ))

        scheduler.add_job(job)
        return redirect(url_for('models_show', job_id=job.id()))

    except:
        if job:
            scheduler.delete_job(job)
        raise
Code example #15
0
def importPrototxt(request):
    if request.method == 'POST':
        try:
            prototxt = request.FILES['file']
        except Exception:
            return JsonResponse({
                'result': 'error',
                'error': 'No Prototxt model file found'
            })

        caffe_net = caffe_pb2.NetParameter()

        try:
            text_format.Merge(prototxt.read(), caffe_net)
        except Exception:
            return JsonResponse({
                'result': 'error',
                'error': 'Invalid Prototxt'
            })

        net = {}
        i = 0
        blobMap = {}
        net_name = caffe_net.name
        for layer in caffe_net.layer:
            id = "l" + str(i)
            input = []

            # this logic for phase has to be improved
            if len(layer.include):
                phase = layer.include[0].phase
            else:
                phase = None

            params = {}
            if (layer.type == 'Data'):
                params['source'] = layer.data_param.source
                params['batch_size'] = layer.data_param.batch_size
                params['backend'] = layer.data_param.backend
                params['scale'] = layer.transform_param.scale

            elif (layer.type == 'Convolution'):
                if len(layer.convolution_param.kernel_size):
                    params['kernel_h'] = layer.convolution_param.kernel_h or layer.convolution_param.kernel_size[0]
                    params['kernel_w'] = layer.convolution_param.kernel_w or layer.convolution_param.kernel_size[0]
                if len(layer.convolution_param.pad):
                    params['pad_h'] = layer.convolution_param.pad_h or layer.convolution_param.pad[0]
                    params['pad_w'] = layer.convolution_param.pad_w or layer.convolution_param.pad[0]
                if len(layer.convolution_param.stride):
                    params['stride_h'] = layer.convolution_param.stride_h or layer.convolution_param.stride[0]
                    params['stride_w'] = layer.convolution_param.stride_w or layer.convolution_param.stride[0]
                params['weight_filler'] = layer.convolution_param.weight_filler.type
                params['bias_filler'] = layer.convolution_param.bias_filler.type
                params['num_output'] = layer.convolution_param.num_output

            elif (layer.type == 'ReLU'):
                if (layer.top == layer.bottom):
                    params['inplace'] = True

            elif (layer.type == 'Pooling'):
                params['pad_h'] = layer.pooling_param.pad_h or layer.pooling_param.pad
                params['pad_w'] = layer.pooling_param.pad_w or layer.pooling_param.pad
                params['stride_h'] = layer.pooling_param.stride_h or layer.pooling_param.stride
                params['stride_w'] = layer.pooling_param.stride_w or layer.pooling_param.stride
                params['kernel_h'] = layer.pooling_param.kernel_h or layer.pooling_param.kernel_size
                params['kernel_w'] = layer.pooling_param.kernel_w or layer.pooling_param.kernel_size
                params['pool'] = layer.pooling_param.pool

            elif (layer.type == 'InnerProduct'):
                params['num_output'] = layer.inner_product_param.num_output
                params['weight_filler'] = layer.inner_product_param.weight_filler.type
                params['bias_filler'] = layer.inner_product_param.bias_filler.type

            elif (layer.type == 'SoftmaxWithLoss'):
                pass
            elif (layer.type == 'Accuracy'):
                pass
            elif (layer.type == 'Input'):
                params['dim'] = str(list(map(
                    int, layer.input_param.shape[0].dim)))[1:-1]
                # e.g. the string '64, 1, 28, 28'

            jsonLayer = {
                'info': {
                    'type': layer.type,
                    'phase': phase
                },
                'connection': {
                    'input': [],
                    'output': []
                },
                'params': params
            }

            # this logic was written for a general scenario (where train and test layers are mixed up)
            # But as we know, the only differences between the train and test phase are:
            # 1) input layer with a different source in test
            # 2) some accuracy layers in test
            # If we consider these constraints, the below logic can be vastly reduced
            for bottom_blob in layer.bottom:
                if (bottom_blob != 'label'):
                    # if the current layer has a phase
                    # then only connect with layers of same phase
                    # if it has no phase then connect with all layers
                    if jsonLayer['info']['phase'] is not None:
                        phase = jsonLayer['info']['phase']
                        for bottomLayerId in blobMap[bottom_blob]:
                            if (net[bottomLayerId]['info']['phase'] == phase
                                ) or (net[bottomLayerId]['info']['phase'] is
                                      None):
                                input.append(bottomLayerId)
                                net[bottomLayerId]['connection'][
                                    'output'].append(id)
                    else:
                        for bottomLayerId in blobMap[bottom_blob]:
                            input.append(bottomLayerId)
                            net[bottomLayerId]['connection']['output'].append(
                                id)
            for top_blob in layer.top:
                if (top_blob != 'label'):
                    if top_blob in blobMap:
                        if top_blob in layer.bottom:
                            # check for inplace operations
                            # layer has no phase
                            # then remove all layer history
                            # and add this one to the top
                            # layer has phase then remove all layers with same phase and append this
                            if jsonLayer['info']['phase'] is not None:
                                phase = jsonLayer['info']['phase']
                                for layerId in blobMap[bottom_blob]:
                                    if net[layerId]['info']['phase'] == phase:
                                        blobMap[bottom_blob].remove(layerId)
                                blobMap[top_blob].append(id)
                            else:
                                blobMap[top_blob] = [id]
                        else:
                            blobMap[top_blob].append(id)
                    else:
                        blobMap[top_blob] = [id]
            jsonLayer['connection']['input'] = input
            net[id] = jsonLayer
            i = i + 1

        return JsonResponse({
            'result': 'success',
            'net': net,
            'net_name': net_name
        })
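
For reference, each entry of the returned net dict follows the jsonLayer shape built above; the concrete values below are invented purely for illustration:

{
    'l0': {
        'info': {'type': 'Data', 'phase': 0},
        'connection': {'input': [], 'output': ['l1']},
        'params': {'source': 'examples/mnist/mnist_train_lmdb',
                   'batch_size': 64, 'backend': 1, 'scale': 0.00390625}
    }
}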
Code example #16
0
        action='store_true',
        help='replace ReLU layers by ReLU6'
    )
    parser.add_argument(
        '--tfpad',
        action='store_true',
        help='use tensorflow pad=SAME'
    )
    parser.add_argument(
        '--nobn',
        action='store_true',
        help='do not use batch_norm, default is false'
    )
    parser.add_argument(
        '-v','--version',
        type=str,
        default="2",
        help='MobileNet version, 1|2'
    )
    parser.add_argument(
        '-t','--type',
        type=str,
        default="ssdlite",
        help='ssd type, ssd|ssdlite'
    )
    FLAGS, unparsed = parser.parse_known_args()
    net_specs = caffe_pb2.NetParameter()
    net = CaffeNetGenerator(net_specs)
    net.generate(FLAGS)
    print(text_format.MessageToString(net_specs, float_format=".5g"))
Code example #17
0
import sys
from sys import argv
from utils import *
from config import *

sys.path.append(pycaffe_path)

import caffe
from caffe.proto import caffe_pb2
from google.protobuf.text_format import Parse, MessageToString

print(">> start activation quantization ...")

caffe.set_mode_cpu()

net_param = caffe_pb2.NetParameter()
with open(act_int8_prototxt, 'rt') as f:
    Parse(f.read(), net_param)
layer_num = len(net_param.layer)

relu_top = []
relu_layer_idx = []

for layer_idx in range(layer_num):
    layer = net_param.layer[layer_idx]
    if layer.type == 'ReLU':
        relu_top.append(layer.top[0])
        relu_layer_idx.append(layer_idx)

del net_param
Code example #18
0
def main(args):
    # Set default output file names
    if args.output_model is None:
        file_name = osp.splitext(args.model)[0]
        args.output_model = file_name + '_inference.prototxt'
    if args.output_weights is None:
        file_name = osp.splitext(args.weights)[0]
        args.output_weights = file_name + '_inference.caffemodel'
    with open(args.model) as f:
        model = caffe_pb2.NetParameter()
        pb.text_format.Parse(f.read(), model)

    # Determine the BN layers to be absorbed or replaced
    # Create the new layers
    new_layers = []
    absorbed, replaced = {}, {}
    for i, layer in enumerate(model.layer):
        if layer.type != 'BN':
            new_layers.append(layer)
            continue
        assert len(layer.bottom) == 1
        assert len(layer.top) == 1
        bottom_blob = layer.bottom[0]
        top_blob = layer.top[0]
        # Check whether the BN can be absorbed. There could be in-place layers
        # in between, for example conv -> relu -> bn; in such a case the BN
        # cannot be absorbed.
        can_be_absorbed = False
        for j in xrange(i - 1, -1, -1):
            if bottom_blob in model.layer[j].top:
                if model.layer[j].type not in ['Convolution', 'InnerProduct']:
                    can_be_absorbed = False
                    break
                else:
                    can_be_absorbed = True
                    bottom_layer = model.layer[j]
        if can_be_absorbed:
            # Rename the blob in the top layers
            for j in xrange(i + 1, len(model.layer)):
                update_blob_name(model.layer[j].bottom, top_blob, bottom_blob)
                update_blob_name(model.layer[j].top, top_blob, bottom_blob)
            if bottom_layer.type == 'Convolution':
                bottom_layer.convolution_param.bias_term = True
            elif bottom_layer.type == 'InnerProduct':
                bottom_layer.inner_product_param.bias_term = True
            absorbed[layer.name] = bottom_layer.name
        elif args.replace_by == 'affine':
            # Replace it with a scale/bias layer
            new_layer = caffe_pb2.LayerParameter()
            new_layer.name = layer.name + '_affine'
            new_layer.type = 'Scale'
            new_layer.bottom.extend([bottom_blob])
            new_layer.top.extend([top_blob])
            new_layer.scale_param.bias_term = True
            replaced[layer.name] = new_layer.name
            new_layers.append(new_layer)
        elif args.replace_by == 'frozen':
            # Freeze the BN layer
            layer.bn_param.frozen = True
            del (layer.param[:])
            param = caffe_pb2.ParamSpec()
            param.lr_mult = 0
            param.decay_mult = 0
            layer.param.extend([param] * 2)
            new_layers.append(layer)

    # Save the prototxt
    output_model = caffe_pb2.NetParameter()
    output_model.CopyFrom(model)
    del (output_model.layer[:])
    output_model.layer.extend(new_layers)
    with open(args.output_model, 'w') as f:
        f.write(pb.text_format.MessageToString(output_model))

    # Copy the parameters
    weights = caffe.Net(args.model, args.weights, caffe.TEST)
    output_weights = caffe.Net(args.output_model, caffe.TEST)
    for name in np.intersect1d(weights.params.keys(),
                               output_weights.params.keys()):
        # Some original conv / inner product layers do not have bias_term
        for i in xrange(
                min(len(weights.params[name]),
                    len(output_weights.params[name]))):
            output_weights.params[name][i].data[...] = \
                weights.params[name][i].data.copy()

    # Absorb the BN parameters
    for old, new in absorbed.iteritems():
        scale, bias, mean, tmp = [p.data.ravel() for p in weights.params[old]]
        invstd = tmp if args.bn_style == 'invstd' else \
                 np.power(tmp + args.epsilon, -0.5)
        W, b = output_weights.params[new]
        assert W.data.ndim == 4 or W.data.ndim == 2
        assert b.data.ndim == 1
        if W.data.ndim == 4:
            W.data[...] = (W.data * scale[:, None, None, None] *
                           invstd[:, None, None, None])
        elif W.data.ndim == 2:
            W.data[...] = W.data * scale[:, None] * invstd[:, None]
        b.data[...] = (b.data[...] - mean) * scale * invstd + bias

    # Fill up the affine layers
    for old, new in replaced.iteritems():
        scale, bias, mean, tmp = [p.data.ravel() for p in weights.params[old]]
        invstd = tmp if args.bn_style == 'invstd' else \
                 np.power(tmp + args.epsilon, -0.5)
        W, b = output_weights.params[new]
        assert W.data.ndim == 1
        assert b.data.ndim == 1
        W.data[...] = scale * invstd
        b.data[...] = bias - scale * mean * invstd

    # Check if the conversion is correct
    check(weights, output_weights)

    # Save the caffemodel
    output_weights.save(args.output_weights)
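
As a sanity check on the folding algebra used above (not part of the original script; the toy shapes and values are made up), the BN transform applied after an affine layer is itself affine with exactly the folded parameters computed in the loops:

import numpy as np

rng = np.random.RandomState(0)
W, b, v = rng.randn(4, 3), rng.randn(4), rng.randn(3)   # toy weights, bias, input
scale, bias, mean, var = rng.randn(4), rng.randn(4), rng.randn(4), rng.rand(4)
invstd = 1.0 / np.sqrt(var + 1e-5)

# BN applied after the affine layer ...
bn_out = scale * (np.dot(W, v) + b - mean) * invstd + bias
# ... equals the affine layer with folded weights and bias
W_folded = W * (scale * invstd)[:, None]
b_folded = (b - mean) * scale * invstd + bias
assert np.allclose(bn_out, np.dot(W_folded, v) + b_folded)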
Code example #19
0
def main():
    args = parse_args()
    net = caffe_pb2.NetParameter()
    text_format.Merge(open(args.input_net_proto_file).read(), net)
    print('Drawing net to %s' % args.output_image_file)
    caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir)
Code example #20
0
def convert(prototxt: str, caffemodel: str, dest: str = 'nnmodel.daq') -> None:
    assert isinstance(prototxt, str) and isinstance(
        caffemodel, str), 'prototxt and caffemodel should be filenames'

    skipped_layers = []

    params = caffe_pb2.NetParameter()

    with open(prototxt) as f:
        text_format.Merge(f.read(), params)

    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    out_filename = dest
    with open(out_filename, 'wb') as f:
        model_writer = ModelWriter(f)

        for i, layer in enumerate(params.layer):
            if layer.type not in SUPPORTED_LAYERS:
                raise ValueError("Not supported layer " + layer.type)

            if layer.name in skipped_layers:
                continue

            top_name = layer.top[0]

            if i == 0:
                if layer.type != "Input":
                    raise ValueError("First layer should be input")

                param = layer.input_param
                model_writer.add_input(top_name, param.shape[0].dim)

            elif layer.type == 'Convolution':
                bottom_name = layer.bottom[0]
                param = layer.convolution_param

                if len(param.pad) == 0:
                    pad_left = pad_right = pad_top = pad_bottom = 0
                elif len(param.pad) == 1:
                    pad_left = pad_right = pad_top = pad_bottom = param.pad[0]
                elif len(param.pad) == 2:
                    pad_top, pad_bottom, pad_left, pad_right = param.pad[
                        0], param.pad[0], param.pad[1], param.pad[1]
                else:
                    raise ValueError("Only conv 2d is supported.")
                if param.pad_h != 0:
                    pad_top = pad_bottom = param.pad_h
                if param.pad_w != 0:
                    pad_left = pad_right = param.pad_w

                if len(param.stride) == 0:
                    stride_x = stride_y = 1
                elif len(param.stride) == 1:
                    stride_x = stride_y = param.stride[0]
                elif len(param.stride) == 2:
                    stride_y, stride_x = param.stride[0], param.stride[1]
                else:
                    raise ValueError("Only conv 2d is supported.")
                if param.stride_h != 0:
                    stride_y = param.stride_h
                if param.stride_w != 0:
                    stride_x = param.stride_w

                if len(param.kernel_size) == 1:
                    filter_height = filter_width = param.kernel_size[0]
                elif len(param.kernel_size) == 2:
                    filter_height, filter_width = param.kernel_size[
                        0], param.kernel_size[1]
                else:
                    raise ValueError("Only conv 2d is supported.")
                if param.kernel_h != 0:
                    filter_height = param.kernel_h
                if param.kernel_w != 0:
                    filter_width = param.kernel_w

                if len(param.dilation) == 0:
                    dilation = 1
                elif len(param.dilation) == 1:
                    dilation = param.dilation[0]
                else:
                    raise ValueError("Only dilation == 1 is supported.")
                if dilation != 1:
                    raise ValueError(
                        "Dilation == {}, only dilation == 1 is supported.".
                        format(dilation))

                axis = param.axis
                if axis != 1:
                    raise ValueError("Only axis == 1 is supported.")
                num_output = param.num_output

                weights = net.params[layer.name][
                    0].data  # shape: [depth_out, depth_in, filter_height, filter_width]

                bias = net.params[layer.name][
                    1].data if param.bias_term else None  # np.zeros(swapped_weights.shape[0])
                activation = find_inplace_activation(params, top_name,
                                                     skipped_layers)

                group = param.group
                model_writer.add_conv(bottom_name, top_name, pad_left,
                                      pad_right, pad_top, pad_bottom, stride_x,
                                      stride_y, dilation, group, filter_height,
                                      filter_width, num_output, activation,
                                      weights, bias)

            elif layer.type == 'Pooling':
                param = layer.pooling_param
                bottom_name = layer.bottom[0]

                pad = param.pad
                pad_left = pad_right = pad_top = pad_bottom = pad
                if param.pad_h != 0:
                    pad_top = pad_bottom = param.pad_h
                if param.pad_w != 0:
                    pad_left = pad_right = param.pad_w
                stride = param.stride
                stride_x = stride_y = stride
                if param.stride_h != 0:
                    stride_y = param.stride_h
                if param.stride_w != 0:
                    stride_x = param.stride_w
                kernel_size = param.kernel_size
                filter_height = filter_width = kernel_size
                if param.kernel_h != 0:
                    filter_height = param.kernel_h
                if param.kernel_w != 0:
                    filter_width = param.kernel_w
                if param.global_pooling:
                    filter_height, filter_width = -1, -1
                activation = find_inplace_activation(params, top_name,
                                                     skipped_layers)

                if param.pool == CAFFE_POOL_MAX:
                    model_writer.add_max_pool(bottom_name, top_name, pad_left,
                                              pad_right, pad_top, pad_bottom,
                                              stride_x, stride_y,
                                              filter_height, filter_width,
                                              activation)
                elif param.pool == CAFFE_POOL_AVE:
                    model_writer.add_ave_pool(bottom_name, top_name, pad_left,
                                              pad_right, pad_top, pad_bottom,
                                              stride_x, stride_y,
                                              filter_height, filter_width,
                                              activation)
                else:
                    raise ValueError("Not supported pool type")

            elif layer.type == 'InnerProduct':
                bottom_name = layer.bottom[0]
                param = layer.inner_product_param
                input_dim = list(net.blobs[bottom_name].data.shape)
                weights = net.params[layer.name][0].data
                num_output = param.num_output
                if param.axis != 1:
                    raise ValueError(
                        "Only inner_product.axis == 1 is supported.")
                if param.transpose:
                    raise ValueError(
                        "Only inner_product.transpose == False is supported.")
                if len(input_dim) == 4:
                    input_dim[0] = param.num_output
                    weights = weights.reshape(input_dim)
                    weights = np.moveaxis(weights, 1, 3)
                bias = net.params[layer.name][
                    1].data if param.bias_term else None  # np.zeros(num_output)
                activation = find_inplace_activation(params, top_name,
                                                     skipped_layers)

                model_writer.add_FC(bottom_name, top_name, num_output,
                                    activation, weights, bias)

            elif layer.type == 'ReLU':
                bottom_name = layer.bottom[0]
                param = layer.relu_param
                model_writer.add_ReLU(bottom_name, top_name,
                                      param.negative_slope)

            elif layer.type == 'Softmax':
                bottom_name = layer.bottom[0]
                param = layer.softmax_param
                if param.axis != 1:
                    raise ValueError("Only softmax.axis == 1 is supported.")
                model_writer.add_softmax(bottom_name, top_name, 1.)

            elif layer.type == 'Dropout':
                pass

            elif layer.type == 'LRN':
                bottom_name = layer.bottom[0]
                param = layer.lrn_param
                local_size = param.local_size
                alpha = param.alpha
                beta = param.beta
                model_writer.add_LRN(bottom_name, top_name, local_size, alpha,
                                     beta)

            elif layer.type == 'Eltwise':
                bottom0 = layer.bottom[0]
                bottom1 = layer.bottom[1]
                param = layer.eltwise_param
                if param.operation == CAFFE_ELTWISE_SUM:
                    if np.count_nonzero(np.array(param.coeff) != 1) > 0:
                        raise ValueError(
                            "Only sums where all coefficients == 1 are supported.")
                    model_writer.add_add(bottom0, mw.TENSOR_OP, bottom1,
                                         top_name)
                elif param.operation == CAFFE_ELTWISE_PROD:
                    model_writer.add_mul(bottom0, mw.TENSOR_OP, bottom1,
                                         top_name)
                else:
                    raise ValueError("Unsupported EltwiseOp " +
                                     str(param.operation))

            elif layer.type == 'BatchNorm':
                bottom_name = layer.bottom[0]
                param = layer.batch_norm_param
                if not param.use_global_stat:
                    raise ValueError(
                        "Only batch_norm.use_global_stat == True is supported. "
                        "(Did you load the model in the train phase?)")
                scale_factor = net.params[layer.name][2].data[0]
                mean = net.params[layer.name][0].data / scale_factor
                var = net.params[layer.name][1].data / scale_factor + param.eps

                model_writer.add_add(bottom_name, mw.ARRAY_OP, -mean, top_name)
                # Append top into blobs so that the mul uses a new index as input;
                # it will be the index of the output blob of the add
                model_writer.add_mul(top_name, mw.ARRAY_OP, 1 / np.sqrt(var),
                                     top_name)

            elif layer.type == 'Scale':
                if len(layer.bottom) != 1:
                    raise ValueError(
                        "Only a learnable Scale layer is supported.")
                bottom_name = layer.bottom[0]
                param = layer.scale_param
                if param.num_axes != 1:
                    raise ValueError("Only scale.num_axes == 1 is supported.")
                multiplier = net.params[layer.name][0].data
                model_writer.add_mul(bottom_name, mw.ARRAY_OP, multiplier,
                                     top_name)
                if param.bias_term:
                    bias = net.params[layer.name][1].data
                    model_writer.add_add(top_name, mw.ARRAY_OP, bias, top_name)

            elif layer.type == 'Concat':
                param = layer.concat_param
                model_writer.add_concat(layer.bottom, top_name, param.axis)

            elif layer.type == 'Power':
                bottom_name = layer.bottom[0]
                param = layer.power_param
                power, scale, shift = param.power, param.scale, param.shift

                internal_bottom_name = bottom_name
                if scale != 1:
                    model_writer.add_mul(internal_bottom_name, mw.SCALAR_OP,
                                         scale, top_name)
                    internal_bottom_name = top_name
                if shift != 0:
                    model_writer.add_add(internal_bottom_name, mw.SCALAR_OP,
                                         shift, top_name)
                    internal_bottom_name = top_name
                if power != 1:
                    raise ValueError('Only power == 1 is supported')

        model_writer.save()
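
A hypothetical driver for the function above (the file names are placeholders, not taken from the source):

if __name__ == '__main__':
    convert('deploy.prototxt', 'weights.caffemodel', dest='model.daq')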
Code example #21
0
# In[ ]:

# reading weights by using net.params
nice_edge_detectors = net.params['conv1_1']
higher_level_filter = net.params['fc7']


# In[ ]:

from google.protobuf import text_format
from caffe.draw import get_pydot_graph
from caffe.proto import caffe_pb2
from IPython.display import display, Image 

_net = caffe_pb2.NetParameter()
f = open("gender_siamese_train_2.prototxt")
text_format.Merge(f.read(), _net)
display(Image(get_pydot_graph(_net,"TB").create_png()))


# In[ ]:

# does not work
# http://research.beenfrog.com/code/2015/03/28/read-leveldb-lmdb-for-caffe-with-python.html
def get_data_for_case_from_lmdb(lmdb_name, id):
    lmdb_env = lmdb.open(lmdb_name, readonly=True)
    lmdb_txn = lmdb_env.begin()

    raw_datum = lmdb_txn.get(id)
    datum = caffe.proto.caffe_pb2.Datum()
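    # --- The snippet is truncated here in the source; the lines below are only
    # --- a plausible continuation (an assumption, not the original code). ---
    datum.ParseFromString(raw_datum)
    feature = caffe.io.datum_to_array(datum)
    label = datum.label
    lmdb_env.close()
    return feature, label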
Code example #22
0
def main():
    args = parse_args()

    if not args.verbose:
        os.environ['GLOG_minloglevel'] = '2'

    import caffe
    from caffe.proto import caffe_pb2

    if args.cpu:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()

    if args.verbose:
        if args.cpu:
            print("CPU mode set.")
        else:
            print("GPU mode set.")

    errorSum = np.zeros(2)

    for i in range(args.tests_number):
        tmpNetProto = tempfile.NamedTemporaryFile()
        tmpNetProto.write(createPoolingNet(args.params))
        tmpNetProto.flush()
        net = caffe.Net(tmpNetProto.name, caffe.TEST)
        deploy = caffe_pb2.NetParameter()
        Merge((open(tmpNetProto.name, 'r').read()), deploy)
        tmpNetProto.close()
        sys.stdout.write("{}. ".format(i + 1))
        if not args.verbose:
            sys.stdout.write("Input shape: {}, {},{},{} ".format(
                net.blobs['data'].data.shape[0],
                net.blobs['data'].data.shape[2],
                net.blobs['data'].data.shape[3],
                net.blobs['data'].data.shape[1]))
            poolingParams = deploy.layer[1].pooling_param
            sys.stdout.write("Pooling params: {},{} ".format(
                poolingParams.kernel_size, poolingParams.stride))
            sys.stdout.write("Output shape: {},{},{}".format(
                net.blobs['pooling'].data.shape[2],
                net.blobs['pooling'].data.shape[3],
                net.blobs['pooling'].data.shape[1]))

        net.blobs['data'].data[...] = np.random.random_sample(
            net.blobs['data'].data.shape) - 0.5
        net.blobs['pooling'].diff[...] = np.random.random_sample(
            net.blobs['pooling'].diff.shape) - 0.5

        errorSum += comparePooling(net,
                                   deploy,
                                   args.forward,
                                   args.backward,
                                   args.convbase_executable,
                                   outputPreffix="pooling_",
                                   verbose=args.verbose)

    meanError = errorSum / args.tests_number

    print("\n#############################################################")
    print("Number of tests: {}\n".format(args.tests_number))
    if args.forward:
        print("Mean forward error: {}".format(meanError[0]))
    if args.backward:
        print("Mean backward error: {}".format(meanError[1]))
    print("#############################################################")
Code example #23
0
    parser.add_argument("--predict_net",
                        help="Caffe2 prediction net.",
                        default="predict_net.pb")
    parser.add_argument("--remove_legacy_pad",
                        help="Remove legacy pad \
                        (Only works for nets with one input blob)",
                        action="store_true",
                        default=False)
    parser.add_argument("--input_dims",
                        help="Dimension of input blob",
                        nargs='+',
                        type=int,
                        default=[])
    args = parser.parse_args()

    caffenet = caffe_pb2.NetParameter()
    caffenet_pretrained = caffe_pb2.NetParameter()
    input_proto = args.prototext
    input_caffemodel = args.caffemodel
    output_init_net = args.init_net
    output_predict_net = args.predict_net

    text_format.Merge(open(input_proto, 'r').read(), caffenet)
    caffenet_pretrained.ParseFromString(open(input_caffemodel, 'rb').read())
    #for l in caffenet_pretrained.layer:
    #    log.info(" the pretrained layer is {}".format(l))
    net, pretrained_params = TranslateModel(
        caffenet,
        caffenet_pretrained,
        is_test=True,
        remove_legacy_pad=args.remove_legacy_pad,
Code example #24
0
                        metavar='image-params',
                        help='The parameters for image preprocess')
    parser.add_argument('--ignore_layer_list',
                        metavar='ignore-layer-list',
                        help='Ignore these layers because they are already tuned')
    args = parser.parse_args()

    caffe.set_mode_gpu()
    caffe.set_device(0)
    # Call it when you are using yolo
    # caffe.yolo()

    if not os.path.isdir(args.output_path):
        os.mkdir(args.output_path)

    param = caffe_pb2.NetParameter()
    text_format.Merge(open(args.proto).read(), param)
    first = True
    for layer in param.layer:
        if 'data' == layer.name:
            continue
        #if 'BatchNorm' == layer.type:
        if layer.type in [
                'BatchNorm', 'Data', 'Input', 'Python', 'Softmax', 'PriorBox',
                'Reshape', 'Flatten', 'DetectionOutput'
        ]:
            continue
        if layer.name in args.ignore_layer_list:
            continue

        best_model, best_proto = run_tune(args, layer.name, sys.float_info.max)
Code example #25
0
File: import_prototxt.py Project: rahul-y/Fabrik-Pro
def import_prototxt(request):
    prototxtIsText = False
    if request.method == 'POST':
        if ('file' in request.FILES) and \
           (request.FILES['file'].content_type == 'application/octet-stream' or
                request.FILES['file'].content_type == 'text/plain'):
            try:
                prototxt = request.FILES['file']
            except Exception:
                return JsonResponse({
                    'result': 'error',
                    'error': 'No Prototxt model file found'
                })
        elif 'sample_id' in request.POST:
            try:
                prototxt = open(
                    os.path.join(settings.BASE_DIR, 'example', 'caffe',
                                 request.POST['sample_id'] + '.prototxt'), 'r')
            except Exception:
                return JsonResponse({
                    'result': 'error',
                    'error': 'No Prototxt model file found'
                })
        elif 'config' in request.POST:
            prototxt = request.POST['config']
            prototxtIsText = True
        elif 'url' in request.POST:
            try:
                url = urlparse(request.POST['url'])
                if url.netloc == 'github.com':
                    url = url._replace(netloc='raw.githubusercontent.com')
                    url = url._replace(path=url.path.replace('blob/', ''))
                prototxt = urllib2.urlopen(url.geturl())
            except Exception as ex:
                return JsonResponse({
                    'result': 'error',
                    'error': 'Invalid URL\n' + str(ex)
                })
        caffe_net = caffe_pb2.NetParameter()

        # try to convert to new prototxt
        try:
            if prototxtIsText is True:
                content = prototxt
            else:
                content = prototxt.read()
            tempFile = tempfile.NamedTemporaryFile()
            tempFile.write(content)
            tempFile.seek(0)
            subprocess.call(
                "~/caffe/caffe/build/tools/upgrade_net_proto_text " +
                tempFile.name + " " + tempFile.name,
                shell=True)
            tempFile.seek(0)
            content = tempFile.read()
            tempFile.close()
        except Exception as ex:
            return JsonResponse({
                'result': 'error',
                'error': 'Invalid Prototxt\n' + str(ex)
            })

        try:
            text_format.Merge(content, caffe_net)
        except Exception as ex:
            return JsonResponse({
                'result': 'error',
                'error': 'Invalid Prototxt\n' + str(ex)
            })

        net = {}
        i = 0
        blobMap = {}
        net_name = caffe_net.name
        hasTransformParam = ['ImageData', 'Data', 'WindowData']
        for layer in caffe_net.layer:
            id = "l" + str(i)
            input = []

            # this logic for phase has to be improved
            if len(layer.include):
                if (layer.include[0].HasField('phase')):
                    phase = layer.include[0].phase
                else:
                    phase = None
            else:
                phase = None

            params = {}
            if (layer.type in hasTransformParam):
                params['scale'] = layer.transform_param.scale
                params['mirror'] = layer.transform_param.mirror
                params['crop_size'] = layer.transform_param.crop_size
                if (layer.transform_param.mean_file != ''):
                    params['mean_file'] = layer.transform_param.mean_file
                elif (layer.transform_param.mean_value):
                    params['mean_value'] = str(
                        list(map(int, layer.transform_param.mean_value)))[1:-1]
                params['force_color'] = layer.transform_param.force_color
                params['force_gray'] = layer.transform_param.force_gray

            if layer.type in layer_dict:
                layer_params = layer_dict[layer.type](layer)
                params.update(layer_params)

            jsonLayer = {
                'info': {
                    'type': layer.type,
                    'phase': phase
                },
                'connection': {
                    'input': [],
                    'output': []
                },
                'params': params
            }
            # this logic was written for a scenario where train and test layers are mixed up
            # But as we know, the only differences between the train and test phase are:
            # 1) input layer with different source in test
            # 2) some accuracy layers in test
            # If we consider these constraints, the below logic can be vastly reduced
            for bottom_blob in layer.bottom:
                if (bottom_blob != 'label'):
                    # if the current layer has a phase
                    # then only connect with layers of same phase
                    # if it has no phase then connect with all layers
                    if jsonLayer['info']['phase'] is not None:
                        phase = jsonLayer['info']['phase']
                        for bottomLayerId in blobMap[bottom_blob]:
                            if (net[bottomLayerId]['info']['phase'] == phase) or\
                                    (net[bottomLayerId]['info']['phase'] is None):
                                input.append(bottomLayerId)
                                net[bottomLayerId]['connection'][
                                    'output'].append(id)
                    else:
                        for bottomLayerId in blobMap[bottom_blob]:
                            input.append(bottomLayerId)
                            net[bottomLayerId]['connection']['output'].append(
                                id)
            for top_blob in layer.top:
                if (top_blob != 'label'):
                    if top_blob in blobMap:
                        if top_blob in layer.bottom:
                            # check for in-place operations
                            # layer has no phase
                            # then remove all layer history
                            # and add this one to the top
                            # layer has phase then remove all layers with same phase and append this
                            if jsonLayer['info']['phase'] is not None:
                                phase = jsonLayer['info']['phase']
                                for layerId in blobMap[bottom_blob]:
                                    if net[layerId]['info']['phase'] == phase:
                                        blobMap[bottom_blob].remove(layerId)
                                blobMap[top_blob].append(id)
                            else:
                                blobMap[top_blob] = [id]
                        else:
                            blobMap[top_blob].append(id)
                    else:
                        blobMap[top_blob] = [id]
            jsonLayer['connection']['input'] = input
            net[id] = jsonLayer
            i = i + 1

        return JsonResponse({
            'result': 'success',
            'net': net,
            'net_name': net_name
        })
Code example #26
0
def read_network_dag(processed_deploy_prototxt):
    """
    Reads from the caffe prototxt the network structure
    :param processed_deploy_prototxt: name of the prototxt to load; preferably the prototxt should
     first be processed by a call to process_network_proto()
    :return: network_def, layer_name_to_record, top_to_layers
    network_def: caffe network structure, gives access to *all* the network information
    layer_name_to_record: *ordered* dictionary which maps a layer name to a structure which
      describes the layer parameters in a simple form
    top_to_layers: dictionary which maps a blob name to an ordered list of the layers which output it;
     when a top is used several times, as with in-place layers, the list contains all such layers
     in order of appearance
    """

    from caffe.proto import caffe_pb2
    from google.protobuf import text_format  # pylint: disable=relative-import
    from collections import OrderedDict

    # load prototxt file
    network_def = caffe_pb2.NetParameter()
    with open(processed_deploy_prototxt, 'r') as proto_file:
        text_format.Merge(str(proto_file.read()), network_def)

    # map layer name to layer record
    layer_name_to_record = OrderedDict()
    for layer_def in network_def.layer:
        if (len(layer_def.include) == 0) or \
           (caffe_pb2.TEST in [item.phase for item in layer_def.include]):

            layer_name_to_record[layer_def.name] = LayerRecord(layer_def)

    top_to_layers = dict()
    for layer in network_def.layer:
        # no specific phase, or TEST phase is specifically asked for
        if (len(layer.include) == 0) or (caffe_pb2.TEST in [
                item.phase for item in layer.include
        ]):
            for top in layer.top:
                if top not in top_to_layers:
                    top_to_layers[top] = list()
                top_to_layers[top].append(layer.name)

    # find parents and children of all layers
    for child_layer_name in layer_name_to_record.keys():  # pylint: disable=too-many-nested-blocks
        child_layer_def = layer_name_to_record[child_layer_name]
        for bottom in child_layer_def.bottoms:
            if bottom in top_to_layers:
                for parent_layer_name in top_to_layers[bottom]:
                    if parent_layer_name in layer_name_to_record:
                        parent_layer_def = layer_name_to_record[
                            parent_layer_name]
                        if parent_layer_def not in child_layer_def.parents:
                            child_layer_def.parents.append(parent_layer_def)
                        if child_layer_def not in parent_layer_def.children:
                            parent_layer_def.children.append(child_layer_def)

    # update filter, stride, pad for maxout "structures"
    for layer_name in layer_name_to_record.keys():
        layer_def = layer_name_to_record[layer_name]
        if layer_def.type == 'Eltwise' and \
           len(layer_def.parents) == 1 and \
           layer_def.parents[0].type == 'Slice' and \
           len(layer_def.parents[0].parents) == 1 and \
           layer_def.parents[0].parents[0].type in ['Convolution', 'InnerProduct']:
            layer_def.filter = layer_def.parents[0].parents[0].filter
            layer_def.stride = layer_def.parents[0].parents[0].stride
            layer_def.pad = layer_def.parents[0].parents[0].pad

    return network_def, layer_name_to_record, top_to_layers
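
A minimal usage sketch (the prototxt name is a placeholder; only attributes visible in the code above are used):

net_def, records, top_to_layers = read_network_dag('deploy_processed.prototxt')
for name, rec in records.items():
    print('{} ({}): {} parents, {} children'.format(
        name, rec.type, len(rec.parents), len(rec.children)))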
Code example #27
0
File: caffe_train.py Project: xiaozhuka/DIGITS
    def save_prototxt_files(self):
        """
        Save solver, train_val and deploy files to disk
        """

        has_val_set = self.dataset.val_db_task() is not None

        ### Check what has been specified in self.network

        tops = []
        bottoms = {}
        train_data_layer = None
        val_data_layer = None
        hidden_layers = caffe_pb2.NetParameter()
        loss_layer = None
        accuracy_layer = None
        for layer in self.network.layer:
            assert layer.type not in ['MemoryData', 'HDF5Data', 'ImageData'], 'unsupported data layer type'
            if layer.type == 'Data':
                for rule in layer.include:
                    if rule.phase == caffe_pb2.TRAIN:
                        assert train_data_layer is None, 'cannot specify two train data layers'
                        train_data_layer = layer
                    elif rule.phase == caffe_pb2.TEST:
                        assert val_data_layer is None, 'cannot specify two test data layers'
                        val_data_layer = layer
            elif layer.type == 'SoftmaxWithLoss':
                assert loss_layer is None, 'cannot specify two loss layers'
                loss_layer = layer
            elif layer.type == 'Accuracy':
                assert accuracy_layer is None, 'cannot specify two accuracy layers'
                accuracy_layer = layer
            else:
                hidden_layers.layer.add().CopyFrom(layer)
                if len(layer.bottom) == 1 and len(layer.top) == 1 and layer.bottom[0] == layer.top[0]:
                    pass
                else:
                    for top in layer.top:
                        tops.append(top)
                    for bottom in layer.bottom:
                        bottoms[bottom] = True

        assert loss_layer is not None, 'must specify a SoftmaxWithLoss layer'
        assert accuracy_layer is not None, 'must specify an Accuracy layer'
        if not has_val_set:
            self.logger.warning('Discarding Data layer for validation')
            val_data_layer = None

        output_name = None
        for name in tops:
            if name not in bottoms:
                assert output_name is None, 'network cannot have more than one output'
                output_name = name
        assert output_name is not None, 'network must have one output'
        for layer in hidden_layers.layer:
            if output_name in layer.top and layer.type == 'InnerProduct':
                layer.inner_product_param.num_output = len(self.labels)
                break

        if train_data_layer is None:
            assert val_data_layer is None, 'cannot specify a test data layer without a train data layer'

        ### Write train_val file

        train_val_network = caffe_pb2.NetParameter()
        default_batch_size = 16 #XXX Reasonable default?

        # data layers
        if train_data_layer is not None:
            if train_data_layer.HasField('data_param'):
                assert not train_data_layer.data_param.HasField('source'), "don't set the data_param.source"
                assert not train_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
            max_crop_size = min(self.dataset.image_dims[0], self.dataset.image_dims[1])
            if self.crop_size:
                assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size'
                train_data_layer.transform_param.crop_size = self.crop_size
            elif train_data_layer.transform_param.HasField('crop_size'):
                cs = train_data_layer.transform_param.crop_size
                if cs > max_crop_size:
                    # don't throw an error here
                    cs = max_crop_size
                train_data_layer.transform_param.crop_size = cs
                self.crop_size = cs
            train_val_network.layer.add().CopyFrom(train_data_layer)
            train_data_layer = train_val_network.layer[-1]
            if val_data_layer is not None and has_val_set:
                if val_data_layer.HasField('data_param'):
                    assert not val_data_layer.data_param.HasField('source'), "don't set the data_param.source"
                    assert not val_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
                if self.crop_size:
                    # use our error checking from the train layer
                    val_data_layer.transform_param.crop_size = self.crop_size
                train_val_network.layer.add().CopyFrom(val_data_layer)
                val_data_layer = train_val_network.layer[-1]
        else:
            train_data_layer = train_val_network.layer.add(type = 'Data', name = 'data')
            train_data_layer.top.append('data')
            train_data_layer.top.append('label')
            train_data_layer.include.add(phase = caffe_pb2.TRAIN)
            train_data_layer.data_param.batch_size = default_batch_size
            if self.crop_size:
                train_data_layer.transform_param.crop_size = self.crop_size
            if has_val_set:
                val_data_layer = train_val_network.layer.add(type = 'Data', name = 'data')
                val_data_layer.top.append('data')
                val_data_layer.top.append('label')
                val_data_layer.include.add(phase = caffe_pb2.TEST)
                val_data_layer.data_param.batch_size = default_batch_size
                if self.crop_size:
                    val_data_layer.transform_param.crop_size = self.crop_size
        train_data_layer.data_param.source = self.dataset.path(self.dataset.train_db_task().db_name)
        train_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
        if val_data_layer is not None:
            val_data_layer.data_param.source = self.dataset.path(self.dataset.val_db_task().db_name)
            val_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
        if self.use_mean:
            train_data_layer.transform_param.mean_file = self.dataset.path(self.dataset.train_db_task().mean_file)
            if val_data_layer is not None:
                val_data_layer.transform_param.mean_file = self.dataset.path(self.dataset.train_db_task().mean_file)
        if self.batch_size:
            train_data_layer.data_param.batch_size = self.batch_size
            if val_data_layer is not None:
                val_data_layer.data_param.batch_size = self.batch_size
        else:
            if not train_data_layer.data_param.HasField('batch_size'):
                train_data_layer.data_param.batch_size = default_batch_size
            if val_data_layer is not None and not val_data_layer.data_param.HasField('batch_size'):
                val_data_layer.data_param.batch_size = default_batch_size

        # hidden layers
        train_val_network.MergeFrom(hidden_layers)

        # output layers
        if loss_layer is not None:
            train_val_network.layer.add().CopyFrom(loss_layer)
            loss_layer = train_val_network.layer[-1]
        else:
            loss_layer = train_val_network.layer.add(
                type = 'SoftmaxWithLoss',
                name = 'loss')
            loss_layer.bottom.append(output_name)
            loss_layer.bottom.append('label')
            loss_layer.top.append('loss')

        if accuracy_layer is not None:
            train_val_network.layer.add().CopyFrom(accuracy_layer)
            accuracy_layer = train_val_network.layer[-1]
        elif self.dataset.val_db_task():
            accuracy_layer = train_val_network.layer.add(
                    type = 'Accuracy',
                    name = 'accuracy')
            accuracy_layer.bottom.append(output_name)
            accuracy_layer.bottom.append('label')
            accuracy_layer.top.append('accuracy')
            accuracy_layer.include.add(phase = caffe_pb2.TEST)

        with open(self.path(self.train_val_file), 'w') as outfile:
            text_format.PrintMessage(train_val_network, outfile)

        ### Write deploy file

        deploy_network = caffe_pb2.NetParameter()

        # input
        deploy_network.input.append('data')
        deploy_network.input_dim.append(1)
        deploy_network.input_dim.append(self.dataset.image_dims[2])
        if self.crop_size:
            deploy_network.input_dim.append(self.crop_size)
            deploy_network.input_dim.append(self.crop_size)
        else:
            deploy_network.input_dim.append(self.dataset.image_dims[0])
            deploy_network.input_dim.append(self.dataset.image_dims[1])

        # hidden layers
        deploy_network.MergeFrom(hidden_layers)

        # output layers
        prob_layer = deploy_network.layer.add(
                type = 'Softmax',
                name = 'prob')
        prob_layer.bottom.append(output_name)
        prob_layer.top.append('prob')

        with open(self.path(self.deploy_file), 'w') as outfile:
            text_format.PrintMessage(deploy_network, outfile)

        ### Write solver file

        solver = caffe_pb2.SolverParameter()
        solver.net = self.train_val_file
        if config_option('gpu_list'):
            solver.solver_mode = caffe_pb2.SolverParameter.GPU
        else:
            solver.solver_mode = caffe_pb2.SolverParameter.CPU
        solver.snapshot_prefix = self.snapshot_prefix

        # Epochs -> Iterations
        train_iter = int(math.ceil(float(self.dataset.train_db_task().entries_count) / train_data_layer.data_param.batch_size))
        solver.max_iter = train_iter * self.train_epochs
        solver.snapshot = train_iter
        if self.dataset.val_db_task() and self.val_interval:
            solver.test_iter.append(int(math.ceil(float(self.dataset.val_db_task().entries_count) / val_data_layer.data_param.batch_size)))
            solver.test_interval = train_iter * self.val_interval

        # Learning rate
        solver.base_lr = self.learning_rate
        solver.lr_policy = self.lr_policy['policy']
        scale = float(solver.max_iter)/100.0
        if solver.lr_policy == 'fixed':
            pass
        elif solver.lr_policy == 'step':
            # stepsize = stepsize * scale
            solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
            solver.gamma = self.lr_policy['gamma']
        elif solver.lr_policy == 'multistep':
            for value in self.lr_policy['stepvalue']:
                # stepvalue = stepvalue * scale
                solver.stepvalue.append(int(math.ceil(float(value) * scale)))
            solver.gamma = self.lr_policy['gamma']
        elif solver.lr_policy == 'exp':
            # gamma = gamma^(1/scale)
            solver.gamma = math.pow(self.lr_policy['gamma'], 1.0/scale)
        elif solver.lr_policy == 'inv':
            # gamma = gamma / scale
            solver.gamma = self.lr_policy['gamma'] / scale
            solver.power = self.lr_policy['power']
        elif solver.lr_policy == 'poly':
            solver.power = self.lr_policy['power']
        elif solver.lr_policy == 'sigmoid':
            # gamma = -gamma / scale
            solver.gamma = -1.0 * self.lr_policy['gamma'] / scale
            # stepsize = stepsize * scale
            solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
        else:
            raise Exception('Unknown lr_policy: "%s"' % solver.lr_policy)

        # go with the suggested defaults
        solver.momentum = 0.9
        solver.weight_decay = 0.0005

        # Display 8x per epoch, or once per 5000 images, whichever is more frequent
        solver.display = min(
                int(math.floor(float(solver.max_iter) / (self.train_epochs * 8))),
                int(math.ceil(5000.0 / train_data_layer.data_param.batch_size))
                )

        with open(self.path(self.solver_file), 'w') as outfile:
            text_format.PrintMessage(solver, outfile)
        self.solver = solver # save for later

        return True
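The epoch-to-iteration conversion and the lr_policy scaling in the solver section above are easier to follow with concrete numbers. A minimal, self-contained sketch with made-up values (dataset size, batch size, epoch count and stepsize are all hypothetical); the policy parameters appear to be expressed as a percentage of total training, and `scale` converts them to iterations:

import math

entries_count = 4500                 # hypothetical number of training images
batch_size = 100
train_epochs = 30

train_iter = int(math.ceil(float(entries_count) / batch_size))   # 45 iterations per epoch
max_iter = train_iter * train_epochs                              # 1350 iterations in total

scale = float(max_iter) / 100.0                                   # 13.5 iterations per "percent"
stepsize_percent = 33                                             # hypothetical 'step' policy value
stepsize = int(math.ceil(stepsize_percent * scale))               # ceil(445.5) -> 446 iterations

print(max_iter, stepsize)                                         # 1350 446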
Code Example #28
0
File: convert_model.py  Project: stoneyang/deep_share
# NOTE: the imports and the argparse setup (creation of `parser`) are truncated in
# the source; the snippet expects argparse, numpy (as np) and caffe_pb2 to be
# imported, and `args` to expose .caffemodel and .layer_types (used below).
def parse_args():
    args = parser.parse_args()
    return args

if __name__ == '__main__':

    args = parse_args()

    print('Called with args:')
    print(args)
    # load files
    print('Loading caffemodel: {}'.format(args.caffemodel))
    with open(args.caffemodel, 'rb') as f:
        binary_content = f.read()

    protobuf = caffe_pb2.NetParameter()
    protobuf.ParseFromString(binary_content)
    layers = protobuf.layer

    params = {}
    for layer in layers:
        if layer.type in args.layer_types:
            print(layer.name, layer.type)
            params[layer.name+'_w'] = np.reshape(np.array(layer.blobs[0].data), layer.blobs[0].shape.dim)
            params[layer.name+'_b'] = np.reshape(np.array(layer.blobs[1].data), layer.blobs[1].shape.dim)
            print(params[layer.name+'_w'].shape, params[layer.name+'_b'].shape)

    # save the layers into a file
    # if the file name is .pkl, save to pickle file.
    # if the file name is .mat, save to mat file.
    # otherwise, report file type not recognized.
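The comments above describe a save step whose code is truncated in the source. A minimal sketch of what it could look like; `args.output` is a hypothetical argument name, and scipy is assumed to be available for the .mat case:

import pickle
import scipy.io as sio

output_path = args.output            # hypothetical: output file name from the command line
if output_path.endswith('.pkl'):
    with open(output_path, 'wb') as f:
        pickle.dump(params, f)       # params maps '<layer>_w' / '<layer>_b' to numpy arrays
elif output_path.endswith('.mat'):
    sio.savemat(output_path, params)
else:
    print('File type not recognized: {}'.format(output_path))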
Code Example #29
0
    def get_netparameter(self, model):
        # `cp` and `pb` are the caller's module aliases (presumably caffe.proto.caffe_pb2
        # and google.protobuf); they are not imported within this snippet.
        with open(model) as f:
            net = cp.NetParameter()
            pb.text_format.Parse(f.read(), net)
            return net
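A standalone equivalent of the helper above (a sketch, not the original class method), spelling out the canonical imports that the `cp` and `pb` aliases presumably refer to:

from caffe.proto import caffe_pb2
from google.protobuf import text_format

def get_netparameter(model_path):
    # Parse a text-format prototxt into a NetParameter message.
    with open(model_path) as f:
        net = caffe_pb2.NetParameter()
        text_format.Parse(f.read(), net)
        return net

# net = get_netparameter('train_val.prototxt')   # hypothetical path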
Code Example #30
0
    def _build_rnn_network(self,
                           wtype='xavier',
                           std=0.01,
                           batchsize=100,
                           numstep=24):
        network = caffe_pb2.NetParameter()
        network.force_backward = True
        network.name = 'rotation_rnn'
        network.input.append('images')
        network.input_dim.append(batchsize)
        network.input_dim.append(3)
        network.input_dim.append(64)
        network.input_dim.append(64)
        for t in range(numstep):
            network.input.append('rotations%d' % t)
            network.input_dim.append(batchsize)
            network.input_dim.append(3)
            network.input_dim.append(1)
            network.input_dim.append(1)

        layers = []
        name_generator = self._connection_name_generator()

        tensor_view = []
        relu2_view = []
        relu2_view_split = []
        concat = []
        dec_fc1 = []
        dec_relu1 = []
        dec_fc2 = []
        dec_relu2 = []
        dec_relu2_split = []
        dec_img_fc1 = []
        dec_img_relu1 = []
        dec_img_fold = []
        dec_img_up1 = []
        dec_img_conv1 = []
        dec_img_relu2 = []
        dec_img_up2 = []
        dec_img_conv2 = []
        dec_img_relu3 = []
        dec_img_up3 = []
        dec_img_conv3 = []
        dec_mask_fc1 = []
        dec_mask_relu1 = []
        dec_mask_fold = []
        dec_mask_up1 = []
        dec_mask_conv1 = []
        dec_mask_relu2 = []
        dec_mask_up2 = []
        dec_mask_conv2 = []
        dec_mask_relu3 = []
        dec_mask_up3 = []
        dec_mask_conv3 = []

        conv1 = self._make_conv_layer(network,
                                      kernel_size=5,
                                      stride=2,
                                      pad=2,
                                      num_output=64,
                                      shared_name='conv1')
        conv1.bottom.append('images')
        relu1 = self._make_relu_layer(network)
        conv2 = self._make_conv_layer(network,
                                      kernel_size=5,
                                      stride=2,
                                      pad=2,
                                      num_output=128,
                                      shared_name='conv2')
        relu2 = self._make_relu_layer(network)
        conv3 = self._make_conv_layer(network,
                                      kernel_size=5,
                                      stride=2,
                                      pad=2,
                                      num_output=256,
                                      shared_name='conv3')
        relu3 = self._make_relu_layer(network)
        fc1 = self._make_inner_product_layer(network,
                                             num_output=1024,
                                             shared_name='fc1')
        relu4 = self._make_relu_layer(network)
        fc2 = self._make_inner_product_layer(network,
                                             num_output=1024,
                                             shared_name='fc2')
        relu5 = self._make_relu_layer(network)

        enc_split = self._make_split_layer(network)

        fc1_id = self._make_inner_product_layer(network,
                                                num_output=512,
                                                shared_name='fc1_id')
        relu1_id = self._make_relu_layer(network)
        id_split = self._make_split_layer(network)

        fc1_view = self._make_inner_product_layer(network,
                                                  num_output=512,
                                                  shared_name='fc1_view')
        relu1_view = self._make_relu_layer(network)

        tensor_view.append(
            self._make_tensor_layer(network,
                                    num_output=512,
                                    shared_name='tensor_view'))
        tensor_view[-1].bottom.append('rotations0')
        relu2_view.append(self._make_relu_layer(network))
        relu2_view_split.append(self._make_split_layer(network))

        connections = []
        connections.append(
            (conv1.name, (conv1.top, relu1.bottom, relu1.top, conv2.bottom)))
        connections.append(
            (conv2.name, (conv2.top, relu2.bottom, relu2.top, conv3.bottom)))
        connections.append(
            (conv3.name, (conv3.top, relu3.bottom, relu3.top, fc1.bottom)))
        connections.append(
            (fc1.name, (fc1.top, relu4.bottom, relu4.top, fc2.bottom)))
        connections.append((fc2.name, (fc2.top, relu5.bottom)))
        connections.append((relu5.name, (relu5.top, enc_split.bottom)))
        connections.append((enc_split.name, (enc_split.top, fc1_id.bottom)))
        connections.append((fc1_id.name, (fc1_id.top, relu1_id.bottom,
                                          relu1_id.top, id_split.bottom)))
        connections.append((enc_split.name, (enc_split.top, fc1_view.bottom)))
        connections.append(
            (fc1_view.name, (fc1_view.top, relu1_view.bottom, relu1_view.top,
                             tensor_view[-1].bottom)))

        for t in range(numstep):
            # Action.
            if t > 0:
                tensor_view.append(
                    self._make_tensor_layer(network,
                                            num_output=512,
                                            shared_name='tensor_view'))
                tensor_view[-1].bottom.append('rotations%d' % t)
                relu2_view.append(self._make_relu_layer(network))
                relu2_view_split.append(self._make_split_layer(network))
            # Decoder.
            concat.append(self._make_concat_layer(network))
            dec_fc1.append(
                self._make_inner_product_layer(network,
                                               num_output=1024,
                                               shared_name='dec_fc1'))
            dec_relu1.append(self._make_relu_layer(network))
            dec_fc2.append(
                self._make_inner_product_layer(network,
                                               num_output=1024,
                                               shared_name='dec_fc2'))
            dec_relu2.append(self._make_relu_layer(network))
            dec_relu2_split.append(self._make_split_layer(network))
            # Dec img path.
            dec_img_fc1.append(
                self._make_inner_product_layer(network,
                                               num_output=16384,
                                               shared_name='dec_img_fc1'))
            dec_img_relu1.append(self._make_relu_layer(network))
            dec_img_fold.append(self._make_folding_layer(network, 256, 8, 8))
            dec_img_up1.append(self._make_upsampling_layer(network, stride=2))
            dec_img_conv1.append(
                self._make_conv_layer(network,
                                      kernel_size=5,
                                      stride=1,
                                      pad=2,
                                      num_output=128,
                                      shared_name='dec_img_conv1'))
            dec_img_relu2.append(self._make_relu_layer(network))
            dec_img_up2.append(self._make_upsampling_layer(network, stride=2))
            dec_img_conv2.append(
                self._make_conv_layer(network,
                                      kernel_size=5,
                                      stride=1,
                                      pad=2,
                                      num_output=64,
                                      shared_name='dec_img_conv2'))
            dec_img_relu3.append(self._make_relu_layer(network))
            dec_img_up3.append(self._make_upsampling_layer(network, stride=2))
            dec_img_conv3.append(
                self._make_conv_layer(network,
                                      kernel_size=5,
                                      stride=1,
                                      pad=2,
                                      num_output=3,
                                      shared_name='dec_img_conv3'))
            # Dec mask path.
            dec_mask_fc1.append(
                self._make_inner_product_layer(network,
                                               num_output=8192,
                                               shared_name='dec_mask_fc1'))
            dec_mask_relu1.append(self._make_relu_layer(network))
            dec_mask_fold.append(self._make_folding_layer(network, 128, 8, 8))
            dec_mask_up1.append(self._make_upsampling_layer(network, stride=2))
            dec_mask_conv1.append(
                self._make_conv_layer(network,
                                      kernel_size=5,
                                      stride=1,
                                      pad=2,
                                      num_output=64,
                                      shared_name='dec_mask_conv1'))
            dec_mask_relu2.append(self._make_relu_layer(network))
            dec_mask_up2.append(self._make_upsampling_layer(network, stride=2))
            dec_mask_conv2.append(
                self._make_conv_layer(network,
                                      kernel_size=5,
                                      stride=1,
                                      pad=2,
                                      num_output=32,
                                      shared_name='dec_mask_conv2'))
            dec_mask_relu3.append(self._make_relu_layer(network))
            dec_mask_up3.append(self._make_upsampling_layer(network, stride=2))
            dec_mask_conv3.append(
                self._make_conv_layer(network,
                                      kernel_size=5,
                                      stride=1,
                                      pad=2,
                                      num_output=1,
                                      shared_name='dec_mask_conv3'))

            # dec connections.
            if t > 0:
                connections.append(
                    (relu2_view_split[-2].name, (relu2_view_split[-2].top,
                                                 tensor_view[-1].bottom)))
            connections.append((tensor_view[-1].name, (tensor_view[-1].top,
                                                       relu2_view[-1].bottom)))
            connections.append(
                (relu2_view[-1].name, (relu2_view[-1].top,
                                       relu2_view_split[-1].bottom)))
            connections.append(
                (id_split.name, (id_split.top, concat[-1].bottom)))
            connections.append((relu2_view_split[-1].name,
                                (relu2_view_split[-1].top, concat[-1].bottom)))
            connections.append(
                (concat[-1].name, (concat[-1].top, dec_fc1[-1].bottom)))
            connections.append(
                (dec_fc1[-1].name, (dec_fc1[-1].top, dec_relu1[-1].bottom,
                                    dec_relu1[-1].top, dec_fc2[-1].bottom)))
            connections.append(
                (dec_fc2[-1].name, (dec_fc2[-1].top, dec_relu2[-1].bottom)))
            connections.append(
                (dec_relu2[-1].name, (dec_relu2[-1].top,
                                      dec_relu2_split[-1].bottom)))
            # dec image connections.
            connections.append(
                (dec_relu2_split[-1].name, (dec_relu2_split[-1].top,
                                            dec_img_fc1[-1].bottom)))
            connections.append(
                (dec_img_fc1[-1].name,
                 (dec_img_fc1[-1].top, dec_img_relu1[-1].bottom,
                  dec_img_relu1[-1].top, dec_img_fold[-1].bottom)))
            connections.append(
                (dec_img_fold[-1].name, (dec_img_fold[-1].top,
                                         dec_img_up1[-1].bottom)))
            connections.append(
                (dec_img_up1[-1].name, (dec_img_up1[-1].top,
                                        dec_img_conv1[-1].bottom)))
            connections.append(
                (dec_img_conv1[-1].name,
                 (dec_img_conv1[-1].top, dec_img_relu2[-1].bottom,
                  dec_img_relu2[-1].top, dec_img_up2[-1].bottom)))
            connections.append(
                (dec_img_up2[-1].name, (dec_img_up2[-1].top,
                                        dec_img_conv2[-1].bottom)))
            connections.append(
                (dec_img_conv2[-1].name,
                 (dec_img_conv2[-1].top, dec_img_relu3[-1].bottom,
                  dec_img_relu3[-1].top, dec_img_up3[-1].bottom)))
            connections.append(
                (dec_img_up3[-1].name, (dec_img_up3[-1].top,
                                        dec_img_conv3[-1].bottom)))
            # dec mask connections.
            connections.append(
                (dec_relu2_split[-1].name, (dec_relu2_split[-1].top,
                                            dec_mask_fc1[-1].bottom)))
            connections.append(
                (dec_mask_fc1[-1].name,
                 (dec_mask_fc1[-1].top, dec_mask_relu1[-1].bottom,
                  dec_mask_relu1[-1].top, dec_mask_fold[-1].bottom)))
            connections.append(
                (dec_mask_fold[-1].name, (dec_mask_fold[-1].top,
                                          dec_mask_up1[-1].bottom)))
            connections.append(
                (dec_mask_up1[-1].name, (dec_mask_up1[-1].top,
                                         dec_mask_conv1[-1].bottom)))
            connections.append(
                (dec_mask_conv1[-1].name,
                 (dec_mask_conv1[-1].top, dec_mask_relu2[-1].bottom,
                  dec_mask_relu2[-1].top, dec_mask_up2[-1].bottom)))
            connections.append(
                (dec_mask_up2[-1].name, (dec_mask_up2[-1].top,
                                         dec_mask_conv2[-1].bottom)))
            connections.append(
                (dec_mask_conv2[-1].name,
                 (dec_mask_conv2[-1].top, dec_mask_relu3[-1].bottom,
                  dec_mask_relu3[-1].top, dec_mask_up3[-1].bottom)))
            connections.append(
                (dec_mask_up3[-1].name, (dec_mask_up3[-1].top,
                                         dec_mask_conv3[-1].bottom)))

        layers = [
            conv1, relu1, conv2, relu2, conv3, relu3, fc1, relu4, fc2, relu5,
            enc_split, fc1_id, relu1_id, id_split
        ]
        layers += tensor_view
        layers += relu2_view
        layers += relu2_view_split
        layers += concat
        layers += dec_fc1
        layers += dec_relu1
        layers += dec_fc2
        layers += dec_relu2
        layers += dec_relu2_split
        layers += dec_img_fc1
        layers += dec_img_relu1
        layers += dec_img_fold
        layers += dec_img_up1
        layers += dec_img_conv1
        layers += dec_img_relu2
        layers += dec_img_up2
        layers += dec_img_conv2
        layers += dec_img_relu3
        layers += dec_img_up3
        layers += dec_img_conv3
        layers += dec_mask_fc1
        layers += dec_mask_relu1
        layers += dec_mask_fold
        layers += dec_mask_up1
        layers += dec_mask_conv1
        layers += dec_mask_relu2
        layers += dec_mask_up2
        layers += dec_mask_conv2
        layers += dec_mask_relu3
        layers += dec_mask_up3
        layers += dec_mask_conv3

        final_img_concat = self._make_concat_layer(network)
        for idx, l in enumerate(dec_img_conv3):
            l.name = 't%d_%s' % (idx, l.name)
            connections.append((l.name, (l.top, final_img_concat.bottom)))
        final_img_concat.top.append('images_concat')
        final_img_concat.loss_weight.append(10.0)

        final_mask_concat = self._make_concat_layer(network)
        for idx, l in enumerate(dec_mask_conv3):
            l.name = 't%d_%s' % (idx, l.name)
            connections.append((l.name, (l.top, final_mask_concat.bottom)))
        final_mask_concat.top.append('masks_concat')
        final_mask_concat.loss_weight.append(1.0)

        layers += [final_img_concat, final_mask_concat]

        # make connections.
        for connection in connections:
            self._tie(connection, name_generator)

        for l in tensor_view[0:]:
            tmp = reversed(l.bottom)
            l.ClearField('bottom')
            l.bottom.extend(tmp)

        # Fix up the names based on the connections that were generated.
        for pos, layer in enumerate(layers):
            layer.name += '_%d' % pos

        return network
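Each entry in `connections` pairs a layer name with a tuple of repeated top/bottom fields that should end up referring to the same blob. The `_tie` and `_connection_name_generator` helpers are not shown in this example; the following is a sketch of one way such a tuple could be tied, stated as an assumption rather than the project's actual implementation:

import itertools

def connection_name_generator():
    # Hypothetical stand-in for self._connection_name_generator(): yields '0', '1', '2', ...
    return (str(i) for i in itertools.count())

def tie(connection, name_generator):
    # Assumed behavior: append one shared blob name to every field in the tuple, so a
    # layer's top and the next layer's bottom name the same blob (an identical top and
    # bottom on a ReLU corresponds to Caffe's in-place computation).
    layer_name, fields = connection
    blob_name = '%s_%s' % (layer_name, next(name_generator))
    for field in fields:
        field.append(blob_name)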