Example #1
import math

import caffe
from caffe import layers as L, params as P


def setLayers_twoBranches(data_source,
                          batch_size,
                          layername,
                          kernel,
                          stride,
                          outCH,
                          label_name,
                          transform_param_in,
                          deploy=False,
                          batchnorm=0,
                          lr_mult_distro=[1, 1, 1]):
    # producing the training and testing prototxt files is straightforward, but
    # the deploy prototxt is tricky: its data input does not come from a layer,
    # so we have to create a workaround
    n = caffe.NetSpec()
    assert len(layername) == len(kernel)
    assert len(layername) == len(stride)
    assert len(layername) == len(outCH)
    num_parts = transform_param_in['num_parts']

    if deploy == False and "lmdb" not in data_source:
        if (len(label_name) == 1):
            n.data, n.tops[label_name[0]] = L.HDF5Data(hdf5_data_param=dict(
                batch_size=batch_size, source=data_source),
                                                       ntop=2)
        elif (len(label_name) == 2):
            n.data, n.tops[label_name[0]], n.tops[label_name[1]] = L.HDF5Data(
                hdf5_data_param=dict(batch_size=batch_size,
                                     source=data_source),
                ntop=3)
    # produce data definition for the LMDB-based training net
    elif deploy == False:
        n.data, n.tops['label'] = L.CPMData(
            data_param=dict(backend=1,
                            source=data_source,
                            batch_size=batch_size),
            cpm_transform_param=transform_param_in,
            ntop=2)
        n.tops[label_name[2]], n.tops[label_name[3]], n.tops[
            label_name[4]], n.tops[label_name[5]] = L.Slice(
                n.label,
                slice_param=dict(
                    axis=1, slice_point=[38, num_parts + 1, num_parts + 39]),
                ntop=4)
        n.tops[label_name[0]] = L.Eltwise(n.tops[label_name[2]],
                                          n.tops[label_name[4]],
                                          operation=P.Eltwise.PROD)
        n.tops[label_name[1]] = L.Eltwise(n.tops[label_name[3]],
                                          n.tops[label_name[5]],
                                          operation=P.Eltwise.PROD)

    # produce data definition for the deploy net
    else:
        input = "data"
        dim1 = 1
        dim2 = 4
        dim3 = 368
        dim4 = 368
        # make an empty "data" layer so that the next layer accepting input can
        # take the correct blob name "data"; we will later remove this placeholder
        # layer from the serialization string
        n.data = L.Layer()

    # something special before everything
    n.image, n.center_map = L.Slice(n.data,
                                    slice_param=dict(axis=1, slice_point=3),
                                    ntop=2)
    n.silence2 = L.Silence(n.center_map, ntop=0)
    #n.pool_center_lower = L.Pooling(n.center_map, kernel_size=9, stride=8, pool=P.Pooling.AVE)

    # walk the layername array (e.g. C, P, C, P, ..., C, C, C, C) and emit layers in order
    last_layer = ['image', 'image']
    stage = 1
    conv_counter = 1
    pool_counter = 1
    drop_counter = 1
    local_counter = 1
    state = 'image'  # can be image or fuse
    share_point = 0

    for l in range(0, len(layername)):
        if layername[l] == 'V':  #pretrained VGG layers
            conv_name = 'conv%d_%d' % (pool_counter, local_counter)
            lr_m = lr_mult_distro[0]
            n.tops[conv_name] = L.Convolution(
                n.tops[last_layer[0]],
                kernel_size=kernel[l],
                num_output=outCH[l],
                pad=int(math.floor(kernel[l] / 2)),
                param=[
                    dict(lr_mult=lr_m, decay_mult=1),
                    dict(lr_mult=lr_m * 2, decay_mult=0)
                ],
                weight_filler=dict(type='gaussian', std=0.01),
                bias_filler=dict(type='constant'))
            last_layer[0] = conv_name
            last_layer[1] = conv_name
            print '%s\tch=%d\t%.1f' % (last_layer[0], outCH[l], lr_m)
            ReLUname = 'relu%d_%d' % (pool_counter, local_counter)
            n.tops[ReLUname] = L.ReLU(n.tops[last_layer[0]], in_place=True)
            local_counter += 1
            print ReLUname
        if layername[l] == 'B':
            pool_counter += 1
            local_counter = 1
        if layername[l] == 'C':
            if state == 'image':
                #conv_name = 'conv%d_stage%d' % (conv_counter, stage)
                conv_name = 'conv%d_%d_CPM' % (
                    pool_counter, local_counter
                )  # no image state in subsequent stages
                lr_m = lr_mult_distro[1]  # same multiplier regardless of stage
            else:  # fuse
                conv_name = 'Mconv%d_stage%d' % (conv_counter, stage)
                lr_m = lr_mult_distro[2]
                conv_counter += 1
            #if stage == 1:
            #    lr_m = 1
            #else:
            #    lr_m = lr_sub
            n.tops[conv_name] = L.Convolution(
                n.tops[last_layer[0]],
                kernel_size=kernel[l],
                num_output=outCH[l],
                pad=int(math.floor(kernel[l] / 2)),
                param=[
                    dict(lr_mult=lr_m, decay_mult=1),
                    dict(lr_mult=lr_m * 2, decay_mult=0)
                ],
                weight_filler=dict(type='gaussian', std=0.01),
                bias_filler=dict(type='constant'))
            last_layer[0] = conv_name
            last_layer[1] = conv_name
            print '%s\tch=%d\t%.1f' % (last_layer[0], outCH[l], lr_m)

            if layername[l + 1] != 'L':
                if (state == 'image'):
                    if (batchnorm == 1):
                        batchnorm_name = 'bn%d_stage%d' % (conv_counter, stage)
                        n.tops[batchnorm_name] = L.BatchNorm(
                            n.tops[last_layer[0]],
                            param=[
                                dict(lr_mult=0),
                                dict(lr_mult=0),
                                dict(lr_mult=0)
                            ])
                        #scale_filler=dict(type='constant', value=1), shift_filler=dict(type='constant', value=0.001))
                        last_layer[0] = batchnorm_name
                    #ReLUname = 'relu%d_stage%d' % (conv_counter, stage)
                    ReLUname = 'relu%d_%d_CPM' % (pool_counter, local_counter)
                    n.tops[ReLUname] = L.ReLU(n.tops[last_layer[0]],
                                              in_place=True)
                else:
                    if (batchnorm == 1):
                        batchnorm_name = 'Mbn%d_stage%d' % (conv_counter,
                                                            stage)
                        n.tops[batchnorm_name] = L.BatchNorm(
                            n.tops[last_layer[0]],
                            param=[
                                dict(lr_mult=0),
                                dict(lr_mult=0),
                                dict(lr_mult=0)
                            ])
                        #scale_filler=dict(type='constant', value=1), shift_filler=dict(type='constant', value=0.001))
                        last_layer[0] = batchnorm_name
                    ReLUname = 'Mrelu%d_stage%d' % (conv_counter, stage)
                    n.tops[ReLUname] = L.ReLU(n.tops[last_layer[0]],
                                              in_place=True)
                #last_layer = ReLUname
                print ReLUname

            #conv_counter += 1
            local_counter += 1

        elif layername[l] == 'C2':
            for level in range(0, 2):
                if state == 'image':
                    #conv_name = 'conv%d_stage%d' % (conv_counter, stage)
                    conv_name = 'conv%d_%d_CPM_L%d' % (
                        pool_counter, local_counter, level + 1
                    )  # no image state in subsequent stages
                    lr_m = lr_mult_distro[1]  # same multiplier regardless of stage
                else:  # fuse
                    conv_name = 'Mconv%d_stage%d_L%d' % (conv_counter, stage,
                                                         level + 1)
                    lr_m = lr_mult_distro[2]
                    #conv_counter += 1
                #if stage == 1:
                #    lr_m = 1
                #else:
                #    lr_m = lr_sub
                if layername[l + 1] == 'L2' or layername[l + 1] == 'L3':
                    if level == 0:
                        outCH[l] = 28
                    else:
                        outCH[l] = 16

                n.tops[conv_name] = L.Convolution(
                    n.tops[last_layer[level]],
                    kernel_size=kernel[l],
                    num_output=outCH[l],
                    pad=int(math.floor(kernel[l] / 2)),
                    param=[
                        dict(lr_mult=lr_m, decay_mult=1),
                        dict(lr_mult=lr_m * 2, decay_mult=0)
                    ],
                    weight_filler=dict(type='gaussian', std=0.01),
                    bias_filler=dict(type='constant'))
                last_layer[level] = conv_name
                print '%s\tch=%d\t%.1f' % (last_layer[level], outCH[l], lr_m)

                if layername[l + 1] != 'L2' and layername[l + 1] != 'L3':
                    if (state == 'image'):
                        if (batchnorm == 1):
                            batchnorm_name = 'bn%d_stage%d_L%d' % (
                                conv_counter, stage, level + 1)
                            n.tops[batchnorm_name] = L.BatchNorm(
                                n.tops[last_layer[level]],
                                param=[
                                    dict(lr_mult=0),
                                    dict(lr_mult=0),
                                    dict(lr_mult=0)
                                ])
                            #scale_filler=dict(type='constant', value=1), shift_filler=dict(type='constant', value=0.001))
                            last_layer[level] = batchnorm_name
                        #ReLUname = 'relu%d_stage%d' % (conv_counter, stage)
                        ReLUname = 'relu%d_%d_CPM_L%d' % (
                            pool_counter, local_counter, level + 1)
                        n.tops[ReLUname] = L.ReLU(n.tops[last_layer[level]],
                                                  in_place=True)
                    else:
                        if (batchnorm == 1):
                            batchnorm_name = 'Mbn%d_stage%d_L%d' % (
                                conv_counter, stage, level + 1)
                            n.tops[batchnorm_name] = L.BatchNorm(
                                n.tops[last_layer[level]],
                                param=[
                                    dict(lr_mult=0),
                                    dict(lr_mult=0),
                                    dict(lr_mult=0)
                                ])
                            #scale_filler=dict(type='constant', value=1), shift_filler=dict(type='constant', value=0.001))
                            last_layer[level] = batchnorm_name
                        ReLUname = 'Mrelu%d_stage%d_L%d' % (conv_counter,
                                                            stage, level + 1)
                        n.tops[ReLUname] = L.ReLU(n.tops[last_layer[level]],
                                                  in_place=True)
                    print ReLUname

            conv_counter += 1
            local_counter += 1

        elif layername[l] == 'P':  # Pooling
            n.tops['pool%d_stage%d' % (pool_counter, stage)] = L.Pooling(
                n.tops[last_layer[0]],
                kernel_size=kernel[l],
                stride=stride[l],
                pool=P.Pooling.MAX)
            last_layer[0] = 'pool%d_stage%d' % (pool_counter, stage)
            pool_counter += 1
            local_counter = 1
            conv_counter += 1
            print last_layer[0]

        elif layername[l] == 'L':
            # Loss: n.loss layer is only in training and testing nets, but not in deploy net.
            if deploy == False and "lmdb" not in data_source:
                n.tops['map_vec_stage%d' % stage] = L.Flatten(
                    n.tops[last_layer[0]])
                n.tops['loss_stage%d' % stage] = L.EuclideanLoss(
                    n.tops['map_vec_stage%d' % stage], n.tops[label_name[1]])
            elif deploy == False:
                level = 1
                name = 'weight_stage%d' % stage
                n.tops[name] = L.Eltwise(n.tops[last_layer[level]],
                                         n.tops[label_name[(level + 2)]],
                                         operation=P.Eltwise.PROD)
                n.tops['loss_stage%d' % stage] = L.EuclideanLoss(
                    n.tops[name], n.tops[label_name[level]])

            print 'loss %d' % stage
            stage += 1
            conv_counter = 1
            pool_counter = 1
            drop_counter = 1
            local_counter = 1
            state = 'image'

        elif layername[l] == 'L2':
            # Loss: n.loss layer is only in training and testing nets, but not in deploy net.
            weight = [lr_mult_distro[3], 1]  # requires a 4-element lr_mult_distro
            # print lr_mult_distro[3]
            for level in range(0, 2):
                if deploy == False and "lmdb" not in data_source:
                    n.tops['map_vec_stage%d_L%d' %
                           (stage, level + 1)] = L.Flatten(
                               n.tops[last_layer[level]])
                    n.tops['loss_stage%d_L%d' %
                           (stage, level + 1)] = L.EuclideanLoss(
                               n.tops['map_vec_stage%d_L%d' % (stage, level + 1)],
                               n.tops[label_name[level]],
                               loss_weight=weight[level])
                elif deploy == False:
                    name = 'weight_stage%d_L%d' % (stage, level + 1)
                    n.tops[name] = L.Eltwise(n.tops[last_layer[level]],
                                             n.tops[label_name[(level + 2)]],
                                             operation=P.Eltwise.PROD)
                    n.tops['loss_stage%d_L%d' %
                           (stage, level + 1)] = L.EuclideanLoss(
                               n.tops[name],
                               n.tops[label_name[level]],
                               loss_weight=weight[level])

                print 'loss %d level %d' % (stage, level + 1)

            stage += 1
            #last_connect = last_layer
            #last_layer = 'image'
            conv_counter = 1
            pool_counter = 1
            drop_counter = 1
            local_counter = 1
            state = 'image'

        elif layername[l] == 'L3':
            # Loss: n.loss layer is only in training and testing nets, but not in deploy net.
            weight = [lr_mult_distro[3], 1]
            # print lr_mult_distro[3]
            if deploy == False:
                level = 0
                n.tops['loss_stage%d_L%d' %
                       (stage, level + 1)] = L.Euclidean2Loss(
                           n.tops[last_layer[level]],
                           n.tops[label_name[level]],
                           n.tops[label_name[2]],
                           loss_weight=weight[level])
                print 'loss %d level %d' % (stage, level + 1)
                level = 1
                n.tops['loss_stage%d_L%d' %
                       (stage, level + 1)] = L.EuclideanLoss(
                           n.tops[last_layer[level]],
                           n.tops[label_name[level]],
                           loss_weight=weight[level])
                print 'loss %d level %d' % (stage, level + 1)

            stage += 1
            #last_connect = last_layer
            #last_layer = 'image'
            conv_counter = 1
            pool_counter = 1
            drop_counter = 1
            local_counter = 1
            state = 'image'

        elif layername[l] == 'D':
            if deploy == False:
                n.tops['drop%d_stage%d' % (drop_counter, stage)] = L.Dropout(
                    n.tops[last_layer[0]],
                    in_place=True,
                    dropout_param=dict(dropout_ratio=0.5))
                drop_counter += 1
        elif layername[l] == '@':
            #if not share_point:
            #    share_point = last_layer
            n.tops['concat_stage%d' % stage] = L.Concat(
                n.tops[last_layer[0]],
                n.tops[last_layer[1]],
                n.tops[share_point],
                concat_param=dict(axis=1))

            local_counter = 1
            state = 'fuse'
            last_layer[0] = 'concat_stage%d' % stage
            last_layer[1] = 'concat_stage%d' % stage
            print last_layer
        elif layername[l] == '$':
            share_point = last_layer[0]
            pool_counter += 1
            local_counter = 1
            print 'share'

    # final process
    stage -= 1
    #if stage == 1:
    #    n.silence = L.Silence(n.pool_center_lower, ntop=0)

    if deploy == False:
        return str(n.to_proto())
        # for generating the deploy net
    else:
        # generate the input information header string
        deploy_str = 'input: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}'.format(
            '"' + input + '"', dim1, dim2, dim3, dim4)
        # assemble the input header with the net layers string.  remove the first placeholder layer from the net string.
        return deploy_str + '\n' + 'layer {' + 'layer {'.join(
            str(n.to_proto()).split('layer {')[2:])
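
A minimal usage sketch for the generator above. The layer sequence, label names, and file paths here are hypothetical stand-ins for what the real driver script would supply; it only illustrates that the same function emits both the training and the deploy prototxt:

# hypothetical configuration; a real driver defines the full CPM layer sequence
layername = ['C', 'P', 'C', 'C2', 'L2']
kernel = [3, 2, 3, 1, 0]
stride = [1, 2, 1, 1, 0]
outCH = [64, 0, 128, 38, 0]
label_name = ['label_vec', 'label_heat', 'vec_weight', 'heat_weight',
              'vec_temp', 'heat_temp']
transform_param = dict(num_parts=18)

with open('train.prototxt', 'w') as f:
    f.write(setLayers_twoBranches('train.lmdb', 10, layername, kernel, stride,
                                  outCH, label_name, transform_param,
                                  deploy=False, lr_mult_distro=[1, 1, 1, 1]))
with open('deploy.prototxt', 'w') as f:
    f.write(setLayers_twoBranches('train.lmdb', 1, layername, kernel, stride,
                                  outCH, label_name, transform_param,
                                  deploy=True, lr_mult_distro=[1, 1, 1, 1]))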
Example #2
import math

import caffe
import numpy as np
from caffe import layers as L, params as P


def convert(keras_model, caffe_net_file, caffe_params_file):

    caffe_net = caffe.NetSpec()

    net_params = dict()

    outputs = dict()
    shape = ()

    input_str = ''

    for layer in keras_model.layers:
        name = layer.name
        layer_type = type(layer).__name__

        config = layer.get_config()

        blobs = layer.get_weights()
        blobs_num = len(blobs)

        if type(layer.output) == list:
            raise Exception('Layers with multiple outputs are not supported')
        else:
            top = layer.output.name

        if type(layer.input) != list:
            bottom = layer.input.name

        # first we need to create an Input layer
        if layer_type == 'InputLayer' or len(caffe_net.tops) == 0:

            input_name = 'data'
            caffe_net[input_name] = L.Layer()
            input_shape = config['batch_input_shape']
            input_str = 'input: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}'.format(
                '"' + input_name + '"', 1, input_shape[3], input_shape[1],
                input_shape[2])
            outputs[layer.input.name] = input_name
            if layer_type == 'InputLayer':
                continue

        if layer_type == 'Conv2D' or layer_type == 'Convolution2D':

            strides = config['strides']
            kernel_size = config['kernel_size']

            kwargs = {'num_output': config['filters']}

            if kernel_size[0] == kernel_size[1]:
                kwargs['kernel_size'] = kernel_size[0]
            else:
                kwargs['kernel_h'] = kernel_size[0]
                kwargs['kernel_w'] = kernel_size[1]

            if strides[0] == strides[1]:
                kwargs['stride'] = strides[0]
            else:
                kwargs['stride_h'] = strides[0]
                kwargs['stride_w'] = strides[1]

            if not config['use_bias']:
                kwargs['bias_term'] = False
                #kwargs['param']=[dict(lr_mult=0)]
            else:
                #kwargs['param']=[dict(lr_mult=0), dict(lr_mult=0)]
                pass

            set_padding(config, layer.input_shape, kwargs)

            caffe_net[name] = L.Convolution(caffe_net[outputs[bottom]],
                                            **kwargs)

            blobs[0] = np.array(blobs[0]).transpose(3, 2, 0, 1)
            net_params[name] = blobs

            if config['activation'] == 'relu':
                name_s = name + 's'
                caffe_net[name_s] = L.ReLU(caffe_net[name], in_place=True)
            elif config['activation'] == 'sigmoid':
                name_s = name + 's'
                caffe_net[name_s] = L.Sigmoid(caffe_net[name], in_place=True)
            elif config['activation'] == 'linear':
                #do nothing
                pass
            else:
                raise Exception('Unsupported activation ' +
                                config['activation'])

        elif layer_type == 'DepthwiseConv2D':

            strides = config['strides']
            kernel_size = config['kernel_size']

            kwargs = {'num_output': layer.input_shape[3]}

            if kernel_size[0] == kernel_size[1]:
                kwargs['kernel_size'] = kernel_size[0]
            else:
                kwargs['kernel_h'] = kernel_size[0]
                kwargs['kernel_w'] = kernel_size[1]

            if strides[0] == strides[1]:
                kwargs['stride'] = strides[0]
            else:
                kwargs['stride_h'] = strides[0]
                kwargs['stride_w'] = strides[1]

            set_padding(config, layer.input_shape, kwargs)

            kwargs['group'] = layer.input_shape[3]

            kwargs['bias_term'] = False
            caffe_net[name] = L.Convolution(caffe_net[outputs[bottom]],
                                            **kwargs)
            blob = np.array(blobs[0]).transpose(2, 3, 0, 1)
            blob.shape = (1, ) + blob.shape
            net_params[name] = blob

            if config['activation'] == 'relu':
                name_s = name + 's'
                caffe_net[name_s] = L.ReLU(caffe_net[name], in_place=True)
            elif config['activation'] == 'sigmoid':
                name_s = name + 's'
                caffe_net[name_s] = L.Sigmoid(caffe_net[name], in_place=True)
            elif config['activation'] == 'linear':
                #do nothing
                pass
            else:
                raise Exception('Unsupported activation ' +
                                config['activation'])

        elif layer_type == 'SeparableConv2D':

            strides = config['strides']
            kernel_size = config['kernel_size']

            kwargs = {'num_output': layer.input_shape[3]}

            if kernel_size[0] == kernel_size[1]:
                kwargs['kernel_size'] = kernel_size[0]
            else:
                kwargs['kernel_h'] = kernel_size[0]
                kwargs['kernel_w'] = kernel_size[1]

            if strides[0] == strides[1]:
                kwargs['stride'] = strides[0]
            else:
                kwargs['stride_h'] = strides[0]
                kwargs['stride_w'] = strides[1]

            set_padding(config, layer.input_shape, kwargs)

            kwargs['group'] = layer.input_shape[3]

            kwargs['bias_term'] = False
            caffe_net[name] = L.Convolution(caffe_net[outputs[bottom]],
                                            **kwargs)
            blob = np.array(blobs[0]).transpose(2, 3, 0, 1)
            blob.shape = (1, ) + blob.shape
            net_params[name] = blob

            name2 = name + '_'
            kwargs = {
                'num_output': config['filters'],
                'kernel_size': 1,
                'bias_term': config['use_bias']
            }
            caffe_net[name2] = L.Convolution(caffe_net[name], **kwargs)

            if config['use_bias']:
                blob2 = []
                blob2.append(np.array(blobs[1]).transpose(3, 2, 0, 1))
                blob2.append(np.array(blobs[2]))
                blob2[0].shape = (1, ) + blob2[0].shape
            else:
                blob2 = np.array(blobs[1]).transpose(3, 2, 0, 1)
                blob2.shape = (1, ) + blob2.shape

            net_params[name2] = blob2
            name = name2

        elif layer_type == 'BatchNormalization':

            param = dict()

            variance = np.array(blobs[-1])
            mean = np.array(blobs[-2])

            if config['scale']:
                gamma = np.array(blobs[0])
                sparam = [dict(lr_mult=1), dict(lr_mult=1)]
            else:
                gamma = np.ones(mean.shape, dtype=np.float32)
                #sparam=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=1, decay_mult=1)]
                sparam = [dict(lr_mult=0), dict(lr_mult=1)]
                #sparam=[dict(lr_mult=0), dict(lr_mult=0)]

            if config['center']:
                beta = np.array(blobs[-3])
                param['bias_term'] = True
            else:
                beta = np.zeros(mean.shape, dtype=np.float32)
                param['bias_term'] = False

            caffe_net[name] = L.BatchNorm(caffe_net[outputs[bottom]],
                                          in_place=True)
            #param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1), dict(lr_mult=0, decay_mult=0)])
            #param=[dict(lr_mult=1), dict(lr_mult=1), dict(lr_mult=0)])

            net_params[name] = (mean, variance, np.array(1.0))

            name_s = name + 's'

            caffe_net[name_s] = L.Scale(
                caffe_net[name],
                in_place=True,
                param=sparam,
                scale_param={'bias_term': config['center']})
            net_params[name_s] = (gamma, beta)

        elif layer_type == 'Dense':
            caffe_net[name] = L.InnerProduct(caffe_net[outputs[bottom]],
                                             num_output=config['units'],
                                             weight_filler=dict(type='xavier'))

            if config['use_bias']:
                weight = np.array(blobs[0]).transpose(1, 0)
                if type(layer._inbound_nodes[0].inbound_layers[0]
                        ).__name__ == 'Flatten':
                    flatten_shape = layer._inbound_nodes[0].inbound_layers[
                        0].input_shape
                    for i in range(weight.shape[0]):
                        weight[i] = np.array(weight[i].reshape(
                            flatten_shape[1],
                            flatten_shape[2], flatten_shape[3]).transpose(
                                2, 0, 1).reshape(weight.shape[1]))
                net_params[name] = (weight, np.array(blobs[1]))
            else:
                # mirror the use_bias branch: transpose and wrap in a tuple so the
                # copy loop below indexes blobs, not rows of a single array
                net_params[name] = (np.array(blobs[0]).transpose(1, 0), )

            name_s = name + 's'
            if config['activation'] == 'softmax':
                caffe_net[name_s] = L.Softmax(caffe_net[name], in_place=True)
            elif config['activation'] == 'relu':
                caffe_net[name_s] = L.ReLU(caffe_net[name], in_place=True)

        elif layer_type == 'Activation':
            if config['activation'] == 'relu':
                #caffe_net[name] = L.ReLU(caffe_net[outputs[bottom]], in_place=True)
                if len(layer.input.consumers()) > 1:
                    caffe_net[name] = L.ReLU(caffe_net[outputs[bottom]])
                else:
                    caffe_net[name] = L.ReLU(caffe_net[outputs[bottom]],
                                             in_place=True)
            elif config['activation'] == 'relu6':
                #TODO
                caffe_net[name] = L.ReLU(caffe_net[outputs[bottom]])
            elif config['activation'] == 'softmax':
                caffe_net[name] = L.Softmax(caffe_net[outputs[bottom]],
                                            in_place=True)
            else:
                raise Exception('Unsupported activation ' +
                                config['activation'])

        elif layer_type == 'Cropping2D':
            shape = layer.output_shape
            ddata = L.DummyData(shape=dict(
                dim=[1, shape[3], shape[1], shape[2]]))
            layers = []
            layers.append(caffe_net[outputs[bottom]])
            layers.append(ddata)  #TODO
            caffe_net[name] = L.Crop(*layers)

        elif layer_type == 'Concatenate' or layer_type == 'Merge':
            layers = []
            for i in layer.input:
                layers.append(caffe_net[outputs[i.name]])
            caffe_net[name] = L.Concat(*layers, axis=1)

        elif layer_type == 'Add':
            layers = []
            for i in layer.input:
                layers.append(caffe_net[outputs[i.name]])
            caffe_net[name] = L.Eltwise(*layers)

        elif layer_type == 'Flatten':
            caffe_net[name] = L.Flatten(caffe_net[outputs[bottom]])

        elif layer_type == 'Reshape':
            shape = config['target_shape']
            if len(shape) == 3:
                #shape = (layer.input_shape[0], shape[2], shape[0], shape[1])
                shape = (1, shape[2], shape[0], shape[1])
            elif len(shape) == 1:
                #shape = (layer.input_shape[0], 1, 1, shape[0])
                shape = (1, 1, 1, shape[0])
            caffe_net[name] = L.Reshape(
                caffe_net[outputs[bottom]],
                reshape_param={'shape': {
                    'dim': list(shape)
                }})

        elif layer_type == 'MaxPooling2D' or layer_type == 'AveragePooling2D':

            kwargs = {}

            if layer_type == 'MaxPooling2D':
                kwargs['pool'] = P.Pooling.MAX
            else:
                kwargs['pool'] = P.Pooling.AVE

            pool_size = config['pool_size']
            strides = config['strides']

            if pool_size[0] != pool_size[1]:
                raise Exception('Unsupported pool_size')

            if strides[0] != strides[1]:
                raise Exception('Unsupported strides')

            set_padding(config, layer.input_shape, kwargs)

            caffe_net[name] = L.Pooling(caffe_net[outputs[bottom]],
                                        kernel_size=pool_size[0],
                                        stride=strides[0],
                                        **kwargs)

        elif layer_type == 'Dropout':
            caffe_net[name] = L.Dropout(
                caffe_net[outputs[bottom]],
                dropout_param=dict(dropout_ratio=config['rate']))

        elif layer_type == 'GlobalAveragePooling2D':
            caffe_net[name] = L.Pooling(
                caffe_net[outputs[bottom]],
                pool=P.Pooling.AVE,
                pooling_param=dict(global_pooling=True))

        elif layer_type == 'UpSampling2D':
            if config['size'][0] != config['size'][1]:
                raise Exception('Unsupported upsampling factor')
            factor = config['size'][0]
            kernel_size = 2 * factor - factor % 2
            stride = factor
            pad = int(math.ceil((factor - 1) / 2.0))
            channels = layer.input_shape[-1]
            caffe_net[name] = L.Deconvolution(
                caffe_net[outputs[bottom]],
                convolution_param=dict(num_output=channels,
                                       group=channels,
                                       kernel_size=kernel_size,
                                       stride=stride,
                                       pad=pad,
                                       weight_filler=dict(type='bilinear'),
                                       bias_term=False),
                param=dict(lr_mult=0, decay_mult=0))

        #TODO

        elif layer_type == 'ZeroPadding2D':
            padding = config['padding']
            #ch = layer.input_shape[3]
            #caffe_net[name] = L.Convolution(caffe_net[outputs[bottom]], num_output=ch, kernel_size=1, stride=1, group=ch,
            #    pad_h=padding[0][0], pad_w=padding[1][0], convolution_param=dict(bias_term = False))
            #params = np.ones((1,ch,1,1))

            #net_params[name] = np.ones((1,ch,1,1,1))
            #net_params[name] = np.ones(layer.output_shape)

            caffe_net[name] = L.Pooling(caffe_net[outputs[bottom]],
                                        kernel_size=1,
                                        stride=1,
                                        pad_h=padding[0][0] + padding[0][1],
                                        pad_w=padding[1][0] + padding[1][1],
                                        pool=P.Pooling.AVE)

        else:
            raise Exception('Unsupported layer type: ' + layer_type)

        outputs[top] = name

    # replace the placeholder layer with the input blob definition
    net_proto = input_str + '\n' + 'layer {' + 'layer {'.join(
        str(caffe_net.to_proto()).split('layer {')[2:])

    with open(caffe_net_file, 'w') as f:
        f.write(net_proto)

    caffe_model = caffe.Net(caffe_net_file, caffe.TEST)

    for layer in caffe_model.params.keys():
        if 'up_sampling2d' in layer:
            continue
        for n in range(0, len(caffe_model.params[layer])):
            caffe_model.params[layer][n].data[...] = net_params[layer][n]

    caffe_model.save(caffe_params_file)
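
A minimal usage sketch, assuming the set_padding helper referenced above is available and using a hypothetical toy Keras model built only from layer types the converter handles:

from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten, InputLayer

# hypothetical toy model; any network made of the layer types handled above works
keras_model = Sequential([
    InputLayer(batch_input_shape=(1, 32, 32, 3)),
    Conv2D(8, (3, 3), padding='same', activation='relu'),
    Flatten(),
    Dense(10, activation='softmax'),
])
convert(keras_model, 'toy.prototxt', 'toy.caffemodel')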
Example #3
def add_dummy_layer(net, name):
    """Add a dummy data layer """
    net[name] = L.Layer()
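
add_dummy_layer is the same placeholder trick used in examples #1 and #2: the dummy layer gives downstream layers a blob named after the input, and is stripped from the serialized prototxt afterwards. A sketch of that round trip (the input dimensions here are hypothetical):

import caffe
from caffe import layers as L

net = caffe.NetSpec()
add_dummy_layer(net, 'data')
net.conv1 = L.Convolution(net.data, kernel_size=3, num_output=16, pad=1)

# prepend an input header and drop the first (placeholder) layer from the string
deploy_str = 'input: "data"\ninput_dim: 1\ninput_dim: 3\ninput_dim: 224\ninput_dim: 224'
proto = str(net.to_proto())
deploy_proto = deploy_str + '\n' + 'layer {' + 'layer {'.join(
    proto.split('layer {')[2:])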
Example #4
import caffe
from caffe import layers as L, params as P
from google.protobuf import text_format

# batch_size, lmdb_train_file and lmdb_test_file are assumed to be defined elsewhere
num_ip1_params = 768  # Number of free parameters in ip1
ip1_lrm_weights = 1   # Learning rate multiplier for ip1 weights
ip1_lrm_bias = 1      # Learning rate multiplier for ip1 bias
ip2_lrm_weights = 1   # Learning rate multiplier for ip2 weights
ip2_lrm_bias = 1      # Learning rate multiplier for ip2 bias

train_data = caffe.NetSpec()
train_data.data, train_data.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb_train_file, phase=0, ntop=2)

test_data = caffe.NetSpec()
test_data.data, test_data.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb_test_file, phase=1, ntop=2)

merged_data = text_format.Merge(str(test_data.to_proto()), train_data.to_proto())

net = caffe.NetSpec()
net.data = L.Layer()
net.label = L.Layer()
net.ip1 = L.InnerProduct(net.data, num_output=num_ip1_params,
                         weight_filler={'type': 'xavier'},
                         bias_filler={'type': 'constant', 'value': 0})
net.ip1.fn.params['param'] = [{'lr_mult': ip1_lrm_weights, 'decay_mult': 1},
                              {'lr_mult': ip1_lrm_bias, 'decay_mult': 1}]
net.relu1 = L.ReLU(net.ip1, in_place=True)
net.ip2 = L.InnerProduct(net.relu1, num_output=10,
                         weight_filler={'type': 'xavier'},
                         bias_filler={'type': 'constant', 'value': 0})
net.ip2.fn.params['param'] = [{'lr_mult': ip2_lrm_weights, 'decay_mult': 1},
                              {'lr_mult': ip2_lrm_bias, 'decay_mult': 1}]
net.train_accuracy = L.Accuracy(net.ip2, net.label, phase=0)
net.test_accuracy = L.Accuracy(net.ip2, net.label, phase=1)
net.loss = L.SoftmaxWithLoss(net.ip2, net.label)
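
The snippet builds merged_data (the phase-tagged train/test data layers) but stops before attaching the net body. A sketch of the remaining step, reusing the document's placeholder-stripping trick (the assumption is that data and label are the first two serialized layers):

# drop the two placeholder layers from the net body, then append it to the
# merged train/test data definition
body = str(net.to_proto())
body = 'layer {' + 'layer {'.join(body.split('layer {')[3:])
with open('train_val.prototxt', 'w') as f:
    f.write(str(merged_data) + body)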
Example #5
import caffe
from caffe import layers as L
# InceptionV3Body is assumed to come from the SSD codebase's model_libs helpers

from_layers = ['mixed_7/join', 'mixed_10/join', '', '', '', '']
#from_layers = ['ch_concat_mixed_7_chconcat', 'ch_concat_mixed_10_chconcat', '', '', '', '']
num_filters = [-1, -1, 512, 256, 256, 128]
strides = [-1, -1, 2, 2, 2, 2]
pads = [-1, -1, 1, 1, 1, 1]
sizes = [[.1, .141], [.2, .272], [.37, .447], [.54, .619], [.71, .79],
         [.88, .961]]
ratios = [[1, 2, .5], [1, 2, .5, 3, 1. / 3], [1, 2, .5, 3, 1. / 3],
          [1, 2, .5, 3, 1. / 3], [1, 2, .5], [1, 2, .5]]

aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
normalizations = -1

#net.data = L.Data(name = 'data')

net = caffe.NetSpec()
net.data = L.Layer()

InceptionV3Body(net,
                from_layer='data',
                fully_conv=True,
                reduced=True,
                dilated=True,
                dropout=False)

min_sizes = [m[0] for m in sizes]

max_sizes = [m[1] for m in sizes]

steps = [8, 16, 32, 64, 100, 300]
use_batchnorm = False
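
These lists are the usual inputs of an SSD prior-box head. A hedged sketch of how they would typically be consumed, assuming the CreateMultiBoxHead helper from the weiliu89/caffe SSD codebase (and that the empty slots in from_layers have been filled with extra feature layers first):

# num_classes=21 is a hypothetical (PASCAL VOC style) choice
mbox_layers = CreateMultiBoxHead(net, data_layer='data',
                                 from_layers=from_layers,
                                 use_batchnorm=use_batchnorm,
                                 min_sizes=min_sizes, max_sizes=max_sizes,
                                 aspect_ratios=aspect_ratios, steps=steps,
                                 num_classes=21, share_location=True,
                                 flip=True, clip=False)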
Example #6
File: genProto.py  Project: DenisTome/caffe
import math

import caffe
from caffe import layers as L, params as P


def setLayers(data_source,
              batch_size,
              layername,
              kernel,
              stride,
              outCH,
              label_name,
              transform_param_in,
              deploy=False):
    # producing the training and testing prototxt files is straightforward, but
    # the deploy prototxt is tricky: its data input does not come from a layer,
    # so we have to create a workaround
    n = caffe.NetSpec()
    assert len(layername) == len(kernel)
    assert len(layername) == len(stride)
    assert len(layername) == len(outCH)

    # produce data definition for the training and testing net
    if deploy == False:
        # here we will return the new structure for loading h36m dataset
        n.data, n.tops['label'] = L.CPMData(cpmdata_param=dict(
            backend=1, source=data_source, batch_size=batch_size),
                                            transform_param=transform_param_in,
                                            ntop=2)
        n.tops[label_name[1]], n.tops[label_name[0]], n.tops[
            label_name[2]] = L.Slice(n.label,
                                     slice_param=dict(axis=1,
                                                      slice_point=[18, 36]),
                                     ntop=3)
        n.image, n.center_map = L.Slice(n.data,
                                        slice_param=dict(axis=1,
                                                         slice_point=3),
                                        ntop=2)
    else:
        input = "data"
        dim1 = 18
        dim2 = 5
        dim3 = 368
        dim4 = 368
        # make an empty "data" layer so that the next layer accepting input can
        # take the correct blob name "data"; we will later remove this placeholder
        # layer from the serialization string
        n.data = L.Layer()
        # The Slice layer slices an input blob into multiple outputs along a
        # given dimension:
        #   axis: 1 selects the dimension to slice
        #   slice_point defines the indices in the selected dimension (the
        #   number of indices must equal the number of top blobs minus one)
        # Given an Nx3x1x1 input and slice_point = 2:
        #   top1 : Nx2x1x1
        #   top2 : Nx1x1x1
        n.image, n.center_map, n.tops[label_name[2]] = L.Slice(
            n.data, slice_param=dict(axis=1, slice_point=[3, 4]), ntop=3)

    n.pool_center_lower = L.Pooling(n.center_map,
                                    kernel_size=9,
                                    stride=8,
                                    pool=P.Pooling.AVE)

    # walk the layername array (e.g. C, P, C, P, ..., C, C, C, C) and emit layers in order
    last_layer = 'image'
    stage = 1
    conv_counter = 1
    last_manifold = 'NONE'
    last_merg = 'NONE'
    pool_counter = 1
    drop_counter = 1
    state = 'image'  # can be image or fuse
    share_point = 0
    manifold_current_stage = False
    merge_init_avg = False

    for l in range(0, len(layername)):
        decay_mult = 1

        if layername[l] == 'C':
            if state == 'image':
                conv_name = 'conv%d_stage%d' % (conv_counter, stage)
            else:
                conv_name = 'Mconv%d_stage%d' % (conv_counter, stage)
            #if stage == 1:
            #    lr_m = 5
            #else:
            #    lr_m = 1
            lr_m = 1e-3  # 1e-3 gave the best result so far
            if ((stage == 1 and conv_counter == 7) or
                (stage > 1 and state != 'image' and (conv_counter in [1, 5]))):
                conv_name = '%s_new' % conv_name
                lr_m = 1  #1e-2
                decay_mult = 1

#            if (stage <= 4):
#                lr_m = 0
#                decay_mult = 0
# additional for python layer
#            if (stage > 1 and state != 'image' and (conv_counter == 1)):
#                conv_name = '%s_mf' % conv_name
#                lr_m = 1 #1e-1
            n.tops[conv_name] = L.Convolution(
                n.tops[last_layer],
                kernel_size=kernel[l],
                num_output=outCH[l],
                pad=int(math.floor(kernel[l] / 2)),
                param=[
                    dict(lr_mult=lr_m, decay_mult=decay_mult),
                    dict(lr_mult=lr_m * 2, decay_mult=0)
                ],
                weight_filler=dict(type='gaussian', std=0.01),
                bias_filler=dict(type='constant'))
            last_layer = conv_name
            if not (layername[l + 1] == 'L' or layername[l + 1] == 'M'):
                if (state == 'image'):
                    ReLUname = 'relu%d_stage%d' % (conv_counter, stage)
                    n.tops[ReLUname] = L.ReLU(n.tops[last_layer],
                                              in_place=True)
                else:
                    ReLUname = 'Mrelu%d_stage%d' % (conv_counter, stage)
                    n.tops[ReLUname] = L.ReLU(n.tops[last_layer],
                                              in_place=True)
                last_layer = ReLUname
            conv_counter += 1
        elif layername[l] == 'P':  # Pooling
            n.tops['pool%d_stage%d' % (pool_counter, stage)] = L.Pooling(
                n.tops[last_layer],
                kernel_size=kernel[l],
                stride=stride[l],
                pool=P.Pooling.MAX)
            last_layer = 'pool%d_stage%d' % (pool_counter, stage)
            pool_counter += 1
        elif layername[l] == 'M':
            last_manifold = 'manifolds_stage%d' % stage
            last_merg = 'merge_hm_stage%d' % stage
            debug_mode = 0
            #            if (stage == 5):
            #                debug_mode = 1
            manifold_current_stage = True
            if (stage >= 4):
                merge_init_avg = True
            # TODO: change it back


#            if stage == 4:
#                debug_mode = 4
            parameters = '{"njoints": 17,"sigma": 1, "debug_mode": %r, "max_area": 100, "percentage_max": 3, "train": %u, "Lambda": %.3f }' % (
                debug_mode, not deploy, 0.05)
            # DONE: change it back
            # if manifold_current_stage:
            n.tops[last_manifold] = L.Python(
                n.tops[last_layer],
                n.tops[label_name[2]],
                python_param=dict(module='newheatmaps',
                                  layer='MyCustomLayer',
                                  param_str=parameters))  #,loss_weight=1)
            #            n.tops[last_manifold] = L.Python(n.tops[label_name[1]],n.tops[label_name[2]],python_param=dict(module='newheatmaps',layer='MyCustomLayer',param_str=parameters))#,loss_weight=1)
            init_str = 'zero'
            if merge_init_avg:
                init_str = 'avg'
            merge_lr = 5e-2
            parameters = '{"init": %r, "learning_rate": %r}' % (init_str,
                                                                merge_lr)
            n.tops[last_merg] = L.Python(n.tops[last_layer],
                                         n.tops[last_manifold],
                                         python_param=dict(
                                             module='processheatmaps',
                                             layer='MergeHeatMaps',
                                             param_str=parameters))
        elif layername[l] == 'L':
            # Loss: n.loss layer is only in training and testing nets, but not in deploy net.
            if deploy == False:
                if stage == 1:
                    n.tops['loss_stage%d' % stage] = L.EuclideanLoss(
                        n.tops[last_layer], n.tops[label_name[0]])
                else:
                    n.tops['loss_stage%d' % stage] = L.EuclideanLoss(
                        n.tops[last_layer], n.tops[label_name[1]])

            stage += 1
            last_connect = last_layer
            last_layer = 'image'
            conv_counter = 1
            pool_counter = 1
            drop_counter = 1
            state = 'image'
        elif layername[l] == 'D':
            if deploy == False:
                n.tops['drop%d_stage%d' % (drop_counter, stage)] = L.Dropout(
                    n.tops[last_layer],
                    in_place=True,
                    dropout_param=dict(dropout_ratio=0.5))
                drop_counter += 1
        elif layername[l] == '@':
            # DONE: change it back
            #  no this          n.tops['concat_stage%d' % stage] = L.Concat(n.tops[last_layer], n.tops[last_connect], n.pool_center_lower, n.tops[label_name[1]], concat_param=dict(axis=1))
            #            n.tops['concat_stage%d' % stage] = L.Concat(n.tops[last_layer], n.tops[last_connect], n.pool_center_lower, n.tops[last_manifold], concat_param=dict(axis=1))
            if manifold_current_stage:
                n.tops['concat_stage%d' % stage] = L.Concat(
                    n.tops[last_layer],
                    n.tops[last_merg],
                    n.pool_center_lower,
                    concat_param=dict(axis=1))
            else:
                n.tops['concat_stage%d' % stage] = L.Concat(
                    n.tops[last_layer],
                    n.tops[last_connect],
                    n.pool_center_lower,
                    concat_param=dict(axis=1))

            conv_counter = 1
            state = 'fuse'
            last_layer = 'concat_stage%d' % stage
        elif layername[l] == '$':
            if not share_point:
                share_point = last_layer
            else:
                last_layer = share_point
    # final process
    stage -= 1
    if stage == 1:
        n.silence = L.Silence(n.pool_center_lower, ntop=0)

    if deploy == False:
        return str(n.to_proto())
        # for generating the deploy net
    else:
        # generate the input information header string
        deploy_str = 'input: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}'.format(
            '"' + input + '"', dim1, dim2, dim3, dim4)
        # assemble the input header with the net layers string.  remove the first placeholder layer from the net string.
        return deploy_str + '\n' + 'layer {' + 'layer {'.join(
            str(n.to_proto()).split('layer {')[2:])
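
The 'M' branch above wires in custom python layers (module='newheatmaps', layer='MyCustomLayer' and module='processheatmaps', layer='MergeHeatMaps'). Those modules live outside this file; a minimal skeleton of the interface such a layer must implement, assuming only the standard caffe python-layer API (the forward body is an illustrative pass-through, not the real manifold computation):

import ast

import caffe


class MyCustomLayer(caffe.Layer):
    def setup(self, bottom, top):
        # param_str carries the dict-literal string built in setLayers above;
        # ast.literal_eval accepts both its JSON-style and %r-quoted variants
        self.params = ast.literal_eval(self.param_str)

    def reshape(self, bottom, top):
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        top[0].data[...] = bottom[0].data  # placeholder pass-through

    def backward(self, top, propagate_down, bottom):
        pass  # this sketch propagates no gradient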