Code Example #1
    def load_th_weights_for_tf(self, weights_path, model=None, by_name=False):
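        # Strategy: temporarily switch the backend to Theano image ordering,
        # build a twin model and load the Theano weights into it, then copy
        # the weights layer by layer, flipping conv kernels along the way.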
        image_dim_ordering = K.image_dim_ordering()
        image_data_format = K.image_data_format()

        if model is None:
            model_tf = self.network
        else:
            model_tf = model

        if not isinstance(weights_path, list):
            weights_path = [
                weights_path,
            ]

        for weight in weights_path:
            K.set_image_dim_ordering('th')
            K.set_image_data_format('channels_first')
            model_th = self.model_function(**self.params)
            model_th.load_weights(weight)
            K.set_image_dim_ordering(image_dim_ordering)
            K.set_image_data_format(image_data_format)

            for layer_th, layer_tf in zip(model_th.layers, model_tf.layers):
                if any(x in layer_th.__class__.__name__
                       for x in ['conv', 'Conv', 'permute', 'Permute']):
                    weights_th = layer_th.get_weights()

                    if weights_th:
                        kernel = weights_th[0]
                        bias = weights_th[1]

                        converted_kernel = convert_kernel(kernel)

                        weights_tensorflow = [converted_kernel, bias]

                        layer_tf.set_weights(weights_tensorflow)
                elif 'TimeDistributed' in layer_th.__class__.__name__ and any(
                        x in layer_th.layer.__class__.__name__
                        for x in ['conv', 'Conv', 'permute', 'Permute']):
                    weights_th = layer_th.get_weights()

                    if weights_th:
                        kernel = weights_th[0]
                        bias = weights_th[1]

                        converted_kernel = convert_kernel(kernel)

                        weights_tensorflow = [converted_kernel, bias]

                        layer_tf.set_weights(weights_tensorflow)
                else:
                    layer_tf.set_weights(layer_th.get_weights())
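
Note: convert_kernel (from keras.utils.conv_utils) flips every axis of a kernel except the last two, and the transformation is its own inverse, so the same call converts Theano-format kernels to TensorFlow format and back. A minimal sketch of the effect on a Conv2D kernel, assuming only numpy and keras are available:

import numpy as np
from keras.utils.conv_utils import convert_kernel

kernel = np.random.random((3, 3, 16, 32))  # (rows, cols, in_channels, out_channels)
flipped = convert_kernel(kernel)
# Equivalent to reversing the two spatial axes while leaving channels untouched.
assert np.allclose(flipped, kernel[::-1, ::-1, :, :])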
Code Example #2
File: model.py Project: szandala/cnn-viz
def load_model_weights(model, weights_path):
    print('\nLoading model.')

    # Load pre-trained model
    model.load_weights(weights_path, by_name=True)

    # Theano to TensorFlow - depends on the version
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Conv2D']:  # Layers with pre-trained weights
            original_w = K.get_value(layer.kernel)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.kernel, converted_w).op)
    K.get_session().run(ops)

    # Prev code
    # f = h5py.File(weights_path)
    # for k in range(f.attrs['nb_layers']):
    #     if k >= len(model.layers):
    #         # we don't look at the last (fully-connected) layers in the savefile
    #         break
    #     g = f['layer_{}'.format(k)]
    #     weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    #     model.layers[k].set_weights(weights)
    # f.close()

    # model.save_weights(weights_path)
    print('\nModel loaded.')
    return model
Code Example #3
File: convert.py Project: Palpatineli/ca2spike
def _tf2th(model: Model) -> Model:
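    # Flip conv kernels in place so TensorFlow-trained weights behave
    # correctly under the Theano backend.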
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            K.set_value(layer.W, converted_w)
    return model
Code Example #4
File: prototype.py Project: jamespeterthornton/DHS
def train_3D_resnet():
    model = build_model((1, 64, 64, 64))
    model.load_weights("weights/3D/model_LUNA_64_v29_14.h5")
    unet_checkpoint = ModelCheckpoint("weights/3D/unet.h5",
                                      monitor='loss',
                                      verbose=1,
                                      save_best_only=True,
                                      mode='min')
    from keras import backend as K
    from keras.utils.conv_utils import convert_kernel
    import tensorflow as tf
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in [
                'Convolution1D', 'Convolution2D', 'Convolution3D',
                'AtrousConvolution2D'
        ]:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)
    # Apply the queued assign ops; without this the converted kernels are never written.
    K.get_session().run(ops)
    ps = preseg_generator2(4)
    print("training 3D")
    log = model.fit_generator(ps,
                              2000,
                              epochs=500,
                              verbose=2,
                              callbacks=[unet_checkpoint],
                              max_queue_size=1)
Code Example #5
File: vgg16bn2.py Project: lssxfy123/PythonStudy
    def create(self):
        """
            Creates the VGG16 network architecture and loads the pretrained weights.

            Args:   None
            Returns:   None
        """
        model = self.model = Sequential()
        model.add(Lambda(vgg_preprocess, input_shape=(3,224,224), output_shape=(3,224,224)))

        self.ConvBlock(2, 64)
        self.ConvBlock(2, 128)
        self.ConvBlock(3, 256)
        self.ConvBlock(3, 512)
        self.ConvBlock(3, 512)

        model.add(Flatten())
        self.FCBlock()
        self.FCBlock()
        model.add(Dense(1000, activation='softmax'))

        fname = 'vgg16.h5'
        model.load_weights(get_file(fname, self.FILE_PATH+fname, cache_subdir='models'))
        
        from keras.utils.conv_utils import convert_kernel
        import tensorflow as tf
        ops = []
        for layer in model.layers:
            if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D', 'Convolution3D', 'AtrousConvolution2D', 'Conv1D', 'Conv2D', 'Conv3D']:
                original_w, original_b = layer.get_weights()
                converted_w = convert_kernel(original_w)
                layer.set_weights([converted_w, original_b])
Code Example #7
File: Net2Net.py Project: vipmath/NetworkCompress
    def _deeper_conv2d_weight(kw, kh, filters):
        # Identity-initialised kernel: the centre tap of filter i passes
        # channel i straight through, so the new layer starts as an identity.
        student_w = np.zeros((kw, kh, filters, filters))
        for i in range(filters):
            student_w[(kw - 1) // 2, (kh - 1) // 2, i, i] = 1.
        student_b = np.zeros(filters)
        if K.image_data_format() == "channels_first":
            student_w = convert_kernel(student_w)
        return student_w, student_b
Code Example #9
File: backend_test.py Project: oarriaga/keras
    def test_conv3d(self):
        # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
        # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
        # TH kernel shape: (depth, input_depth, x, y, z)
        # TF kernel shape: (x, y, z, input_depth, depth)
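        # A kernel flipped into Theano format and evaluated under the Theano
        # backend must match the unflipped kernel evaluated under TensorFlow.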

        # test in data_format = channels_first
        for input_shape in [(2, 3, 4, 5, 4), (2, 3, 5, 4, 6)]:
            for kernel_shape in [(2, 2, 2, 3, 4), (3, 2, 4, 3, 4)]:
                xval = np.random.random(input_shape)

                xth = KTH.variable(xval)
                xtf = KTF.variable(xval)

                kernel_val = np.random.random(kernel_shape) - 0.5

                kernel_th = KTH.variable(convert_kernel(kernel_val))
                kernel_tf = KTF.variable(kernel_val)

                zth = KTH.eval(KTH.conv3d(xth, kernel_th, data_format='channels_first'))
                ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, data_format='channels_first'))

                assert zth.shape == ztf.shape
                assert_allclose(zth, ztf, atol=1e-05)

        # test in data_format = channels_last
        input_shape = (1, 2, 2, 2, 1)
        kernel_shape = (2, 2, 2, 1, 1)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv3d(xth, kernel_th, data_format='channels_last'))
        ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, data_format='channels_last'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Code Example #10
def load_weights_from_tf_checkpoint(model, checkpoint_file, background_label):
    print('Load weights from tensorflow checkpoint')
    progbar = Progbar(target=len(model.layers))

    reader = tf.compat.v1.train.NewCheckpointReader(checkpoint_file)
    for index, layer in enumerate(model.layers):
        progbar.update(current=index)

        if isinstance(layer, layers.convolutional.SeparableConv2D):
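            # SeparableConv2D stores two kernels in the checkpoint:
            # a depthwise kernel and a pointwise (1x1) kernel.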
            depthwise = reader.get_tensor('{}/depthwise_weights'.format(
                layer.name))
            pointwise = reader.get_tensor('{}/pointwise_weights'.format(
                layer.name))

            if K.image_data_format() == 'channels_first':
                depthwise = convert_kernel(depthwise)
                pointwise = convert_kernel(pointwise)

            layer.set_weights([depthwise, pointwise])
        elif isinstance(layer, layers.convolutional.Convolution2D):
            weights = reader.get_tensor('{}/weights'.format(layer.name))

            if K.image_data_format() == 'channels_first':
                weights = convert_kernel(weights)

            layer.set_weights([weights])
        elif isinstance(layer, layers.BatchNormalization):
            beta = reader.get_tensor('{}/beta'.format(layer.name))
            gamma = reader.get_tensor('{}/gamma'.format(layer.name))
            moving_mean = reader.get_tensor('{}/moving_mean'.format(
                layer.name))
            moving_variance = reader.get_tensor('{}/moving_variance'.format(
                layer.name))

            layer.set_weights([gamma, beta, moving_mean, moving_variance])
        elif isinstance(layer, layers.Dense):
            weights = reader.get_tensor('{}/weights'.format(layer.name))
            biases = reader.get_tensor('{}/biases'.format(layer.name))

            if background_label:
                layer.set_weights([weights, biases])
            else:
                layer.set_weights([weights[:, 1:], biases[1:]])
Code Example #11
File: backend_test.py Project: oarriaga/keras
    def test_conv2d(self):
        # TF kernel shape: (rows, cols, input_depth, depth)

        # channels_first input shape: (n, input_depth, rows, cols)
        for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
            for kernel_shape in [(2, 2, 3, 4), (4, 3, 3, 4)]:
                for padding in ['valid', 'same']:
                    xval = np.random.random(input_shape)

                    xth = KTH.variable(xval)
                    xtf = KTF.variable(xval)

                    kernel_val = np.random.random(kernel_shape) - 0.5

                    kernel_th = KTH.variable(convert_kernel(kernel_val))
                    kernel_tf = KTF.variable(kernel_val)

                    zth = KTH.eval(KTH.conv2d(xth, kernel_th, data_format='channels_first'))
                    ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, data_format='channels_first'))

                    assert zth.shape == ztf.shape
                    assert_allclose(zth, ztf, atol=1e-05)

        input_shape = (1, 6, 5, 3)
        kernel_shape = (3, 3, 3, 2)

        xval = np.random.random(input_shape)

        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)

        kernel_val = np.random.random(kernel_shape) - 0.5

        kernel_th = KTH.variable(convert_kernel(kernel_val))
        kernel_tf = KTF.variable(kernel_val)

        zth = KTH.eval(KTH.conv2d(xth, kernel_th, data_format='channels_last'))
        ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, data_format='channels_last'))

        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)
Code Example #12
File: convert.py Project: Palpatineli/ca2spike
def _th2tf(model: Model) -> Model:
    import tensorflow as tf
    ops = list()
    for layer in model.layers:
        if layer.__class__.__name__ in [
                'Convolution1D', 'Convolution2D', 'Convolution3D',
                'AtrousConvolution2D'
        ]:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)
    K.get_session().run(ops)
    return model
Code Example #13
File: Net2Net.py Project: vipmath/NetworkCompress
    def deeper_conv2d(self,
                      model,
                      layer_name,
                      config,
                      with_pooling,
                      kernel_size=3,
                      filters='same'):
        # construct graph
        new_graph = model.graph.copy()
        if not with_pooling:
            type = 'Conv2D'
        else:
            type = 'Conv2D_Pooling'
        new_node = Node(type=type,
                        name='new',
                        config={
                            'kernel_size': kernel_size,
                            'filters': filters
                        })
        new_graph.deeper(layer_name, new_node)
        # logger.debug(new_graph.to_json())

        # construct model
        new_model = MyModel(config=config, graph=new_graph)

        # inherit weight
        # in fact there is no need to get w_conv1 and b_conv1
        # what we actually need is only w_conv1's shape
        # more specifically, only kh, kw and filters are needed, num_channel is not necessary
        node_type = model.graph.get_nodes(layer_name)[0].type
        if node_type == 'Conv2D' or node_type == 'Conv2D_Pooling':
            w_conv1, b_conv1 = new_model.get_layers(
                layer_name)[0].get_weights()
            # tensorflow kernel format: [filter_height, filter_width, in_channels, out_channels] channels_last
            # theano kernel format:     [output_channels, input_channels, filter_rows, filter_columns] channels_first
            if K.image_data_format() == "channels_first":  # theano format
                # convert_kernel converts a NumPy kernel matrix from Theano format to TensorFlow format (and vice versa)
                w_conv1 = convert_kernel(w_conv1)
            kh, kw, num_channel, filters = w_conv1.shape
        elif node_type == 'Group':
            kh, kw, filters = 3, 3, model.graph.get_nodes(
                layer_name)[0].config['filters']

        w_conv2, b_conv2 = new_model.get_layers(
            layer_name, next_layer=True)[0].get_weights()

        # The newly inserted layer gets identity-initialised weights so the
        # deepened network initially computes the same function (Net2Net).
        new_w_conv2, new_b_conv2 = Net2Net._deeper_conv2d_weight(
            kh=kh, kw=kw, filters=filters)

        new_model.get_layers(layer_name, next_layer=True)[0].set_weights(
            [new_w_conv2, new_b_conv2])
        self.copy_weight(model, new_model)
        return new_model
Code Example #14
File: mlnet.py Project: dksshddl/Sal-test
def convert_model():
    model = Model()
    model.load_weights('salimap/model/vgg16_weights.h5')
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in [
                'Convolution1D', 'Convolution2D', 'Convolution3D',
                'AtrousConvolution2D'
        ]:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)
    print(model.summary())
    K.get_session().run(ops)
Code Example #15
def load_multigpu_checkpoint_weights(model,
                                     h5py_file='model_data/yolo_weights.h5'):
    """
    Loads the weights of a weight checkpoint from a multi-gpu
    keras model.

    Input:

        model - keras model to load weights into

        h5py_file - path to the h5py weights file

    Output:
        None
    """
    import h5py
    import numpy as np
    from keras import backend as K
    from keras.utils.conv_utils import convert_kernel

    print("Setting weights...")
    with h5py.File(h5py_file, "r") as file:

        # Get model subset in file - other layers are empty

        for layer in model.layers:
            weight_file = file

            try:
                layer_weights = weight_file[layer.name]
            except KeyError:
                print('No weights saved for layer - %s' % layer.name)
                continue

            try:
                weights = []
                # Extract weights, converting each h5py dataset to a numpy array
                for term in layer_weights:
                    dataset = layer_weights[term]
                    if isinstance(dataset, h5py.Dataset):
                        value = np.array(dataset)
                        # Assumption: flip 4-D conv kernels when the backend
                        # uses channels_first ordering, as in the other loaders.
                        if value.ndim == 4 and K.image_data_format() == 'channels_first':
                            value = convert_kernel(value)
                        weights.append(value)
                    else:
                        print('***No weights saved for layer - %s' % layer.name)

                # Load weights into the model layer
                layer.set_weights(weights)

            except Exception as e:
                print("Error: Could not load weights for layer:", layer.name)
Code Example #16
def convertTensorflow2Theano(model, tensorflow_weights_file,
                             output_weights_file):
    """
    TODO untested!!
    :param model:
    :param tensorflow_weights_file:
    :param output_weights_file:
    :return:
    """
    model.load_weights(tensorflow_weights_file)
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            K.set_value(layer.W, converted_w)
    model.save_weights(output_weights_file)
Code Example #17
    def parse_convolution(self, layer, attributes):
        from keras.utils.conv_utils import convert_kernel
        weights = layer.W.get_value()
        # Lasagne stores kernels as (out_ch, in_ch, rows, cols); reorder to
        # TensorFlow's (rows, cols, in_ch, out_ch).
        weights = np.transpose(weights, (2, 3, 1, 0))
        import keras
        if keras.backend.backend() != 'theano':  # Assumes lasagne uses theano.
            weights = convert_kernel(weights)
        bias = layer.b.get_value()
        if bias is None:
            bias = np.zeros(layer.num_filters)
        attributes['parameters'] = [weights, bias]
        padding = padding_string(layer.pad, layer.filter_size)
        attributes.update({'input_shape': layer.input_shape,
                           'filters': layer.num_filters,
                           'kernel_size': layer.filter_size,
                           'padding': padding,
                           'strides': layer.stride})
Code Example #18
def convertTheano2Tensorflow(model, theano_weights_file, output_weights_file):
    """
    :param model:
    :param theano_weights_file:
    :param output_weights_file:
    :return:
    """
    model.load_weights(theano_weights_file)
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in [
                'Convolution1D', 'Convolution2D', 'Convolution3D',
                'AtrousConvolution2D'
        ]:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)
    K.get_session().run(ops)
    model.save_weights(output_weights_file)
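
Typical usage, sketched with hypothetical file names and an assumed build_model() helper (neither is part of the source above):

model = build_model()  # hypothetical: any Keras model containing conv layers
convertTheano2Tensorflow(model, 'weights_th.h5', 'weights_tf.h5')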
Code Example #19
def main():
    model = build_model()
    
    os.chdir('C:/Users/Anoop/Documents/Deeplearning_setup/projects/NIST_alphabet/')
    
    if not os.path.exists('out'):
        os.mkdir('out')
    model.load_weights('weights/CNN5layer.h5')
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D', 'Convolution3D', 'AtrousConvolution2D']:
            print(layer.__class__.__name__)
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)

    K.get_session().run(ops)
    model.save_weights('CNN5layer_tensorflow.h5')

    export_model(tf.train.Saver(), model, ["conv2d_1_input"], "dense_2/Softmax")
Code Example #20
def extract_feat_3_yalers(img_path):
    model = load_model(dataPath + '/f1cn_model.49-1.613893.hdf5')
    layer_1 = K.function([model.layers[0].input], [model.layers[7].output])
    img = image.load_img(img_path, target_size=(224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    f1 = layer_1([img])[0]
    feat_5 = convert_kernel(f1[0].T)
    feature_crow_5 = apply_crow_aggregation(feat_5)
    feature_norm_5 = normalize(feature_crow_5)
    return np.sqrt(feature_norm_5)
Code Example #21
def convertThtoTf(srcFileName, dstFileName):
    from keras.utils.conv_utils import convert_kernel
    from shutil import copyfile
    copyfile(srcFileName, dstFileName)
    f = h5py.File(dstFileName, 'r+')
    allKeys = [k for k in f.keys()]
    for layerNumber in range(len(allKeys)):
        if 'conv' in allKeys[layerNumber]:
            subKeys = [k for k in f[allKeys[layerNumber]].keys()]
            for i in range(len(subKeys)):
                original_w = f[allKeys[layerNumber]][subKeys[i]][:]
                if i == 0:
                    # Replace the Theano-format kernel dataset with its
                    # TensorFlow-format equivalent.
                    del f[allKeys[layerNumber]][subKeys[0]]
                    original_w = np.transpose(convert_kernel(original_w),
                                              (2, 3, 1, 0))
                    f[allKeys[layerNumber]].create_dataset(subKeys[i],
                                                           data=original_w)
    f.close()
Code Example #22
File: vgg16bn2.py Project: malik201049/1
    def create(self):
        """
            Creates the VGG16 network architecture and loads the pretrained weights.

            Args:   None
            Returns:   None
        """
        model = self.model = Sequential()
        model.add(
            Lambda(vgg_preprocess,
                   input_shape=(3, 224, 224),
                   output_shape=(3, 224, 224)))

        self.ConvBlock(2, 64)
        self.ConvBlock(2, 128)
        self.ConvBlock(3, 256)
        self.ConvBlock(3, 512)
        self.ConvBlock(3, 512)

        model.add(Flatten())
        self.FCBlock()
        self.FCBlock()
        model.add(Dense(1000, activation='softmax'))

        fname = 'vgg16.h5'
        model.load_weights(
            get_file(fname, self.FILE_PATH + fname, cache_subdir='models'))

        from keras.utils.conv_utils import convert_kernel
        import tensorflow as tf
        ops = []
        for layer in model.layers:
            if layer.__class__.__name__ in [
                    'Convolution1D', 'Convolution2D', 'Convolution3D',
                    'AtrousConvolution2D', 'Conv1D', 'Conv2D', 'Conv3D'
            ]:
                original_w, original_b = layer.get_weights()
                converted_w = convert_kernel(original_w)
                layer.set_weights([converted_w, original_b])
Code Example #23
File: conv_utils_test.py Project: amaaniqbal/keras
def test_invalid_convert_kernel():
    with pytest.raises(ValueError):
        conv_utils.convert_kernel(np.zeros((10, 20)))
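
convert_kernel only accepts 3-, 4-, or 5-D kernels (Conv1D/2D/3D), so anything else raises a ValueError, which is what this test asserts. A quick sketch of the boundary:

import numpy as np
from keras.utils import conv_utils

conv_utils.convert_kernel(np.zeros((3, 4, 5)))     # 3-D (Conv1D kernel): OK
conv_utils.convert_kernel(np.zeros((3, 3, 4, 5)))  # 4-D (Conv2D kernel): OK
# conv_utils.convert_kernel(np.zeros((10, 20)))    # 2-D: raises ValueError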
Code Example #24
    def extract_feat(self, img_path):
        """
        img = image.load_img(img_path, target_size=(self.input_shape[0], self.input_shape[1]))
        img = image.img_to_array(img)
        """
        img = image.load_img(img_path)
        img = image.img_to_array(img)
        h, w, c = img.shape
        resize_h = h
        resize_w = w
        minlength = min(h, w)
        if minlength > 224:
            beta = minlength / 224
            resize_h = int(h / beta)
            resize_w = int(w / beta)
        # cv2.resize expects (width, height), not (height, width)
        img = cv2.resize(img, (resize_w, resize_h))

        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)

        feat_0 = self.model_vgg_block1_conv2.predict(img)
        feat_0=convert_kernel(feat_0[0].T)
        feature_crow_0=apply_crow_aggregation(feat_0)
        feature_norm_0=normalize(feature_crow_0)
        feature_mean_norm_0=normalize(preprocessing.scale(feature_crow_0,axis=0, with_mean=True, with_std=False, copy=True))

        feat_1 = self.model_vgg_block2_conv2.predict(img)
        feat_1=convert_kernel(feat_1[0].T)
        feature_crow_1=apply_crow_aggregation(feat_1)
        feature_norm_1=normalize(feature_crow_1)
        feature_mean_norm_1=normalize(preprocessing.scale(feature_crow_1,axis=0, with_mean=True, with_std=False, copy=True))

        feat_2= self.model_vgg_block3_conv1.predict(img)
        feat_2=convert_kernel(feat_2[0].T)
        feature_crow_2=apply_crow_aggregation(feat_2)
        feature_norm_2=normalize(feature_crow_2)
        feature_mean_norm_2=normalize(preprocessing.scale(feature_crow_2,axis=0, with_mean=True, with_std=False, copy=True))

        feature_448=np.hstack((np.hstack((feature_crow_0.T,feature_crow_1.T)),feature_crow_2.T))
        feature_448_norm=np.hstack((np.hstack((feature_norm_0.T,feature_norm_1.T)),feature_norm_2.T))
        feature_448_mean_norm=np.hstack((np.hstack((feature_mean_norm_0.T,feature_mean_norm_1.T)),feature_mean_norm_2.T))
        
        feat_3 = self.model_vgg_block3_conv3.predict(img)
        feat_3=convert_kernel(feat_3[0].T)
        feature_crow_3=apply_crow_aggregation(feat_3)
        feature_norm_3=normalize(feature_crow_3)
        feature_mean_norm_3=normalize(preprocessing.scale(feature_crow_3,axis=0, with_mean=True, with_std=False, copy=True))

        feat_4 = self.model_vgg_block4_conv3.predict(img)
        feat_4=convert_kernel(feat_4[0].T)
        feature_crow_4=apply_crow_aggregation(feat_4)
        feature_norm_4=normalize(feature_crow_4)
        feature_mean_norm_4=normalize(preprocessing.scale(feature_crow_4,axis=0, with_mean=True, with_std=False, copy=True))

        feat_5= self.model_vgg_block5_conv3.predict(img)
        feat_5=convert_kernel(feat_5[0].T)
        feature_crow_5=apply_crow_aggregation(feat_5)
        feature_norm_5=normalize(feature_crow_5)
        feature_mean_norm_5=normalize(preprocessing.scale(feature_crow_5,axis=0, with_mean=True, with_std=False, copy=True))

        feature_1280=np.hstack((np.hstack((feature_crow_3.T,feature_crow_4.T)),feature_crow_5.T))
        feature_1280_norm=np.hstack((np.hstack((feature_norm_3.T,feature_norm_4.T)),feature_norm_5.T))
        feature_1280_mean_norm=np.hstack((np.hstack((feature_mean_norm_3.T,feature_mean_norm_4.T)),feature_mean_norm_5.T))
        #print(feature_norm.shape)
        #feature,pca_prams=run_feature_processing_pipeline(feature_norm)
        return np.hstack((feature_448.T,feature_1280.T)),np.hstack((feature_448_norm.T,feature_1280_norm.T)),np.hstack((feature_448_mean_norm.T,feature_1280_mean_norm.T))
Code Example #25
def preprocess_weights_for_loading(layer,
                                   weights,
                                   original_keras_version=None,
                                   original_backend=None,
                                   reshape=False):
    """Converts layers weights from Keras 1 format to Keras 2.

    # Arguments
        layer: Layer instance.
        weights: List of weights values (Numpy arrays).
        original_keras_version: Keras version for the weights, as a string.
        original_backend: Keras backend the weights were trained with,
            as a string.
        reshape: Reshape weights to fit the layer when the correct number
            of values are present but the shape does not match.

    # Returns
        A list of weights values (Numpy arrays).
    """
    def convert_nested_bidirectional(weights):
        """Converts layers nested in `Bidirectional` wrapper.

        # Arguments
            weights: List of weights values (Numpy arrays).
        # Returns
            A list of weights values (Numpy arrays).
        """
        num_weights_per_layer = len(weights) // 2
        forward_weights = preprocess_weights_for_loading(
            layer.forward_layer, weights[:num_weights_per_layer],
            original_keras_version, original_backend)
        backward_weights = preprocess_weights_for_loading(
            layer.backward_layer, weights[num_weights_per_layer:],
            original_keras_version, original_backend)
        return forward_weights + backward_weights

    def convert_nested_time_distributed(weights):
        """Converts layers nested in `TimeDistributed` wrapper.

        # Arguments
            weights: List of weights values (Numpy arrays).
        # Returns
            A list of weights values (Numpy arrays).
        """
        return preprocess_weights_for_loading(layer.layer, weights,
                                              original_keras_version,
                                              original_backend)

    def convert_nested_model(weights):
        """Converts layers nested in `Model` or `Sequential`.

        # Arguments
            weights: List of weights values (Numpy arrays).
        # Returns
            A list of weights values (Numpy arrays).
        """
        new_weights = []
        # trainable weights
        for sublayer in layer.layers:
            num_weights = len(sublayer.trainable_weights)
            if num_weights > 0:
                new_weights.extend(
                    preprocess_weights_for_loading(
                        layer=sublayer,
                        weights=weights[:num_weights],
                        original_keras_version=original_keras_version,
                        original_backend=original_backend))
                weights = weights[num_weights:]

        # non-trainable weights
        for sublayer in layer.layers:
            num_weights = len([
                l for l in sublayer.weights
                if l not in sublayer.trainable_weights
            ])
            if num_weights > 0:
                new_weights.extend(
                    preprocess_weights_for_loading(
                        layer=sublayer,
                        weights=weights[:num_weights],
                        original_keras_version=original_keras_version,
                        original_backend=original_backend))
                weights = weights[num_weights:]
        return new_weights

    # Convert layers nested in Bidirectional/TimeDistributed/Model/Sequential.
    # Both transformations should be run for both Keras 1->2 conversion
    # and for conversion of CuDNN layers.
    if layer.__class__.__name__ == 'Bidirectional':
        weights = convert_nested_bidirectional(weights)
    if layer.__class__.__name__ == 'TimeDistributed':
        weights = convert_nested_time_distributed(weights)
    elif layer.__class__.__name__ in ['Model', 'Sequential']:
        weights = convert_nested_model(weights)

    if original_keras_version == '1':
        if layer.__class__.__name__ == 'TimeDistributed':
            weights = preprocess_weights_for_loading(layer.layer, weights,
                                                     original_keras_version,
                                                     original_backend)

        if layer.__class__.__name__ == 'Conv1D':
            shape = weights[0].shape
            # Handle Keras 1.1 format
            if shape[:2] != (layer.kernel_size[0],
                             1) or shape[3] != layer.filters:
                # Legacy shape:
                # (filters, input_dim, filter_length, 1)
                assert (shape[0] == layer.filters
                        and shape[2:] == (layer.kernel_size[0], 1))
                weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
            weights[0] = weights[0][:, 0, :, :]

        if layer.__class__.__name__ == 'Conv2D':
            if layer.data_format == 'channels_first':
                # old: (filters, stack_size, kernel_rows, kernel_cols)
                # new: (kernel_rows, kernel_cols, stack_size, filters)
                weights[0] = np.transpose(weights[0], (2, 3, 1, 0))

        if layer.__class__.__name__ == 'Conv2DTranspose':
            if layer.data_format == 'channels_last':
                # old: (kernel_rows, kernel_cols, stack_size, filters)
                # new: (kernel_rows, kernel_cols, filters, stack_size)
                weights[0] = np.transpose(weights[0], (0, 1, 3, 2))
            if layer.data_format == 'channels_first':
                # old: (filters, stack_size, kernel_rows, kernel_cols)
                # new: (kernel_rows, kernel_cols, filters, stack_size)
                weights[0] = np.transpose(weights[0], (2, 3, 0, 1))

        if layer.__class__.__name__ == 'Conv3D':
            if layer.data_format == 'channels_first':
                # old: (filters, stack_size, ...)
                # new: (..., stack_size, filters)
                weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))

        if layer.__class__.__name__ == 'GRU':
            if len(weights) == 9:
                kernel = np.concatenate([weights[0], weights[3], weights[6]],
                                        axis=-1)
                recurrent_kernel = np.concatenate(
                    [weights[1], weights[4], weights[7]], axis=-1)
                bias = np.concatenate([weights[2], weights[5], weights[8]],
                                      axis=-1)
                weights = [kernel, recurrent_kernel, bias]

        if layer.__class__.__name__ == 'LSTM':
            if len(weights) == 12:
                # old: i, c, f, o
                # new: i, f, c, o
                kernel = np.concatenate(
                    [weights[0], weights[6], weights[3], weights[9]], axis=-1)
                recurrent_kernel = np.concatenate(
                    [weights[1], weights[7], weights[4], weights[10]], axis=-1)
                bias = np.concatenate(
                    [weights[2], weights[8], weights[5], weights[11]], axis=-1)
                weights = [kernel, recurrent_kernel, bias]

        if layer.__class__.__name__ == 'ConvLSTM2D':
            if len(weights) == 12:
                kernel = np.concatenate(
                    [weights[0], weights[6], weights[3], weights[9]], axis=-1)
                recurrent_kernel = np.concatenate(
                    [weights[1], weights[7], weights[4], weights[10]], axis=-1)
                bias = np.concatenate(
                    [weights[2], weights[8], weights[5], weights[11]], axis=-1)
                if layer.data_format == 'channels_first':
                    # old: (filters, stack_size, kernel_rows, kernel_cols)
                    # new: (kernel_rows, kernel_cols, stack_size, filters)
                    kernel = np.transpose(kernel, (2, 3, 1, 0))
                    recurrent_kernel = np.transpose(recurrent_kernel,
                                                    (2, 3, 1, 0))
                weights = [kernel, recurrent_kernel, bias]

    conv_layers = [
        'Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'ConvLSTM2D'
    ]
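    # Kernels saved under a backend with the opposite conv/correlation
    # convention need flipping; a remaining shape mismatch indicates the
    # other dimension ordering, handled by the reshape or transpose below.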
    if layer.__class__.__name__ in conv_layers:
        layer_weights_shape = K.int_shape(layer.weights[0])
        if _need_convert_kernel(original_backend):
            weights[0] = conv_utils.convert_kernel(weights[0])
            if layer.__class__.__name__ == 'ConvLSTM2D':
                weights[1] = conv_utils.convert_kernel(weights[1])
        if reshape and layer_weights_shape != weights[0].shape:
            if weights[0].size != np.prod(layer_weights_shape):
                raise ValueError('Weights must be of equal size to ' +
                                 'apply a reshape operation. ' + 'Layer ' +
                                 layer.name + '\'s weights have shape ' +
                                 str(layer_weights_shape) + ' and size ' +
                                 str(np.prod(layer_weights_shape)) + '. ' +
                                 'The weights for loading have shape ' +
                                 str(weights[0].shape) + ' and size ' +
                                 str(weights[0].size) + '. ')
            weights[0] = np.reshape(weights[0], layer_weights_shape)
        elif layer_weights_shape != weights[0].shape:
            weights[0] = np.transpose(weights[0], (3, 2, 0, 1))
            if layer.__class__.__name__ == 'ConvLSTM2D':
                weights[1] = np.transpose(weights[1], (3, 2, 0, 1))

    # convert CuDNN layers
    weights = _convert_rnn_weights(layer, weights)

    return weights
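
The helper _need_convert_kernel (not shown above) decides whether the flip is required. Its gist, paraphrased rather than quoted from the Keras source: a flip is needed exactly when the saving and loading backends disagree on whether convolution is implemented as correlation.

def _need_convert_kernel_gist(original_backend, current_backend):
    # Paraphrase: TensorFlow and CNTK implement conv as correlation, Theano
    # as true convolution; kernels need flipping only when the two backends
    # fall on different sides.
    uses_correlation = {'tensorflow': True, 'theano': False, 'cntk': True}
    if original_backend not in uses_correlation:
        return False  # unknown origin: do not convert
    if current_backend not in uses_correlation:
        return False
    return uses_correlation[original_backend] != uses_correlation[current_backend]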
Code Example #26
def create_googlenet(weights_path=None):
    # creates GoogLeNet a.k.a. Inception v1 (Szegedy, 2015)
    input = Input(shape=(3, 224, 224))

    input_pad = ZeroPadding2D(padding=(3, 3))(input)
    conv1_7x7_s2 = Conv2D(64, (7, 7),
                          strides=(2, 2),
                          padding='valid',
                          activation='relu',
                          name='conv1/7x7_s2',
                          kernel_regularizer=l2(0.0002))(input_pad)
    conv1_zero_pad = ZeroPadding2D(padding=(1, 1))(conv1_7x7_s2)
    pool1_helper = PoolHelper()(conv1_zero_pad)
    pool1_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                padding='valid',
                                name='pool1/3x3_s2')(pool1_helper)
    pool1_norm1 = LRN(name='pool1/norm1')(pool1_3x3_s2)

    conv2_3x3_reduce = Conv2D(64, (1, 1),
                              padding='same',
                              activation='relu',
                              name='conv2/3x3_reduce',
                              kernel_regularizer=l2(0.0002))(pool1_norm1)
    conv2_3x3 = Conv2D(192, (3, 3),
                       padding='same',
                       activation='relu',
                       name='conv2/3x3',
                       kernel_regularizer=l2(0.0002))(conv2_3x3_reduce)
    conv2_norm2 = LRN(name='conv2/norm2')(conv2_3x3)
    conv2_zero_pad = ZeroPadding2D(padding=(1, 1))(conv2_norm2)
    pool2_helper = PoolHelper()(conv2_zero_pad)
    pool2_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                padding='valid',
                                name='pool2/3x3_s2')(pool2_helper)

    inception_3a_1x1 = Conv2D(64, (1, 1),
                              padding='same',
                              activation='relu',
                              name='inception_3a/1x1',
                              kernel_regularizer=l2(0.0002))(pool2_3x3_s2)
    inception_3a_3x3_reduce = Conv2D(
        96, (1, 1),
        padding='same',
        activation='relu',
        name='inception_3a/3x3_reduce',
        kernel_regularizer=l2(0.0002))(pool2_3x3_s2)
    inception_3a_3x3_pad = ZeroPadding2D(padding=(1,
                                                  1))(inception_3a_3x3_reduce)
    inception_3a_3x3 = Conv2D(
        128, (3, 3),
        padding='valid',
        activation='relu',
        name='inception_3a/3x3',
        kernel_regularizer=l2(0.0002))(inception_3a_3x3_pad)
    inception_3a_5x5_reduce = Conv2D(
        16, (1, 1),
        padding='same',
        activation='relu',
        name='inception_3a/5x5_reduce',
        kernel_regularizer=l2(0.0002))(pool2_3x3_s2)
    inception_3a_5x5_pad = ZeroPadding2D(padding=(2,
                                                  2))(inception_3a_5x5_reduce)
    inception_3a_5x5 = Conv2D(
        32, (5, 5),
        padding='valid',
        activation='relu',
        name='inception_3a/5x5',
        kernel_regularizer=l2(0.0002))(inception_3a_5x5_pad)
    inception_3a_pool = MaxPooling2D(pool_size=(3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     name='inception_3a/pool')(pool2_3x3_s2)
    inception_3a_pool_proj = Conv2D(
        32, (1, 1),
        padding='same',
        activation='relu',
        name='inception_3a/pool_proj',
        kernel_regularizer=l2(0.0002))(inception_3a_pool)
    inception_3a_output = Concatenate(axis=1, name='inception_3a/output')([
        inception_3a_1x1, inception_3a_3x3, inception_3a_5x5,
        inception_3a_pool_proj
    ])

    inception_3b_1x1 = Conv2D(
        128, (1, 1),
        padding='same',
        activation='relu',
        name='inception_3b/1x1',
        kernel_regularizer=l2(0.0002))(inception_3a_output)
    inception_3b_3x3_reduce = Conv2D(
        128, (1, 1),
        padding='same',
        activation='relu',
        name='inception_3b/3x3_reduce',
        kernel_regularizer=l2(0.0002))(inception_3a_output)
    inception_3b_3x3_pad = ZeroPadding2D(padding=(1,
                                                  1))(inception_3b_3x3_reduce)
    inception_3b_3x3 = Conv2D(
        192, (3, 3),
        padding='valid',
        activation='relu',
        name='inception_3b/3x3',
        kernel_regularizer=l2(0.0002))(inception_3b_3x3_pad)
    inception_3b_5x5_reduce = Conv2D(
        32, (1, 1),
        padding='same',
        activation='relu',
        name='inception_3b/5x5_reduce',
        kernel_regularizer=l2(0.0002))(inception_3a_output)
    inception_3b_5x5_pad = ZeroPadding2D(padding=(2,
                                                  2))(inception_3b_5x5_reduce)
    inception_3b_5x5 = Conv2D(
        96, (5, 5),
        padding='valid',
        activation='relu',
        name='inception_3b/5x5',
        kernel_regularizer=l2(0.0002))(inception_3b_5x5_pad)
    inception_3b_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        padding='same',
        name='inception_3b/pool')(inception_3a_output)
    inception_3b_pool_proj = Conv2D(
        64, (1, 1),
        padding='same',
        activation='relu',
        name='inception_3b/pool_proj',
        kernel_regularizer=l2(0.0002))(inception_3b_pool)
    inception_3b_output = Concatenate(axis=1, name='inception_3b/output')([
        inception_3b_1x1, inception_3b_3x3, inception_3b_5x5,
        inception_3b_pool_proj
    ])

    inception_3b_output_zero_pad = ZeroPadding2D(
        padding=(1, 1))(inception_3b_output)
    pool3_helper = PoolHelper()(inception_3b_output_zero_pad)
    pool3_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                padding='valid',
                                name='pool3/3x3_s2')(pool3_helper)

    inception_4a_1x1 = Conv2D(192, (1, 1),
                              padding='same',
                              activation='relu',
                              name='inception_4a/1x1',
                              kernel_regularizer=l2(0.0002))(pool3_3x3_s2)
    inception_4a_3x3_reduce = Conv2D(
        96, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4a/3x3_reduce',
        kernel_regularizer=l2(0.0002))(pool3_3x3_s2)
    inception_4a_3x3_pad = ZeroPadding2D(padding=(1,
                                                  1))(inception_4a_3x3_reduce)
    inception_4a_3x3 = Conv2D(
        208, (3, 3),
        padding='valid',
        activation='relu',
        name='inception_4a/3x3',
        kernel_regularizer=l2(0.0002))(inception_4a_3x3_pad)
    inception_4a_5x5_reduce = Conv2D(
        16, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4a/5x5_reduce',
        kernel_regularizer=l2(0.0002))(pool3_3x3_s2)
    inception_4a_5x5_pad = ZeroPadding2D(padding=(2,
                                                  2))(inception_4a_5x5_reduce)
    inception_4a_5x5 = Conv2D(
        48, (5, 5),
        padding='valid',
        activation='relu',
        name='inception_4a/5x5',
        kernel_regularizer=l2(0.0002))(inception_4a_5x5_pad)
    inception_4a_pool = MaxPooling2D(pool_size=(3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     name='inception_4a/pool')(pool3_3x3_s2)
    inception_4a_pool_proj = Conv2D(
        64, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4a/pool_proj',
        kernel_regularizer=l2(0.0002))(inception_4a_pool)
    inception_4a_output = Concatenate(axis=1, name='inception_4a/output')([
        inception_4a_1x1, inception_4a_3x3, inception_4a_5x5,
        inception_4a_pool_proj
    ])

    loss1_ave_pool = AveragePooling2D(
        pool_size=(5, 5), strides=(3, 3),
        name='loss1/ave_pool')(inception_4a_output)
    loss1_conv = Conv2D(128, (1, 1),
                        padding='same',
                        activation='relu',
                        name='loss1/conv',
                        kernel_regularizer=l2(0.0002))(loss1_ave_pool)
    loss1_flat = Flatten()(loss1_conv)
    loss1_fc = Dense(1024,
                     activation='relu',
                     name='loss1/fc',
                     kernel_regularizer=l2(0.0002))(loss1_flat)
    loss1_drop_fc = Dropout(rate=0.7)(loss1_fc)
    loss1_classifier = Dense(1000,
                             name='loss1/classifier',
                             kernel_regularizer=l2(0.0002))(loss1_drop_fc)
    loss1_classifier_act = Activation('softmax')(loss1_classifier)

    inception_4b_1x1 = Conv2D(
        160, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4b/1x1',
        kernel_regularizer=l2(0.0002))(inception_4a_output)
    inception_4b_3x3_reduce = Conv2D(
        112, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4b/3x3_reduce',
        kernel_regularizer=l2(0.0002))(inception_4a_output)
    inception_4b_3x3_pad = ZeroPadding2D(padding=(1,
                                                  1))(inception_4b_3x3_reduce)
    inception_4b_3x3 = Conv2D(
        224, (3, 3),
        padding='valid',
        activation='relu',
        name='inception_4b/3x3',
        kernel_regularizer=l2(0.0002))(inception_4b_3x3_pad)
    inception_4b_5x5_reduce = Conv2D(
        24, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4b/5x5_reduce',
        kernel_regularizer=l2(0.0002))(inception_4a_output)
    inception_4b_5x5_pad = ZeroPadding2D(padding=(2,
                                                  2))(inception_4b_5x5_reduce)
    inception_4b_5x5 = Conv2D(
        64, (5, 5),
        padding='valid',
        activation='relu',
        name='inception_4b/5x5',
        kernel_regularizer=l2(0.0002))(inception_4b_5x5_pad)
    inception_4b_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        padding='same',
        name='inception_4b/pool')(inception_4a_output)
    inception_4b_pool_proj = Conv2D(
        64, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4b/pool_proj',
        kernel_regularizer=l2(0.0002))(inception_4b_pool)
    inception_4b_output = Concatenate(axis=1, name='inception_4b/output')([
        inception_4b_1x1, inception_4b_3x3, inception_4b_5x5,
        inception_4b_pool_proj
    ])

    inception_4c_1x1 = Conv2D(
        128, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4c/1x1',
        kernel_regularizer=l2(0.0002))(inception_4b_output)
    inception_4c_3x3_reduce = Conv2D(
        128, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4c/3x3_reduce',
        kernel_regularizer=l2(0.0002))(inception_4b_output)
    inception_4c_3x3_pad = ZeroPadding2D(padding=(1,
                                                  1))(inception_4c_3x3_reduce)
    inception_4c_3x3 = Conv2D(
        256, (3, 3),
        padding='valid',
        activation='relu',
        name='inception_4c/3x3',
        kernel_regularizer=l2(0.0002))(inception_4c_3x3_pad)
    inception_4c_5x5_reduce = Conv2D(
        24, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4c/5x5_reduce',
        kernel_regularizer=l2(0.0002))(inception_4b_output)
    inception_4c_5x5_pad = ZeroPadding2D(padding=(2,
                                                  2))(inception_4c_5x5_reduce)
    inception_4c_5x5 = Conv2D(
        64, (5, 5),
        padding='valid',
        activation='relu',
        name='inception_4c/5x5',
        kernel_regularizer=l2(0.0002))(inception_4c_5x5_pad)
    inception_4c_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        padding='same',
        name='inception_4c/pool')(inception_4b_output)
    inception_4c_pool_proj = Conv2D(
        64, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4c/pool_proj',
        kernel_regularizer=l2(0.0002))(inception_4c_pool)
    inception_4c_output = Concatenate(axis=1, name='inception_4c/output')([
        inception_4c_1x1, inception_4c_3x3, inception_4c_5x5,
        inception_4c_pool_proj
    ])

    inception_4d_1x1 = Conv2D(
        112, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4d/1x1',
        kernel_regularizer=l2(0.0002))(inception_4c_output)
    inception_4d_3x3_reduce = Conv2D(
        144, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4d/3x3_reduce',
        kernel_regularizer=l2(0.0002))(inception_4c_output)
    inception_4d_3x3_pad = ZeroPadding2D(padding=(1,
                                                  1))(inception_4d_3x3_reduce)
    inception_4d_3x3 = Conv2D(
        288, (3, 3),
        padding='valid',
        activation='relu',
        name='inception_4d/3x3',
        kernel_regularizer=l2(0.0002))(inception_4d_3x3_pad)
    inception_4d_5x5_reduce = Conv2D(
        32, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4d/5x5_reduce',
        kernel_regularizer=l2(0.0002))(inception_4c_output)
    inception_4d_5x5_pad = ZeroPadding2D(padding=(2,
                                                  2))(inception_4d_5x5_reduce)
    inception_4d_5x5 = Conv2D(
        64, (5, 5),
        padding='valid',
        activation='relu',
        name='inception_4d/5x5',
        kernel_regularizer=l2(0.0002))(inception_4d_5x5_pad)
    inception_4d_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        padding='same',
        name='inception_4d/pool')(inception_4c_output)
    inception_4d_pool_proj = Conv2D(
        64, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4d/pool_proj',
        kernel_regularizer=l2(0.0002))(inception_4d_pool)
    inception_4d_output = Concatenate(axis=1, name='inception_4d/output')([
        inception_4d_1x1, inception_4d_3x3, inception_4d_5x5,
        inception_4d_pool_proj
    ])

    loss2_ave_pool = AveragePooling2D(
        pool_size=(5, 5), strides=(3, 3),
        name='loss2/ave_pool')(inception_4d_output)
    loss2_conv = Conv2D(128, (1, 1),
                        padding='same',
                        activation='relu',
                        name='loss2/conv',
                        kernel_regularizer=l2(0.0002))(loss2_ave_pool)
    loss2_flat = Flatten()(loss2_conv)
    loss2_fc = Dense(1024,
                     activation='relu',
                     name='loss2/fc',
                     kernel_regularizer=l2(0.0002))(loss2_flat)
    loss2_drop_fc = Dropout(rate=0.7)(loss2_fc)
    loss2_classifier = Dense(1000,
                             name='loss2/classifier',
                             kernel_regularizer=l2(0.0002))(loss2_drop_fc)
    loss2_classifier_act = Activation('softmax')(loss2_classifier)

    inception_4e_1x1 = Conv2D(
        256, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4e/1x1',
        kernel_regularizer=l2(0.0002))(inception_4d_output)
    inception_4e_3x3_reduce = Conv2D(
        160, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4e/3x3_reduce',
        kernel_regularizer=l2(0.0002))(inception_4d_output)
    inception_4e_3x3_pad = ZeroPadding2D(padding=(1,
                                                  1))(inception_4e_3x3_reduce)
    inception_4e_3x3 = Conv2D(
        320, (3, 3),
        padding='valid',
        activation='relu',
        name='inception_4e/3x3',
        kernel_regularizer=l2(0.0002))(inception_4e_3x3_pad)
    inception_4e_5x5_reduce = Conv2D(
        32, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4e/5x5_reduce',
        kernel_regularizer=l2(0.0002))(inception_4d_output)
    inception_4e_5x5_pad = ZeroPadding2D(padding=(2,
                                                  2))(inception_4e_5x5_reduce)
    inception_4e_5x5 = Conv2D(
        128, (5, 5),
        padding='valid',
        activation='relu',
        name='inception_4e/5x5',
        kernel_regularizer=l2(0.0002))(inception_4e_5x5_pad)
    inception_4e_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        padding='same',
        name='inception_4e/pool')(inception_4d_output)
    inception_4e_pool_proj = Conv2D(
        128, (1, 1),
        padding='same',
        activation='relu',
        name='inception_4e/pool_proj',
        kernel_regularizer=l2(0.0002))(inception_4e_pool)
    inception_4e_output = Concatenate(axis=1, name='inception_4e/output')([
        inception_4e_1x1, inception_4e_3x3, inception_4e_5x5,
        inception_4e_pool_proj
    ])

    inception_4e_output_zero_pad = ZeroPadding2D(
        padding=(1, 1))(inception_4e_output)
    pool4_helper = PoolHelper()(inception_4e_output_zero_pad)
    pool4_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                padding='valid',
                                name='pool4/3x3_s2')(pool4_helper)

    inception_5a_1x1 = Conv2D(256, (1, 1),
                              padding='same',
                              activation='relu',
                              name='inception_5a/1x1',
                              kernel_regularizer=l2(0.0002))(pool4_3x3_s2)
    inception_5a_3x3_reduce = Conv2D(
        160, (1, 1),
        padding='same',
        activation='relu',
        name='inception_5a/3x3_reduce',
        kernel_regularizer=l2(0.0002))(pool4_3x3_s2)
    inception_5a_3x3_pad = ZeroPadding2D(padding=(1,
                                                  1))(inception_5a_3x3_reduce)
    inception_5a_3x3 = Conv2D(
        320, (3, 3),
        padding='valid',
        activation='relu',
        name='inception_5a/3x3',
        kernel_regularizer=l2(0.0002))(inception_5a_3x3_pad)
    inception_5a_5x5_reduce = Conv2D(
        32, (1, 1),
        padding='same',
        activation='relu',
        name='inception_5a/5x5_reduce',
        kernel_regularizer=l2(0.0002))(pool4_3x3_s2)
    inception_5a_5x5_pad = ZeroPadding2D(padding=(2,
                                                  2))(inception_5a_5x5_reduce)
    inception_5a_5x5 = Conv2D(
        128, (5, 5),
        padding='valid',
        activation='relu',
        name='inception_5a/5x5',
        kernel_regularizer=l2(0.0002))(inception_5a_5x5_pad)
    inception_5a_pool = MaxPooling2D(pool_size=(3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     name='inception_5a/pool')(pool4_3x3_s2)
    inception_5a_pool_proj = Conv2D(
        128, (1, 1),
        padding='same',
        activation='relu',
        name='inception_5a/pool_proj',
        kernel_regularizer=l2(0.0002))(inception_5a_pool)
    inception_5a_output = Concatenate(axis=1, name='inception_5a/output')([
        inception_5a_1x1, inception_5a_3x3, inception_5a_5x5,
        inception_5a_pool_proj
    ])

    inception_5b_1x1 = Conv2D(
        384, (1, 1),
        padding='same',
        activation='relu',
        name='inception_5b/1x1',
        kernel_regularizer=l2(0.0002))(inception_5a_output)
    inception_5b_3x3_reduce = Conv2D(
        192, (1, 1),
        padding='same',
        activation='relu',
        name='inception_5b/3x3_reduce',
        kernel_regularizer=l2(0.0002))(inception_5a_output)
    inception_5b_3x3_pad = ZeroPadding2D(padding=(1,
                                                  1))(inception_5b_3x3_reduce)
    inception_5b_3x3 = Conv2D(
        384, (3, 3),
        padding='valid',
        activation='relu',
        name='inception_5b/3x3',
        kernel_regularizer=l2(0.0002))(inception_5b_3x3_pad)
    inception_5b_5x5_reduce = Conv2D(
        48, (1, 1),
        padding='same',
        activation='relu',
        name='inception_5b/5x5_reduce',
        kernel_regularizer=l2(0.0002))(inception_5a_output)
    inception_5b_5x5_pad = ZeroPadding2D(padding=(2,
                                                  2))(inception_5b_5x5_reduce)
    inception_5b_5x5 = Conv2D(
        128, (5, 5),
        padding='valid',
        activation='relu',
        name='inception_5b/5x5',
        kernel_regularizer=l2(0.0002))(inception_5b_5x5_pad)
    inception_5b_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        padding='same',
        name='inception_5b/pool')(inception_5a_output)
    inception_5b_pool_proj = Conv2D(
        128, (1, 1),
        padding='same',
        activation='relu',
        name='inception_5b/pool_proj',
        kernel_regularizer=l2(0.0002))(inception_5b_pool)
    inception_5b_output = Concatenate(axis=1, name='inception_5b/output')([
        inception_5b_1x1, inception_5b_3x3, inception_5b_5x5,
        inception_5b_pool_proj
    ])

    pool5_7x7_s1 = AveragePooling2D(pool_size=(7, 7),
                                    strides=(1, 1),
                                    name='pool5/7x7_s1')(inception_5b_output)
    loss3_flat = Flatten()(pool5_7x7_s1)
    pool5_drop_7x7_s1 = Dropout(rate=0.4)(loss3_flat)
    loss3_classifier = Dense(1000,
                             name='loss3/classifier',
                             kernel_regularizer=l2(0.0002))(pool5_drop_7x7_s1)
    loss3_classifier_act = Activation('softmax', name='prob')(loss3_classifier)

    googlenet = Model(inputs=input,
                      outputs=[
                          loss1_classifier_act, loss2_classifier_act,
                          loss3_classifier_act
                      ])

    if weights_path:
        googlenet.load_weights(weights_path)

    if keras.backend.backend() == 'tensorflow':
        # convert the convolutional kernels for tensorflow
        ops = []
        for layer in googlenet.layers:
            if layer.__class__.__name__ == 'Conv2D':
                original_w = K.get_value(layer.kernel)
                converted_w = convert_kernel(original_w)
                ops.append(tf.assign(layer.kernel, converted_w).op)
        K.get_session().run(ops)

    return googlenet
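
A minimal usage sketch for the builder above, assuming the enclosing function is exposed as create_googlenet(weights_path) (a hypothetical name; the actual signature sits outside this excerpt) and that the input layer is channels-first (3, 224, 224), which the Concatenate(axis=1) calls imply:

import numpy as np

# 'create_googlenet' and the weights filename are assumptions; the
# builder's real name is defined outside this excerpt.
model = create_googlenet(weights_path='googlenet_weights.h5')

# One random channels-first 224x224 image; the model returns the two
# auxiliary classifier outputs plus the main 'prob' softmax output.
x = np.random.rand(1, 3, 224, 224).astype('float32')
aux1, aux2, prob = model.predict(x)
print(prob.shape)  # (1, 1000)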
Code example #27
# =============================================================================
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import Flatten
from keras.utils.conv_utils import convert_kernel

# 'model' is assumed to be a VGG16-style network built with Theano
# dim ordering earlier in the script.

model.load_weights(
    'C:/Users/aksmenon/Desktop/distracted drivers/Distracted Drivers/vgg16_weights_th_dim_ordering_th_kernels.h5'
)

ops = []
for layer in model.layers:
    if layer.__class__.__name__ in [
            'Convolution1D', 'Convolution2D', 'Convolution3D',
            'AtrousConvolution2D'
    ]:
        original_w = K.get_value(layer.W)
        converted_w = convert_kernel(original_w)
        ops.append(tf.assign(layer.W, converted_w).op)

K.get_session().run(ops)
model.save_weights(
    'C:/Users/aksmenon/Desktop/distracted drivers/Distracted Drivers/my_weights_tensorflow.h5'
)

# building a classifier model on top of the convolutional model:

top_model = Sequential()

top_model.add(Flatten(input_shape=(1000, 1)))
# =============================================================================
# top_model.add(Dense(64, activation='relu', W_regularizer=EigenvalueRegularizer(10)))
# top_model.add(Dense(10, activation='softmax', W_regularizer=EigenvalueRegularizer(10)))
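
A hedged completion of the commented-out top layers, substituting standard l2 regularization for the custom EigenvalueRegularizer referenced above (its implementation is not part of this excerpt):

from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.regularizers import l2

top_model = Sequential()
top_model.add(Flatten(input_shape=(1000, 1)))
# l2 here stands in for the custom EigenvalueRegularizer, which is
# defined outside this excerpt.
top_model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01)))
top_model.add(Dense(10, activation='softmax', kernel_regularizer=l2(0.01)))
top_model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])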
Code example #28
import numpy as np
import pytest
from keras.utils import conv_utils


def test_invalid_convert_kernel():
    # convert_kernel only accepts 3-D, 4-D, or 5-D kernels, so a 2-D
    # array must raise a ValueError.
    with pytest.raises(ValueError):
        conv_utils.convert_kernel(np.zeros((10, 20)))
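
For contrast with the invalid case, a minimal sketch of what convert_kernel does with a valid kernel: it flips the spatial axes, leaves the last two (channel) axes untouched, and is its own inverse:

import numpy as np
from keras.utils.conv_utils import convert_kernel

# A valid 4-D Conv2D kernel: (rows, cols, in_channels, out_channels).
kernel = np.arange(9, dtype='float32').reshape((3, 3, 1, 1))
flipped = convert_kernel(kernel)

# Spatial axes are reversed; channel axes are untouched.
assert np.array_equal(flipped[:, :, 0, 0], kernel[::-1, ::-1, 0, 0])

# Applying the conversion twice recovers the original kernel.
assert np.array_equal(convert_kernel(flipped), kernel)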
Code example #29
File: Net2Net.py Project: vipmath/NetworkCompress
    def _wider_conv2d_weight(teacher_w1,
                             teacher_b1,
                             teacher_w2,
                             new_width,
                             init,
                             help=None):

        if K.image_data_format() == "channels_last":
            _teacher_w1 = convert_kernel(teacher_w1)
            _teacher_w2 = convert_kernel(teacher_w2)
        else:
            _teacher_w1 = teacher_w1
            _teacher_w2 = teacher_w2

        _teacher_b1 = teacher_b1
        assert _teacher_w1.shape[3] == _teacher_w2.shape[2], (
            'successive layers from the teacher model should have compatible '
            'shapes; got {} {} {}'.format(
                _teacher_w1.shape, _teacher_w2.shape, _teacher_b1.shape))
        assert _teacher_w1.shape[3] == _teacher_b1.shape[0], (
            'weight and bias from same layer should have compatible shapes')
        assert new_width > _teacher_w1.shape[3], (
            'new width (filters) should be bigger than the existing one')

        n = new_width - _teacher_w1.shape[3]
        if init == 'random-pad':
            # 'random-pad': fill the n new filters with small random weights
            new_w1 = np.random.normal(0,
                                      0.1,
                                      size=_teacher_w1.shape[:-1] + (n, ))
            new_b1 = np.ones(n) * 0.1
            new_w2 = np.random.normal(0,
                                      0.1,
                                      size=_teacher_w2.shape[:2] +
                                      (n, _teacher_w2.shape[3]))
        elif init == 'net2wider':
            # 'net2wider': replicate randomly chosen existing filters and
            # divide the next layer's incoming weights among the copies
            index = np.random.randint(_teacher_w1.shape[3], size=n)
            factors = np.bincount(index)[index] + 1.
            new_w1 = _teacher_w1[:, :, :, index]
            new_b1 = _teacher_b1[index]
            new_w2 = _teacher_w2[:, :, index, :] / factors.reshape(
                (1, 1, -1, 1))
        else:
            raise ValueError('Unsupported weight initializer: %s' % init)

        student_w1 = np.concatenate((_teacher_w1, new_w1), axis=3)
        if init == 'random-pad':
            student_w2 = np.concatenate((_teacher_w2, new_w2), axis=2)
        elif init == 'net2wider':
            # add small noise to break symmetry, so that student model will have
            # full capacity later
            noise = np.random.normal(0, 5e-2 * new_w2.std(), size=new_w2.shape)
            student_w2 = np.concatenate((_teacher_w2, new_w2 + noise), axis=2)
            student_w2[:, :, index, :] = new_w2
        student_b1 = np.concatenate((_teacher_b1, new_b1), axis=0)

        if K.image_data_format() == "channels_last":
            student_w1 = convert_kernel(student_w1)
            # student_b1=convert_kernel(student_b1)
            student_w2 = convert_kernel(student_w2)

        return student_w1, student_b1, student_w2
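
As a sanity check of the 'net2wider' logic above, a standalone numpy sketch on toy channels-last shapes (all names illustrative; the symmetry-breaking noise is omitted so the preservation check is exact):

import numpy as np

rng = np.random.RandomState(0)
w1 = rng.normal(size=(3, 3, 4, 8))   # teacher conv kernel, layer 1
b1 = rng.normal(size=(8,))           # teacher bias, layer 1
w2 = rng.normal(size=(3, 3, 8, 16))  # teacher conv kernel, layer 2

new_width = 12
n = new_width - w1.shape[3]

# Replicate n randomly chosen filters; each replicated filter i ends
# up with bincount(index)[i] + 1 identical copies in the student layer.
index = rng.randint(w1.shape[3], size=n)
factors = np.bincount(index)[index] + 1.

student_w1 = np.concatenate((w1, w1[:, :, :, index]), axis=3)
student_b1 = np.concatenate((b1, b1[index]), axis=0)

# Divide the next layer's incoming weights among the copies so the
# widened pair computes the same function as the teacher pair.
new_w2 = w2[:, :, index, :] / factors.reshape((1, 1, -1, 1))
student_w2 = np.concatenate((w2, new_w2), axis=2)
student_w2[:, :, index, :] = new_w2

# Invariant: the incoming weights of each replicated filter's copies
# sum back to the teacher's original incoming weight.
for i in np.unique(index):
    copies = [i] + list(w1.shape[3] + np.where(index == i)[0])
    assert np.allclose(student_w2[:, :, copies, :].sum(axis=2),
                       w2[:, :, i, :])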