Example #1
import os

from keras.layers import Dense

import c3d_model


def raw_model(n_classes, backend='tf'):
    # Paths to the pre-trained Sports1M C3D weights/architecture files.
    model_dir = './models'
    model_weight_filename = os.path.join(model_dir, 'sports1M_weights_tf.h5')
    model_json_filename = os.path.join(model_dir, 'sports1M_weights_tf.json')

    print("[Info] Reading model architecture...")
    # model = model_from_json(open(model_json_filename, 'r').read())
    model = c3d_model.get_model(backend=backend)

    print("[Info] Loading model weights...")
    model.load_weights(model_weight_filename)
    model.add(Dense(n_classes, activation='softmax'))

    print("[Info] Loading model weights -- DONE!")
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    return model
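
# --- Hedged usage sketch (added; not part of the original example) ---
# Assumes the weight file under ./models exists and that c3d_model.get_model
# builds the usual Sports1M C3D network, which takes 16-frame 112x112 RGB clips
# in channels-last (TensorFlow) ordering; the clip below is just a zero placeholder.
import numpy as np

model = raw_model(n_classes=10, backend='tf')
dummy_clip = np.zeros((1, 16, 112, 112, 3), dtype=np.float32)  # (batch, frames, H, W, C)
probs = model.predict(dummy_clip)
print(probs.shape)  # expected: (1, 10)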
Example #2
import os

import numpy as np

import c3d_model

# caffe (protobuf bindings for the Caffe model), reindex and convert_dense are
# assumed to come from the surrounding conversion script.


def main():

    # dim_ordering = 'th'
    import keras.backend as K
    dim_ordering = K.image_dim_ordering()
    print("[Info] image_dim_order (from default ~/.keras/keras.json)={}",
          dim_ordering)

    # get C3D model placeholder
    model = c3d_model.get_model(summary=True, backend=dim_ordering)

    # input caffe model
    caffe_model_filename = './models/conv3d_deepnetA_sport1m_iter_1900000'

    # output dir/files
    model_dir = './models'
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    output_model_filename = os.path.join(
        model_dir, 'sports1M_weights_{}.h5'.format(dim_ordering))
    output_json_filename = os.path.join(
        model_dir, 'sports1M_weights_{}.json'.format(dim_ordering))

    # read caffe model
    print("-" * 19)
    print("Reading model file={}...", caffe_model_filename)
    p = caffe.NetParameter()
    p.ParseFromString(open(caffe_model_filename, 'rb').read())

    params = []
    print("-" * 19)
    print("Converting model...")

    # read every conv/fc layer and append to "params" list
    for i in range(len(p.layers)):
        layer = p.layers[i]
        # skip non-conv/fc layers
        if 'conv' not in layer.name and 'fc' not in layer.name:
            continue
        print("[Info] Massaging \"{}\" layer...", layer.name)
        weights_b = np.array(layer.blobs[1].data, dtype=np.float32)
        weights_p = np.array(layer.blobs[0].data, dtype=np.float32).reshape(
            layer.blobs[0].num,
            layer.blobs[0].channels,
            layer.blobs[0].length,
            layer.blobs[0].height,
            layer.blobs[0].width,
        )
        if 'conv' in layer.name:
            # theano vs tensorflow: https://github.com/fchollet/keras/blob/master/keras/utils/np_utils.py#L90-L115
            if dim_ordering == 'th':
                weights_p = reindex(weights_p)
            else:
                weights_p = np.transpose(weights_p, (2, 3, 4, 1, 0))
        elif 'fc' in layer.name:
            weights_p = weights_p[0, 0, 0, :, :].T
            if 'fc6' in layer.name:
                print(
                    "[Info] First FC layer after flattening layer needs special care..."
                )
                weights_p = convert_dense(weights_p)
        params.append([weights_p, weights_b])

    valid_layer_count = 0
    for layer_indx in range(len(model.layers)):
        layer_name = model.layers[layer_indx].name
        if 'conv' in layer_name or 'fc' in layer_name:
            print("[Info] Transplanting \"{}\" layer...", layer_name)
            model.layers[layer_indx].set_weights(params[valid_layer_count])
            valid_layer_count += 1

    print("-" * 19)
    print("Saving pre-trained model weights as {}...", output_model_filename)
    model.save_weights(output_model_filename, overwrite=True)
    json_string = model.to_json()
    with open(output_json_filename, 'w') as f:
        f.write(json_string)
    print("-" * 39)
    print("Conversion done!")
    print("-" * 39)
Example #3
import c3d_model
import numpy as np
from keras.layers import Dense
from keras.models import Model

c3d_model = c3d_model.get_model(nb_classes=22)

weights_path = '/media/lq/C13E-1ED0/dataset/UCF_Crimes/results/gan_1/weights/c3d_TC_GAN_22_outputs_it1000.hdf5'
save_path = weights_path.replace('_22_outputs_', '_21_outputs_')
n_classes = 21

c3d_model.load_weights(weights_path)

w = c3d_model.get_layer('fc8').get_weights()
print(w)
w[0] = np.delete(w[0], np.s_[-1], axis=1)
w[1] = np.delete(w[1], np.s_[-1])

# Delete the old 22-way output layer; the new model is rebuilt from 'dropout_2' below
c3d_model.layers.pop()
last_layer = c3d_model.get_layer('dropout_2').output
# New output layer with n_classes units
out = Dense(n_classes, activation='softmax', name='fc8')(last_layer)
c3d_model = Model(inputs=c3d_model.input, outputs=out)
c3d_model.get_layer('fc8').set_weights(w)
print(c3d_model.get_layer('fc8').get_weights())
c3d_model.summary()

c3d_model.save_weights(save_path)
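
# --- Illustration (added; not in the original snippet) ---
# What the np.delete surgery above does, on a tiny example: drop the last output
# unit by removing the last column of the kernel and the last entry of the bias.
kernel = np.arange(12, dtype=np.float32).reshape(4, 3)  # (fan_in=4, units=3)
bias = np.arange(3, dtype=np.float32)
trimmed_kernel = np.delete(kernel, np.s_[-1], axis=1)   # -> shape (4, 2)
trimmed_bias = np.delete(bias, np.s_[-1])               # -> shape (2,)
print(trimmed_kernel.shape, trimmed_bias.shape)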
Example #4
import os

import numpy as np
import tensorflow as tf

import c3d_model

# caffe (protobuf bindings for the Caffe model), reindex and convert_dense are
# assumed to come from the surrounding conversion script.


def main():
    # dim_ordering = 'th'
    dim_ordering = "tf"
    print(
        "[Info] image_dim_order (from default ~/.keras/keras.json)={}".format(
            dim_ordering))

    # get C3D model placeholder
    print(c3d_model.get_model.__code__.co_varnames)
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus),
                  "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)
    print("test1")

    model = c3d_model.get_model(summary=True, backend=dim_ordering)
    print("test2")
    # input caffe model
    caffe_model_filename = './models/conv3d_deepnetA_sport1m_iter_1900000'

    # output dir/files
    model_dir = './models'
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    output_model_filename = os.path.join(
        model_dir, 'sports1M_weights_{}.h5'.format(dim_ordering))
    output_json_filename = os.path.join(
        model_dir, 'sports1M_weights_{}.json'.format(dim_ordering))

    # read caffe model
    print("-" * 19)
    print("Reading model file={}...".format(caffe_model_filename))
    p = caffe.NetParameter()
    p.ParseFromString(open(caffe_model_filename, 'rb').read())

    params = []
    print("-" * 19)
    print("Converting model...")

    # read every conv/fc layer and append to "params" list
    for i in range(len(p.layers)):
        layer = p.layers[i]
        # skip non-conv/fc layers
        if 'conv' not in layer.name and 'fc' not in layer.name:
            continue
        print("[Info] Massaging \"{}\" layer...".format(layer.name))
        weights_b = np.array(layer.blobs[1].data, dtype=np.float32)
        weights_p = np.array(layer.blobs[0].data, dtype=np.float32).reshape(
            layer.blobs[0].num,
            layer.blobs[0].channels,
            layer.blobs[0].length,
            layer.blobs[0].height,
            layer.blobs[0].width,
        )
        if 'conv' in layer.name:
            # theano vs tensorflow: https://github.com/fchollet/keras/blob/master/keras/utils/np_utils.py#L90-L115
            if dim_ordering == 'th':
                weights_p = reindex(weights_p)
            else:
                weights_p = np.transpose(weights_p, (2, 3, 4, 1, 0))
        elif 'fc' in layer.name:
            weights_p = weights_p[0, 0, 0, :, :].T
            if 'fc6' in layer.name:
                print("[Info] First FC layer after flattening layer needs "
                      "special care...")
                weights_p = convert_dense(weights_p)
        params.append([weights_p, weights_b])

    valid_layer_count = 0
    for layer_indx in range(len(model.layers)):
        layer_name = model.layers[layer_indx].name
        if 'conv' in layer_name or 'fc' in layer_name:
            print("[Info] Transplanting \"{}\" layer...".format(layer_name))
            model.layers[layer_indx].set_weights(params[valid_layer_count])
            valid_layer_count += 1

    print("-" * 19)
    print("Saving pre-trained model weights as {}...".format(
        output_model_filename))
    model.save_weights(output_model_filename, overwrite=True)
    json_string = model.to_json()
    with open(output_json_filename, 'w') as f:
        f.write(json_string)
    print("-" * 39)
    print("Conversion done!")
    print("-" * 39)