Example #1
def initialize_weights():
    weight_reader = WeightReader(wt_path)

    weight_reader.reset()
    nb_conv = 23

    for i in range(1, nb_conv + 1):
        conv_layer = model.get_layer('conv_' + str(i))

        if i < nb_conv:
            norm_layer = model.get_layer('norm_' + str(i))

            size = np.prod(norm_layer.get_weights()[0].shape)

            beta = weight_reader.read_bytes(size)
            gamma = weight_reader.read_bytes(size)
            mean = weight_reader.read_bytes(size)
            var = weight_reader.read_bytes(size)

            # Darknet stores BN params as beta, gamma, mean, variance;
            # Keras BatchNormalization expects [gamma, beta, mean, variance]
            norm_layer.set_weights([gamma, beta, mean, var])

        if len(conv_layer.get_weights()) > 1:
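            # conv layer with its own bias (no BatchNorm): Darknet stores the
            # biases before the kernel weights, so read them first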
            bias = weight_reader.read_bytes(
                np.prod(conv_layer.get_weights()[1].shape))
            kernel = weight_reader.read_bytes(
                np.prod(conv_layer.get_weights()[0].shape))
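            # reorder the flat Darknet kernel (stored as out_c, in_c, h, w)
            # into Keras' (h, w, in_c, out_c) layout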
            kernel = kernel.reshape(
                list(reversed(conv_layer.get_weights()[0].shape)))
            kernel = kernel.transpose([2, 3, 1, 0])
            conv_layer.set_weights([kernel, bias])
        else:
            kernel = weight_reader.read_bytes(
                np.prod(conv_layer.get_weights()[0].shape))
            kernel = kernel.reshape(
                list(reversed(conv_layer.get_weights()[0].shape)))
            kernel = kernel.transpose([2, 3, 1, 0])
            conv_layer.set_weights([kernel])

    layer = model.layers[-4]  # the last convolutional layer
    weights = layer.get_weights()

    new_kernel = np.random.normal(size=weights[0].shape) / (GRID_H * GRID_W)
    new_bias = np.random.normal(size=weights[1].shape) / (GRID_H * GRID_W)

    layer.set_weights([new_kernel, new_bias])
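
Every snippet here relies on a WeightReader helper that the examples never define. The following is only a minimal sketch consistent with how it is used above (reset() rewinds the read position, read_bytes(n) returns the next n float32 values); treating the Darknet header as four 32-bit values to skip is an assumption, not something taken from these examples.

import numpy as np

class WeightReader:
    # Minimal sketch: the Darknet .weights file is assumed to be a short
    # header followed by raw float32 values.
    def __init__(self, weight_file, header_values=4):
        self.header_values = header_values
        self.offset = header_values  # skip the assumed header
        self.all_weights = np.fromfile(weight_file, dtype='float32')

    def read_bytes(self, size):
        # return the next `size` float32 values and advance the offset
        self.offset += size
        return self.all_weights[self.offset - size:self.offset]

    def reset(self):
        # rewind to just after the header; the loaded weights stay in memory
        self.offset = self.header_values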

def load_weights():
    """Load the pretrained weights originally provided by YOLO."""
    weight_reader = WeightReader(WEIGHT_PATH)
    weight_reader.reset()
    nb_conv = 23
    for i in range(1, nb_conv + 1):
        conv_layer = model.get_layer('conv_' + str(i))
        if i < nb_conv:
            norm_layer = model.get_layer('norm_' + str(i))
            size = np.prod(norm_layer.get_weights()[0].shape)
            beta = weight_reader.read_bytes(size)
            gamma = weight_reader.read_bytes(size)
            mean = weight_reader.read_bytes(size)
            var = weight_reader.read_bytes(size)
            norm_layer.set_weights([gamma, beta, mean, var])

        if len(conv_layer.get_weights()) > 1:
            bias = weight_reader.read_bytes(
                np.prod(conv_layer.get_weights()[1].shape))
            kernel = weight_reader.read_bytes(
                np.prod(conv_layer.get_weights()[0].shape))
            kernel = kernel.reshape(
                list(reversed(conv_layer.get_weights()[0].shape)))
            kernel = kernel.transpose([2, 3, 1, 0])
            conv_layer.set_weights([kernel, bias])
        else:
            kernel = weight_reader.read_bytes(
                np.prod(conv_layer.get_weights()[0].shape))
            kernel = kernel.reshape(
                list(reversed(conv_layer.get_weights()[0].shape)))
            kernel = kernel.transpose([2, 3, 1, 0])
            conv_layer.set_weights([kernel])
    # Randomize the weights of the last conv layer, since only that layer
    # will be retrained
    layer = model.layers[-4]
    weights = layer.get_weights()
    new_kernel = np.random.normal(size=weights[0].shape) / (GRID_H * GRID_W)
    new_bias = np.random.normal(size=weights[1].shape) / (GRID_H * GRID_W)
    layer.set_weights([new_kernel, new_bias])
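
Since only the re-initialized last conv layer is meant to be retrained, a typical next step is to freeze the remaining layers before compiling. This is a hypothetical sketch, not part of the original examples; the optimizer settings and the 'mse' stand-in loss (a real setup would use a custom YOLO loss that consumes true_boxes) are assumptions.

from keras.optimizers import Adam

# Keep only the last few layers trainable, from the re-initialized conv
# layer (model.layers[-4] in the snippets above) onward.
for layer in model.layers[:-4]:
    layer.trainable = False

# 'mse' is only a placeholder loss for this sketch.
model.compile(optimizer=Adam(lr=0.5e-4), loss='mse')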
Example #3
           strides=(1, 1),
           padding='same',
           name='conv_23')(x)
output = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)

# small hack to allow true_boxes to be registered when Keras builds the model
# for more information: https://github.com/fchollet/keras/issues/2790
output = Lambda(lambda args: args[0])([output, true_boxes])

model = Model([input_image, true_boxes], output)

model.summary()

weight_reader = WeightReader(wt_path)

weight_reader.reset()
nb_conv = 23

for i in range(1, nb_conv + 1):
    conv_layer = model.get_layer('conv_' + str(i))

    if i < nb_conv:
        norm_layer = model.get_layer('norm_' + str(i))

        size = np.prod(norm_layer.get_weights()[0].shape)

        beta = weight_reader.read_bytes(size)
        gamma = weight_reader.read_bytes(size)
        mean = weight_reader.read_bytes(size)
        var = weight_reader.read_bytes(size)
Example #4
# for more information: https://github.com/fchollet/keras/issues/2790
# output = Lambda(lambda args: args[0])([output, true_boxes])

model = Model([input_image, true_boxes], output)

model.summary()

yolo = Model([input_image, true_boxes], output)
# Load the pre-trained weights

# **Load the weights originally provided by YOLO**
print("**Load the weights originally provided by YOLO**")
weight_reader = WeightReader(wt_path)

weight_reader.reset()  # only rewinds the read offset; the loaded weights are untouched
nb_conv = 23

for i in range(1, nb_conv + 1):
    conv_layer = model.get_layer('conv_' + str(i))

    if i < nb_conv:
        norm_layer = model.get_layer('norm_' + str(i))

        size = np.prod(norm_layer.get_weights()[0].shape)

        beta = weight_reader.read_bytes(size)
        gamma = weight_reader.read_bytes(size)
        mean = weight_reader.read_bytes(size)
        var = weight_reader.read_bytes(size)