Example #1
        model = Model(inputs=inputs, outputs=output)
        vgg16 = VGG16(weights="imagenet",
                      include_top=False,
                      input_shape=(224, 224, 3))
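        # VGG16 without its classifier head serves only as a source of
        # pretrained conv kernels for the encoder layers below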

        self.model = model

        if print_summary:
            print(self.model.summary())

        for layer in self.model.layers:
            if layer.name.startswith('conv'):
                block = layer.name[4:].split('-')[0]
                depth = layer.name[4:].split('-')[1]
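                # layer names are expected to encode position as 'conv<block>-<depth>'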
                # apply vgg16 weights without bias
                layer.set_weights([
                    vgg16.get_layer('block{}_conv{}'.format(
                        block, depth)).get_weights()[0]
                ])

        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy', 'mse'])


deconvNet = DeconvNet()
model = deconvNet.model

generate_json(model, "DeconvNet")
Example #2
        x = _conv(filters=n_labels,
                  kernel_size=(1, 1),
                  padding='same',
                  block='out_bilinear_%s' % output_stride)(x)
        out = BilinearUpSampling2D((n_labels, input_shape[0], input_shape[1]),
                                   factor=output_stride)(x)

    elif upsample_type == 'deconv':
        out = Conv2DTranspose(filters=n_labels,
                              kernel_size=(output_stride * 2,
                                           output_stride * 2),
                              strides=(output_stride, output_stride),
                              padding='same',
                              kernel_initializer='he_normal',
                              kernel_regularizer=None,
                              use_bias=False,
                              name='upscore_{}'.format('out'))(x)

    out = Reshape((input_shape[0] * input_shape[1], n_labels),
                  input_shape=(input_shape[0], input_shape[1], n_labels))(out)
    # output activation applied to each pixel's label scores (default "softmax")
    out = Activation(output_mode)(out)

    model = Model(inputs=img_input, outputs=out)

    return model


model = PSPNet50()
generate_json(model, "PSPNet")
Example #3
    unpool_4 = MaxUnpooling2D(pool_size)([conv_22, mask_2])
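    # SegNet-style unpooling: values are placed back at the argmax positions
    # recorded in the corresponding encoder pooling mask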

    conv_23 = Convolution2D(128, (kernel, kernel), padding="same")(unpool_4)
    conv_23 = BatchNormalization()(conv_23)
    conv_23 = Activation("relu")(conv_23)
    conv_24 = Convolution2D(64, (kernel, kernel), padding="same")(conv_23)
    conv_24 = BatchNormalization()(conv_24)
    conv_24 = Activation("relu")(conv_24)

    unpool_5 = MaxUnpooling2D(pool_size)([conv_24, mask_1])

    conv_25 = Convolution2D(64, (kernel, kernel), padding="same")(unpool_5)
    conv_25 = BatchNormalization()(conv_25)
    conv_25 = Activation("relu")(conv_25)

    conv_26 = Convolution2D(n_labels, (1, 1), padding="valid")(conv_25)
    conv_26 = BatchNormalization()(conv_26)
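    # flatten the spatial grid so the final activation yields per-pixel class scores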
    conv_26 = Reshape(
            (input_shape[0]*input_shape[1], n_labels),
            input_shape=(input_shape[0], input_shape[1], n_labels))(conv_26)

    outputs = Activation(output_mode)(conv_26)
    print("Build decoder done..")

    model = Model(inputs=inputs, outputs=outputs, name="SegNet")

    return model

model = segnet()
generate_json(model, "SegNet")
Example #4
    model = Model(inputs, x, name='deeplabv3plus')

    # load weights

    if weights == 'pascal_voc':
        if backbone == 'xception':
            weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH_X,
                                    cache_subdir='models')
        else:
            weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH_MOBILE,
                                    cache_subdir='models')
        model.load_weights(weights_path, by_name=True)
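        # by_name=True matches weights to layers by layer name rather than by order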
    return model


def preprocess_input(x):
    """Preprocesses a numpy array encoding a batch of images.
    # Arguments
        x: a 4D numpy array consisting of RGB values within [0, 255].
    # Returns
        Input array scaled to [-1., 1.]
    """
    return imagenet_utils.preprocess_input(x, mode='tf')
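
# A minimal usage sketch (assumes NumPy): in 'tf' mode pixel values are
# rescaled from [0, 255] to [-1, 1] via x / 127.5 - 1, e.g.:
#   import numpy as np
#   batch = np.random.randint(0, 256, size=(1, 512, 512, 3)).astype('float32')
#   batch = preprocess_input(batch)  # values now lie in [-1., 1.]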

if __name__ == '__main__':
    model = Deeplabv3(weights=None)
    generate_json(model, "DeepLab_v3")
Example #5
    num_anchors = (4, 6, 6, 4)
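    # each head predicts (class_num + 4) values per anchor (class scores plus
    # 4 box offsets); the filter multipliers below match these anchor counts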

    x1 = layers.Conv2D((class_num + 4) * 4, kernel_size=3,
                       activation='relu')(x1)
    x1 = layers.Reshape(target_shape=(-1, class_num + 4))(x1)

    x2 = layers.Conv2D((class_num + 4) * 6, kernel_size=3,
                       activation='relu')(x2)
    x2 = layers.Reshape(target_shape=(-1, class_num + 4))(x2)

    x3 = layers.Conv2D((class_num + 4) * 6, kernel_size=3,
                       activation='relu')(x3)
    x3 = layers.Reshape(target_shape=(-1, class_num + 4))(x3)

    x4 = layers.Conv2D((class_num + 4) * 4, kernel_size=3,
                       activation='relu')(x4)
    x4 = layers.Reshape(target_shape=(-1, class_num + 4))(x4)

    output = layers.Concatenate(axis=1)([x1, x2, x3, x4])
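    # stack per-anchor predictions from all four feature maps along the anchor axis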

    return Model(img_input, output)


model = DSOD()
model.summary()

import json
from generate_template import generate_json
generate_json(model, 'DSOD')
Example #6
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)

from generate_template import generate_json
generate_json(model, "seq2seq")

# # Run training
# model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
#           batch_size=batch_size,
#           epochs=epochs,
#           validation_split=0.2)
# # Save model
# model.save('s2s.h5')

# # Next: inference mode (sampling).
# # Here's the drill:
# # 1) encode input and retrieve initial decoder state
# # 2) run one step of decoder with this initial state
# # and a "start of sequence" token as target.
Example #7
    # By setting return_sequences=True we return not only the last output but
    # all the outputs so far, in the form of (num_samples, timesteps,
    # output_dim). This is necessary because the TimeDistributed layer below
    # expects the first dimension to be the timesteps.
    x = RNN(HIDDEN_SIZE, return_sequences=True)(x)

# Apply a dense layer to every temporal slice of the input. For each step of
# the output sequence, decide which character should be chosen.
x = layers.TimeDistributed(layers.Dense(len(chars), activation='softmax'))(x)

model = Model(model_input, x)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

generate_json(model, "RNN")

# Train the model each generation and show predictions against the validation
# dataset.
# for iteration in range(1, 200):
#     print()
#     print('-' * 50)
#     print('Iteration', iteration)
#     model.fit(x_train, y_train,
#               batch_size=BATCH_SIZE,
#               epochs=1,
#               validation_data=(x_val, y_val))
#     # Select 10 samples from the validation set at random so we can visualize
#     # errors.
#     for i in range(10):
#         ind = np.random.randint(0, len(x_val))
Example #8
    x = layers.MaxPooling2D(pool_size=(2, 2), padding='valid')(x)

    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = layers.MaxPooling2D(pool_size=(2, 2), padding='valid')(x)

    x = layers.Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = layers.MaxPooling2D(pool_size=(2, 2), padding='valid')(x)

    x = layers.Conv2D(1024, (3, 3), strides=(1, 1), padding='same')(x)
    x = LeakyReLU(alpha=0.1)(x)

    x = layers.Conv2D(1024, (3, 3), strides=(1, 1), padding='same')(x)
    x = LeakyReLU(alpha=0.1)(x)

    x = layers.Conv2D(1024, (3, 3), strides=(1, 1), padding='same')(x)
    x = LeakyReLU(alpha=0.1)(x)

    x = layers.Flatten()(x)
    x = layers.Dense(256)(x)
    x = layers.Dense(4096)(x)
    x = LeakyReLU(0.1)(x)
    x = layers.Dense(1470)(x)
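    # 1470 = 7 * 7 * (2 * 5 + 20): the original PASCAL VOC setting of a 7x7
    # grid with 2 boxes (5 values each) and 20 class scores per cell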

    return Model(model_input, x)


model = create_yolov1_tiny_model()
generate_json(model, "YOLO_v1")
Example #9
    endpoints = [
        'activation_9', 'activation_21', 'activation_39', 'activation_48'
    ]
    f = [resnet.get_layer(name).output for name in endpoints]
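    # f collects intermediate feature maps tapped from the ResNet backbone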

    g = [None, None, None, None]
    h = [None, None, None, None]

    for i in range(4):
        h[i] = Conv2D(256, 1, padding="same")(f[i])

    for i in range(4):
        print(i, h[i].shape)

    g[0] = RefineBlock(high_inputs=None, low_inputs=h[0])
    print(0, g[0], h[1])
    g[1] = RefineBlock(g[0], h[1])
    print(1, g[1], h[2])
    g[2] = RefineBlock(g[1], h[2])
    print(2, g[2], h[3])
    g[3] = RefineBlock(g[2], h[3])
    print(3)
    F_score = Conv2D(21, 1, activation="relu", padding="same")(g[3])
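    # 21 output channels, presumably the 20 PASCAL VOC classes plus background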

    return Model(resnet.inputs, F_score)


model = RefineNet()
model.summary()
generate_json(model, "RefineNet")
Example #10
# x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
# x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
# print('x_train shape:', x_train.shape)
# print('x_test shape:', x_test.shape)

print('Build model...')
# model = Sequential()
model_input = layers.Input(shape=(max_features, ))
x = Embedding(max_features, 128)(model_input)
x = GRU(128, dropout=0.2, recurrent_dropout=0.2)(x)
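# dropout acts on the GRU's input connections, recurrent_dropout on its recurrent state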
x = Dense(1, activation='sigmoid')(x)
model = Model(model_input, x)

from generate_template import generate_json

generate_json(model, "GRU")

# # try using different optimizers and different optimizer configs
# model.compile(loss='binary_crossentropy',
#               optimizer='adam',
#               metrics=['accuracy'])

# print('Train...')
# model.fit(x_train, y_train,
#           batch_size=batch_size,
#           epochs=15,
#           validation_data=(x_test, y_test))
# score, acc = model.evaluate(x_test, y_test,
#                             batch_size=batch_size)
# print('Test score:', score)
# print('Test accuracy:', acc)
Example #11
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_1')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_2')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_3')(h)
    h = AtrousConvolution2D(4096, 7, 7, atrous_rate=(4, 4), activation='relu', name='fc6')(h)
    h = Dropout(0.5, name='drop6')(h)
    h = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(h)
    h = Dropout(0.5, name='drop7')(h)
    h = Convolution2D(classes, 1, 1, activation='relu', name='fc-final')(h)
    h = ZeroPadding2D(padding=(33, 33))(h)
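    # context module: stacked convolutions with progressively larger dilation
    # rates aggregate multi-scale context on top of the front-end features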
    h = Convolution2D(2 * classes, 3, 3, activation='relu', name='ct_conv1_1')(h)
    h = Convolution2D(2 * classes, 3, 3, activation='relu', name='ct_conv1_2')(h)
    h = AtrousConvolution2D(4 * classes, 3, 3, atrous_rate=(2, 2), activation='relu', name='ct_conv2_1')(h)
    h = AtrousConvolution2D(8 * classes, 3, 3, atrous_rate=(4, 4), activation='relu', name='ct_conv3_1')(h)
    h = AtrousConvolution2D(16 * classes, 3, 3, atrous_rate=(8, 8), activation='relu', name='ct_conv4_1')(h)
    h = AtrousConvolution2D(32 * classes, 3, 3, atrous_rate=(16, 16), activation='relu', name='ct_conv5_1')(h)
    h = Convolution2D(32 * classes, 3, 3, activation='relu', name='ct_fc1')(h)
    model_out = Convolution2D(classes, 1, 1, name='ct_final', activation="softmax")(h)

    # if apply_softmax:
    #     model_out = softmax(logits)
    # else:
    #     model_out = logits

    model = Model(input=model_in, output=model_out, name='dilation_voc12')

    return model

model = get_dilation_model_voc()
generate_json(model, "DilatedNet")