Example no. 1
from keras.layers import Input, Conv2D, Activation, MaxPool2D, Flatten, Dense
from keras.models import Model


def gettest_model():
    # Build the small conv net, load the trained weights through the full
    # model (up to the final activation), then return a model truncated at
    # the "dense" layer so it exposes the raw 2-unit output.
    input = Input(shape=[16, 66, 3])
    A = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
    B = Activation("relu", name='relu1')(A)
    C = MaxPool2D(pool_size=2)(B)
    x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(C)
    x = Activation("relu", name='relu2')(x)
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
    K = Activation("relu", name='relu3')(x)
    x = Flatten()(K)
    dense = Dense(2, name="dense")(x)
    output = Activation("relu", name='relu4')(dense)
    x = Model([input], [output])
    x.load_weights("./model/model12.h5")
    ok = Model([input], [dense])
    return ok
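A minimal usage sketch, assuming the weights file ./model/model12.h5 exists and the imports above are in scope; the dummy batch below is a placeholder, not real data:

import numpy as np

test_model = gettest_model()
dummy_batch = np.zeros((1, 16, 66, 3), dtype='float32')  # one 16x66 RGB patch
features = test_model.predict(dummy_batch)
print(features.shape)  # (1, 2): the raw output of the "dense" layer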
Example no. 2
def gettest_model():
    input = Input(shape=[16, 66, 3])  # change this shape to [None,None,3] to enable arbitrary-shape input
    A = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
    B = Activation("relu", name='relu1')(A)
    C = MaxPool2D(pool_size=2)(B)
    x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(C)
    x = Activation("relu", name='relu2')(x)
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
    K = Activation("relu", name='relu3')(x)

    x = Flatten()(K)
    dense = Dense(2, name="dense")(x)
    output = Activation("relu", name='relu4')(dense)
    x = Model([input], [output])
    x.load_weights("./model/model12.h5")
    ok = Model([input], [dense])

    for layer in ok.layers:
        print(layer)

    return ok
Example no. 3
def gettest_model():
    input = Input(shape=[16, 66, 3])  # change this shape to [None,None,3] to enable arbitrary-shape input
    A = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
    B = Activation("relu", name='relu1')(A)
    C = MaxPool2D(pool_size=2)(B)
    x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(C)
    x = Activation("relu", name='relu2')(x)
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
    K = Activation("relu", name='relu3')(x)


    x = Flatten()(K)
    dense = Dense(2, name="dense")(x)
    output = Activation("relu", name='relu4')(dense)
    x = Model([input], [output])
    x.load_weights("./model/model12.h5")
    ok = Model([input], [dense])

    for layer in ok.layers:
        print(layer)

    return ok
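The inline comment in these examples suggests switching the input shape to [None, None, 3] to accept arbitrary image sizes. That only holds for the convolutional trunk: the Flatten/Dense tail still needs a fixed spatial size. A minimal sketch of the variable-size trunk, reusing the layer names from the snippet (an illustration under that assumption, not the original author's code):

from keras.layers import Input, Conv2D, Activation, MaxPool2D
from keras.models import Model

def get_conv_trunk():
    # Variable-height/width RGB input; only convolutions and pooling follow,
    # so any spatial size at least as large as the receptive field works.
    inp = Input(shape=[None, None, 3])
    x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(inp)
    x = Activation('relu', name='relu1')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = Activation('relu', name='relu2')(x)
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = Activation('relu', name='relu3')(x)
    return Model(inp, x)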
Example no. 4
def create_cost_module(inputs, adjustable):
    """Implements the cost module of the siamese network.
    :param inputs:          list containing the feature tensor from each siamese head
    :param adjustable:      experiment configuration object (selects cost_module_type,
                            neural_distance, layer sizes and other hyperparameters)
    :return:                a distance or similarity tensor, depending on cost_module_type
    """

    # NOTE: 2017/08 `merge` will become deprecated
    def subtract(x):
        output = x[0] - x[1]
        return output

    def divide(x):
        output = x[0] / x[1]
        return output

    def absolute(x):
        output = abs(x[0] - x[1])
        return output

    def the_shape(shapes):
        shape1, shape2 = shapes
        a_shape = shape1
        return a_shape

    if adjustable.cost_module_type == 'neural_network':
        if adjustable.neural_distance == 'concatenate':
            features = keras.layers.concatenate(inputs)
        elif adjustable.neural_distance == 'add':
            features = keras.layers.add(inputs)
        elif adjustable.neural_distance == 'multiply':
            features = keras.layers.multiply(inputs)
        elif adjustable.neural_distance == 'subtract':
            features = keras.layers.merge(inputs=inputs,
                                          mode=subtract,
                                          output_shape=the_shape)
        elif adjustable.neural_distance == 'divide':
            features = keras.layers.merge(inputs=inputs,
                                          mode=divide,
                                          output_shape=the_shape)
        elif adjustable.neural_distance == 'absolute':
            features = keras.layers.merge(inputs=inputs,
                                          mode=absolute,
                                          output_shape=the_shape)
        else:
            features = None

        dense_layer = Dense(
            adjustable.neural_distance_layers[0],
            name='dense_1',
            trainable=adjustable.trainable_cost_module)(features)
        activation = Activation(adjustable.activation_function)(dense_layer)
        dropout_layer = Dropout(pc.DROPOUT)(activation)
        dense_layer = Dense(
            adjustable.neural_distance_layers[1],
            name='dense_2',
            trainable=adjustable.trainable_cost_module)(dropout_layer)
        activation = Activation(adjustable.activation_function)(dense_layer)
        dropout_layer = Dropout(pc.DROPOUT)(activation)
        output_layer = Dense(pc.NUM_CLASSES, name='ouput')(dropout_layer)
        softmax = Activation('softmax')(output_layer)

        if adjustable.weights_name is not None:
            softmax.load_weights(os.path.join(pc.SAVE_LOCATION_MODEL_WEIGHTS,
                                              adjustable.weights_name),
                                 by_name=True)

        return softmax

    elif adjustable.cost_module_type == 'euclidean':
        distance = Lambda(euclidean_distance,
                          output_shape=eucl_dist_output_shape)(inputs)
        return distance

    elif adjustable.cost_module_type == 'euclidean_fc':
        distance = Lambda(euclidean_distance,
                          output_shape=eucl_dist_output_shape)(inputs)
        dense_layer = Dense(1, name='dense_1')(distance)
        activation = Activation(adjustable.activation_function)(dense_layer)
        output_layer = Dense(pc.NUM_CLASSES, name='ouput')(activation)
        softmax = Activation('softmax')(output_layer)
        return softmax

    elif adjustable.cost_module_type == 'cosine':
        distance = Lambda(cosine_distance,
                          output_shape=cos_dist_output_shape)(inputs)
        return distance
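As the note in the snippet says, `merge` with a `mode` argument is the Keras 1 API and is gone in Keras 2. A hedged sketch of how the 'subtract', 'divide' and 'absolute' branches could be expressed with `Lambda` in Keras 2, using stand-in inputs (the helper names mirror the snippet; a possible port, not the original code):

from keras.layers import Input, Lambda
import keras.backend as K

def the_shape(shapes):
    # Output shape equals the shape of either input tensor.
    shape1, shape2 = shapes
    return shape1

a = Input(shape=(128,))   # stand-ins for the two siamese feature tensors
b = Input(shape=(128,))

subtracted = Lambda(lambda x: x[0] - x[1], output_shape=the_shape)([a, b])
divided    = Lambda(lambda x: x[0] / x[1], output_shape=the_shape)([a, b])
absolute   = Lambda(lambda x: K.abs(x[0] - x[1]), output_shape=the_shape)([a, b])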
Example no. 5
model = Conv2D(64, (3, 3), padding='same', name='conv15')(model)
model = Activation('relu', name='act15')(model)

model = Conv2D(64, (3, 3), padding='same', name='conv16')(model)
model = Activation('relu', name='act16')(model)
model = Conv2D(64, (3, 3), padding='same', name='conv17')(model)
model = Activation('relu', name='act17')(model)
model = Conv2D(64, (3, 3), padding='same', name='conv18')(model)
model = Activation('relu', name='act18')(model)
model = Conv2D(64, (3, 3), padding='same', name='conv19')(model)
model = Activation('relu', name='act19')(model)
model = Conv2D(1, (3, 3), padding='same', name='conv20')(model)
model = Activation('relu', name='act20')(model)
res_img = model

output_img = add([res_img, input_img])  # residual + input (element-wise sum)

model = Model(input_img, output_img)

model.load_weights('vdsr_model_edges.h5')

img = image.load_img('./patch.png', grayscale=True, target_size=(41, 41))
x = image.img_to_array(img)
x = x.astype('float32') / 255
x = np.expand_dims(x, axis=0)

pred = model.predict(x)

test_img = np.reshape(pred, (41, 41))

imsave('test_img.png', test_img)
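Example no. 5 is the tail of a deeper network: `model` and `input_img` are defined before this excerpt, and the final add of the predicted residual to the input is the VDSR-style residual-learning pattern. A minimal self-contained sketch of that pattern with a single hidden conv layer (layer count and names are illustrative, not the original 20-layer model):

from keras.layers import Input, Conv2D, Activation, add
from keras.models import Model

# One-channel 41x41 patch, as in the prediction code above.
input_img = Input(shape=(41, 41, 1))
x = Conv2D(64, (3, 3), padding='same')(input_img)
x = Activation('relu')(x)
res_img = Conv2D(1, (3, 3), padding='same')(x)   # predicted residual
output_img = add([res_img, input_img])           # residual + input
model = Model(input_img, output_img)
model.summary()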
Example no. 6
model = Activation('relu', name='act16')(model)
model = Conv2D(64, (3, 3), padding='same', name='conv17')(model)
model = Activation('relu', name='act17')(model)
model = Conv2D(64, (3, 3), padding='same', name='conv18')(model)
model = Activation('relu', name='act18')(model)
model = Conv2D(64, (3, 3), padding='same', name='conv19')(model)
model = Activation('relu', name='act19')(model)
model = Conv2D(1, (3, 3), padding='same', name='conv20')(model)
# model = Activation('relu', name='act20')(model)
res_img = model

output_img = add([res_img, input_img])

model = Model(input_img, output_img)

model.load_weights('checkpoints2/vdsr-200-32.21.hdf5')

pred = model.predict(data_input, batch_size=1)
sess = tf.InteractiveSession()
print(sess.run(PSNR(data_label, pred)))
print(data_label.shape)
print(data_input.shape)
print(pred.shape)
y = np.reshape(data_label, [256, 256])
t = np.reshape(data_input, [256, 256])
c = np.reshape(pred, [256, 256])

# sio.savemat("yuantu.mat", {'yuan': y})
# sio.savemat("chongjian.mat", {'jian': c})

ax1 = plt.subplot(1, 3, 1)
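`PSNR` is called in Example no. 6 but not defined in the excerpt. A hedged sketch of a TensorFlow 1.x implementation compatible with the `sess.run(PSNR(data_label, pred))` call above, assuming pixel values scaled to [0, 1] (the original function may differ):

import tensorflow as tf

def PSNR(y_true, y_pred, max_val=1.0):
    # Peak signal-to-noise ratio: 10 * log10(MAX^2 / MSE), returned as a TF op.
    y_true = tf.convert_to_tensor(y_true, dtype=tf.float32)
    y_pred = tf.convert_to_tensor(y_pred, dtype=tf.float32)
    mse = tf.reduce_mean(tf.square(y_true - y_pred))
    return 10.0 * tf.log((max_val ** 2) / mse) / tf.log(10.0)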