Code example #1
from tensorflow.keras.applications import inception_v3, mobilenet_v2, resnet50, vgg16, vgg19
from tensorflow.keras.layers import Dense, Input, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam


def create_uncertainty_model(learning_rate=1e-3, num_hidden_units=20, model_type='mobilenet_v2'):
    # num_classes, dirichlet_aleatoric_cross_entropy, max_beta and min_beta
    # are defined elsewhere in the original project.
    mu_input = Input(shape=(num_classes,))
    if model_type == 'mobilenet_v2':
        base_model = mobilenet_v2.MobileNetV2(include_top=False, weights='imagenet', input_tensor=None,
                                              input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif model_type == 'vgg16':
        base_model = vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None,
                                 input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif model_type == 'resnet50':
        base_model = resnet50.ResNet50(include_top=False, weights='imagenet', input_tensor=None,
                                       input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif model_type == 'vgg19':
        base_model = vgg19.VGG19(include_top=False, weights='imagenet', input_tensor=None,
                                 input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif model_type == 'inception_v3':
        base_model = inception_v3.InceptionV3(include_top=False, weights='imagenet', input_tensor=None,
                                              input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    else:
        base_model = mobilenet_v2.MobileNetV2(include_top=False, weights='imagenet', input_tensor=None,
                                              input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    # Freeze the pretrained backbone; only the new dense head is trained.
    base_model.trainable = False
    beta = base_model.output
    beta = Dense(num_hidden_units, activation='relu')(beta)
    beta = Dense(num_hidden_units, activation='relu')(beta)
    beta = Dense(num_hidden_units, activation='relu')(beta)
    beta = Dense(1, activation='sigmoid')(beta)
    # Concatenate the class means with the learned beta (uncertainty) scalar.
    output = concatenate([mu_input, beta])

    model = Model(inputs=[mu_input, base_model.input], outputs=output)
    model.compile(loss=dirichlet_aleatoric_cross_entropy,
                  optimizer=Adam(learning_rate=learning_rate),
                  metrics=[max_beta, min_beta])
    return model
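
A minimal usage sketch (hypothetical: num_classes, the custom loss and metrics, and the training arrays mu_train, x_train, y_train are assumed to exist in the surrounding project):

# Hypothetical usage sketch for the factory above.
model = create_uncertainty_model(learning_rate=1e-4, num_hidden_units=32,
                                 model_type='resnet50')
model.summary()
# The model takes two inputs: the precomputed class means and the raw image.
# model.fit([mu_train, x_train], y_train, batch_size=32, epochs=5)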
Code example #2
    def create_network(self):
        # ref_img = img_to_array(load_img(P.target_mask_path))
        # img_nrows, img_ncols = ref_img.shape[:2]
        # Create tensor variables for images
        images = K.concatenate([self.style_image, self.target_image, self.content_image], axis=0)
        # Create tensor variables for masks
        raw_style_mask, raw_target_mask = load_mask_labels(self.img_nrows, self.img_ncols)
        style_mask = K.variable(raw_style_mask.astype('float32'))
        target_mask = K.variable(raw_target_mask.astype('float32'))
        masks = K.concatenate([style_mask, target_mask], axis=0)

        # image model as VGG19
        with tf.name_scope("VGG"):
            self.image_model = vgg19.VGG19(include_top=False, input_tensor=images)

        # mask model as a series of pooling
        with tf.name_scope('mask_model'):
            mask_input = tf.keras.layers.Input(tensor=masks, shape=(None, None, None), name='mask_input')
            x = mask_input
            for layer in self.image_model.layers[1:]:
                name = 'mask_%s' % layer.name
                if 'conv' in layer.name:
                    x = tf.keras.layers.AveragePooling2D((3, 3), strides=(1, 1),
                                                         padding='same', name=name)(x)
                elif 'pool' in layer.name:
                    x = tf.keras.layers.AveragePooling2D((2, 2), name=name)(x)
            self.mask_model = tf.keras.Model(mask_input, x)
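
Because every mask layer is named 'mask_<vgg_layer>', masks can be looked up at the same spatial resolution as any VGG19 feature map. A hedged sketch of that pairing (it assumes `net` is an instance of the class above and uses standard VGG19 layer names):

# Hedged sketch: pair VGG19 feature maps with resolution-matched masks.
mask_outputs = {layer.name: layer.output for layer in net.mask_model.layers}
style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1']
aligned = [(net.image_model.get_layer(name).output, mask_outputs['mask_' + name])
           for name in style_layers]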
Code example #3
def build_vgg():
    # Build a VGG model for the feature (perceptual) loss.
    # img_shape is defined elsewhere in the original project.
    vgg_in = Input(img_shape)
    vgg = vgg19.VGG19(include_top=False, input_shape=img_shape, input_tensor=vgg_in)
    # Truncate at block5_conv4 and freeze: the network is only a feature extractor.
    vgg_out = vgg.get_layer('block5_conv4').output
    vgg = Model(vgg_in, vgg_out, name='vgg')
    vgg.trainable = False
    return vgg
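
A hedged sketch of how such a frozen extractor is typically used, comparing two images in VGG feature space (the mean-squared form and the backend import are assumptions, not part of the original):

# Hypothetical feature-loss usage; y_true/y_pred are image batches in img_shape.
from tensorflow.keras import backend as K

vgg = build_vgg()

def feature_loss(y_true, y_pred):
    return K.mean(K.square(vgg(y_true) - vgg(y_pred)))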
Code example #4
def content_features_model(image_size, layer_name='block4_conv1'):
    import numpy as np
    from tensorflow.python.keras.applications import vgg19
    from tensorflow.python.keras.layers import Input, Lambda
    from tensorflow.python.keras.models import Model

    def preprocess_for_vgg(x):
        # Map images from [-1, 1] back to [0, 255], convert RGB to BGR,
        # then subtract the ImageNet BGR channel means (caffe-style
        # preprocessing: the flip must happen before the subtraction).
        x = 255 * (x + 1) / 2
        x = x[..., ::-1]
        mean = np.array([103.939, 116.779, 123.68])
        mean = mean.reshape((1, 1, 1, 3))
        x = x - mean
        return x

    x = Input(list(image_size) + [3])
    y = Lambda(preprocess_for_vgg)(x)
    vgg = vgg19.VGG19(weights='imagenet', include_top=False, input_tensor=y)
    outputs_dict = dict([(layer.name, layer.output) for layer in vgg.layers])
    if isinstance(layer_name, list):
        y = [outputs_dict[ln] for ln in layer_name]
    else:
        y = outputs_dict[layer_name]
    return Model(inputs=x, outputs=y)
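
A short usage sketch; the [-1, 1] input range follows from preprocess_for_vgg, while the image size and layer list are arbitrary examples:

# Hypothetical usage: extract multi-scale content features in one pass.
features_model = content_features_model((128, 64),
                                        layer_name=['block1_conv1', 'block4_conv1'])
# images: float array in [-1, 1] with shape (batch, 128, 64, 3)
# feats = features_model.predict(images)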
Code example #5
def createVGG19Model():

    ## Instantiate the VGG19 network and an empty sequential model.
    vgg19Model = vgg19.VGG19()
    sequentialModel = Sequential()

    ## Add every layer of the VGG19 network to the sequential model so it can be modified.
    for capa in vgg19Model.layers:
        sequentialModel.add(capa)

    ## Remove the last layer of the model, the prediction layer.
    sequentialModel.pop()

    ## Since the model is pretrained, freeze its layers so they are not trained
    ## again later, which saves a lot of time.
    for capa in sequentialModel.layers:
        capa.trainable = False

    ## Finally, add a decision layer with two neurons and a softmax activation.
    sequentialModel.add(Dense(2, activation='softmax'))

    ## Return the customized model.
    return sequentialModel
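
A minimal compile-and-train sketch for the two-class head (the optimizer, loss, and data generators are assumptions, not part of the original):

# Hypothetical training sketch; the generators are assumed to yield
# 224x224 RGB batches with two-class one-hot labels.
model = createVGG19Model()
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(train_generator, validation_data=val_generator, epochs=5)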
Code example #6
width, height = load_img(target_image_path).size
img_height = 400
img_width = int(width * img_height / height)

target_image = K.constant(preprocess_image(target_image_path))
style_reference_image = K.constant(
    preprocess_image(style_reference_image_path))

combination_image = K.placeholder((1, img_height, img_width, 3))

input_tensor = K.concatenate(
    [target_image, style_reference_image, combination_image], axis=0)

model = vgg19.VGG19(input_tensor=input_tensor,
                    weights='imagenet',
                    include_top=False)
print('Model loaded.')

outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
content_layer = 'block5_conv2'
style_layers = [
    'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
    'block5_conv1'
]
total_variation_weight = 1e-4
style_weight = 1.
content_weight = 0.025

loss = K.variable(0.)
layer_features = outputs_dict[content_layer]
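
The excerpt stops just after selecting the content features. In the usual continuation of this style-transfer recipe (a hedged sketch; content_loss is assumed to be a squared-error helper defined elsewhere), the content term is added to the loss using the batch indices from input_tensor, where index 0 is the target image and index 2 the combination image:

# Hedged continuation sketch, e.g. with
# def content_loss(base, combination): return K.sum(K.square(combination - base))
target_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss = loss + content_weight * content_loss(target_image_features,
                                            combination_features)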
Code example #7
    output_path, ext = os.path.splitext(args.output_path)
    if ext == '':
        ext = '.png'
    config_gpu(args.gpu, args.allow_growth)

    ## Precomputing the targets for content and style
    # Load content and style images
    content_image = preprocess_image_scale(args.content_image_path,
                                           img_size=args.img_size)
    style_images = [
        preprocess_image_scale(img, img_size=args.style_img_size)
        for img in args.style_image_path
    ]
    nb_styles = len(style_images)

    model = vgg19.VGG19(weights='imagenet', include_top=False)
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])

    content_features = get_content_features(outputs_dict, args.content_layers)
    style_features = get_style_features(outputs_dict,
                                        args.style_layers,
                                        norm_by_channels=args.norm_by_channels)

    get_content_fun = K.function([model.input], content_features)
    get_style_fun = K.function([model.input], style_features)

    content_targets = get_content_fun([content_image])
    # List of list of features
    style_targets_list = [get_style_fun([img]) for img in style_images]

    # List of batched features
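
The excerpt ends at the batching step; a hedged guess at what the final comment refers to, regrouping the per-style feature lists into one array per layer (the NumPy stacking is an assumption about the original code):

# Hedged sketch: one batched array per style layer, stacked across styles.
import numpy as np
style_targets = [np.concatenate([targets[i] for targets in style_targets_list])
                 for i in range(len(style_targets_list[0]))]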
Code example #8
from tensorflow.python.keras.utils.vis_utils import plot_model
import h5py
from tensorflow.python.keras.models import model_from_json
import tensorflow as tf
from tensorflow.python.keras.applications import vgg19
from tensorflow.python.keras.applications.vgg19 import preprocess_input
from tensorflow.python.keras.preprocessing.image import load_img
from tensorflow.python.keras.preprocessing.image import img_to_array
import numpy as np
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.layers import Dense, Dropout, MaxPooling2D
from tensorflow.python.keras import regularizers
import cv2

#import torchvision.datasets.imagenet as imagenet  #to include models like Squeezenet,Alexnet

model = vgg19.VGG19(weights="imagenet",
                    include_top=False,
                    input_shape=(224, 224, 3))
#model.cuda()
# add new classifier layers on top of the frozen convolutional base
x = model.output
x = MaxPooling2D()(x)
for layer in model.layers:
    layer.trainable = False
x = Dense(units=256,
          activation="relu",
          kernel_regularizer=regularizers.l2(0.01))(x)
x = Dropout(0.4)(x)
x = Dense(units=256,
          activation="relu",
          kernel_regularizer=regularizers.l2(0.01))(x)
x = Dropout(0.4)(x)
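
The snippet stops before the classification layer; a hedged sketch of one way to finish and compile it (the Flatten layer, the 10-class head, and the optimizer are assumptions):

# Hypothetical completion: flatten, attach a softmax head, and build the Model.
from tensorflow.python.keras.layers import Dense, Flatten
from tensorflow.python.keras.models import Model

x = Flatten()(x)
out = Dense(units=10, activation="softmax")(x)  # class count is an assumption
full_model = Model(inputs=model.input, outputs=out)
full_model.compile(optimizer="adam", loss="categorical_crossentropy",
                   metrics=["accuracy"])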