def get_model():
    input_branch_a = layers.Input(shape=(224, 224, 3))
    input_branch_b = layers.Input(shape=(224, 224, 3))

    base_a = vgg19.VGG19(include_top=False, weights='imagenet')
    base_a.name = 'model_a'

    for layer in base_a.layers:
        if layer.name.startswith('block5'):
            layer.trainable = True
        else:
            layer.trainable = False

    base_b = vgg19.VGG19(include_top=False, weights='imagenet')
    base_b.name = 'model_b'

    for layer in base_b.layers:
        if layer.name.startswith('block5'):
            layer.trainable = True
        else:
            layer.trainable = False

    branch_a = base_a(input_branch_a)
    branch_b = base_b(input_branch_b)

    assert branch_a is not None
    assert branch_b is not None

    print('assertion_complete')

    vgg_feature_a = branch_a
    vgg_feature_b = branch_b

    flattened_fmap1 = layers.Flatten()(branch_a)
    flattened_fmap2 = layers.Flatten()(branch_b)

    print('Flattening complete')

    merged_layer = layers.concatenate([flattened_fmap1, flattened_fmap2])
    normalization_layer = layers.BatchNormalization(axis=-1)(merged_layer)

    dense_layer1 = layers.Dense(256, activation='relu')(normalization_layer)
    dropout_layer1 = layers.Dropout(0.5)(dense_layer1)
    dense_layer2 = layers.Dense(128, activation='relu')(dropout_layer1)
    dropout_layer2 = layers.Dropout(0.5)(dense_layer2)

    prediction_layer = layers.Dense(1, activation='tanh')(dropout_layer2)

    print('predictions complete')

    branched_model = models.Model(inputs=[input_branch_a, input_branch_b],
                                  outputs=[prediction_layer])

    return branched_model, vgg_feature_a, vgg_feature_b
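
A minimal usage sketch for the two-branch model above, assuming the same `layers`/`models`/`vgg19` imports as the snippet and random placeholder data (the optimizer and loss are illustrative choices for the tanh output in [-1, 1]):

import numpy as np

model, feat_a, feat_b = get_model()
model.compile(optimizer='adam', loss='mse')  # MSE over the tanh similarity score
# paired image batches and targets in [-1, 1] (placeholder data)
pairs_a = np.random.rand(4, 224, 224, 3).astype('float32')
pairs_b = np.random.rand(4, 224, 224, 3).astype('float32')
targets = np.random.uniform(-1, 1, size=(4, 1))
model.fit([pairs_a, pairs_b], targets, epochs=1, batch_size=2)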
Example #2
def build_cnn():

    vgg_inst = vgg19.VGG19(include_top=True,
                           weights='imagenet',
                           input_tensor=None,
                           input_shape=(224, 224, 3),
                           pooling=None,
                           classes=1000)

    x = vgg_inst.output
    x = Dense(64, activation="relu")(x)
    x = Dense(2, activation="softmax")(x)

    model = Model(inputs=vgg_inst.inputs, outputs=x)

    for i in model.layers:
        i.trainable = False

    trainable_layers = [
        "block3_pool", "block4_conv1", "block4_conv2", "block4_conv3",
        "block4_conv4", "block4_pool", "block5_conv1", "block5_conv2",
        "block5_conv3", "block5_conv4", "block5_pool", "flatten", "fc1", "fc2",
        "predictions", "dense_1", "dense_2"
    ]

    for i in trainable_layers:
        model.get_layer(i).trainable = True

    sgd = SGD(lr=1e-6, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
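
A short training sketch for build_cnn with random placeholder data; the model comes back already compiled with SGD and categorical cross-entropy:

import numpy as np

model = build_cnn()
X = np.random.rand(2, 224, 224, 3).astype('float32')
y = np.eye(2)[np.random.randint(0, 2, size=2)]  # one-hot labels for the 2-way head
model.fit(X, y, epochs=1, batch_size=2)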
Example #3
def prepare_model_layers(style_path, content_path):
    style_image = K.variable(load_image_to_tensor(style_path))
    content_image = K.variable(load_image_to_tensor(content_path))
    result_image = K.placeholder((1, 3, image_width, image_height))
    tensor_mix = K.concatenate([content_image, style_image, result_image], axis=0)
    model = vgg19.VGG19(input_tensor=tensor_mix, weights='imagenet', include_top=False)
    return dict([(layer.name, layer.output) for layer in model.layers]), result_image
Example #4
    def __init__(self):
        """Prepare the module by loading the required ml models"""

        print("Getting model data...")

        # Loading haarcascade model
        self.face_cascade = cv2.CascadeClassifier(
            '../notebook/haarcascades/haarcascade_frontalface_alt.xml')

        # Loading ResNet50 model
        self.ResNet50 = resnet50.ResNet50(weights='imagenet')
        global graph_resnet
        graph_resnet = tf.get_default_graph()

        # Loading vgg19 model
        self.vgg19 = vgg19.VGG19(weights='imagenet', include_top=False)
        global graph_vgg19
        graph_vgg19 = tf.get_default_graph()

        # Loading dog breed model
        self.model = Sequential()
        self.model.add(GlobalAveragePooling2D(input_shape=(7, 7, 512)))
        self.model.add(Dense(1024, activation='relu'))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dense(133, activation='softmax'))
        self.model.load_weights(
            '../notebook/saved_models/weights.best.VGG19.hdf5')
        global graph_model
        graph_model = tf.get_default_graph()

        # Loading dog names list
        with open('../notebook/dog_names', 'rb') as fp:
            self.dog_names = pickle.load(fp)
Example #5
def VGG19_Content(dataset='imagenet'):
    # Load VGG, trained on imagenet data
    vgg = vgg19.VGG19(include_top=False, weights=dataset)
    vgg.trainable = False
    content_layers = ['block5_conv2']
    content_outputs = [vgg.get_layer(name).output for name in content_layers]
    return Model(vgg.input, content_outputs)
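
A quick feature-extraction sketch for VGG19_Content, assuming a vgg19.preprocess_input-preprocessed batch (the input here is a random placeholder):

import numpy as np
from keras.applications import vgg19

content_model = VGG19_Content()
img = np.random.rand(1, 224, 224, 3) * 255.0
features = content_model.predict(vgg19.preprocess_input(img))
print(features.shape)  # block5_conv2 activations: (1, 14, 14, 512) for a 224x224 input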
Example #6
    def __init__(self,
                 img_len=112,
                 vgg_in=True,
                 include_top=False,
                 normlize=True,
                 train_imgs=None):
        self.layer_embed = {}
        self.norm_factor = {}
        self.include_top = include_top
        in_img = Input(shape=(img_len, img_len, 3))
        self.img_len = img_len
        self.normlize = normlize
        if vgg_in:
            x = Lambda(self.vgg_in)(in_img)
        else:
            x = in_img
        model = vgg19.VGG19(weights='imagenet',
                            include_top=include_top,
                            input_shape=(img_len, img_len, 3),
                            input_tensor=x)

        for layer in model.layers:
            layer.trainable = False

        # get the symbolic outputs of each "key" layer (we gave them unique names).
        outputs_dict = dict([(layer.name, layer.output)
                             for layer in model.layers])

        for layer in model.layers:
            self.layer_embed[layer.name] = Model(
                inputs=in_img, outputs=outputs_dict[layer.name])
        if train_imgs is not None:
            self.calc_norm_factors(train_imgs)
Example #7
def create_model(input_tensor, model_save_path):
    model = vgg19.VGG19(input_tensor=input_tensor,
                        weights='imagenet',
                        include_top=False)
    plot_model(model, show_shapes=True, to_file=model_save_path)
    print('Model loaded.')
    return model
Example #8
    def create_vgg19(weights, include_top):
        """ create vgg19 base model """

        base_model = vgg19.VGG19(weights=weights, include_top=include_top)
        imagesize = (224, 224)

        return base_model, vgg19.preprocess_input, imagesize
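
A hedged call sketch for create_vgg19, assuming it is reachable as a plain helper (it is written as a method) and fed a correctly sized batch of placeholder data:

import numpy as np

base_model, preprocess, imagesize = create_vgg19('imagenet', include_top=True)
img = np.random.rand(1, 224, 224, 3) * 255.0
preds = base_model.predict(preprocess(img))
print(preds.shape)  # (1, 1000) ImageNet probabilities with include_top=True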
Example #9
def customVGG16(input_tensor):
    # Note: despite the name, this copies weights from a VGG19 instance and
    # rebuilds its convolutional topology (2-2-4-4-4) layer by layer.
    vgg16 = vgg19.VGG19(weights='imagenet', include_top=False, input_tensor=input_tensor)
    input_shape = (224, 224, 3)
    if not K.is_keras_tensor(input_tensor):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    x = Conv2D(64, (3, 3), dtype='float32', input_shape=input_shape, padding='same',
               activation='relu', weights=vgg16.layers[1].get_weights())(img_input)  # --> layer 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', weights=vgg16.layers[2].get_weights())(x)  # --> layer 2
    # pooling layers carry no weights, so none are copied for them
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)  # --> layer 3
    x = Conv2D(128, (3, 3), activation='relu', padding='same', weights=vgg16.layers[4].get_weights())(x)  # --> layer 4
    x = Conv2D(128, (3, 3), activation='relu', padding='same', weights=vgg16.layers[5].get_weights())(x)  # --> layer 5
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)  # --> layer 6
    x = Conv2D(256, (3, 3), activation='relu', padding='same', weights=vgg16.layers[7].get_weights())(x)  # --> layer 7
    x = Conv2D(256, (3, 3), activation='relu', padding='same', weights=vgg16.layers[8].get_weights())(x)  # --> layer 8
    x = Conv2D(256, (3, 3), activation='relu', padding='same', weights=vgg16.layers[9].get_weights())(x)  # --> layer 9
    x = Conv2D(256, (3, 3), activation='relu', padding='same', weights=vgg16.layers[10].get_weights())(x)  # --> layer 10
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)  # --> layer 11
    x = Conv2D(512, (3, 3), activation='relu', padding='same', weights=vgg16.layers[12].get_weights())(x)  # --> layer 12
    x = Conv2D(512, (3, 3), activation='relu', padding='same', weights=vgg16.layers[13].get_weights())(x)  # --> layer 13
    x = Conv2D(512, (3, 3), activation='relu', padding='same', weights=vgg16.layers[14].get_weights())(x)  # --> layer 14
    x = Conv2D(512, (3, 3), activation='relu', padding='same', weights=vgg16.layers[15].get_weights())(x)  # --> layer 15
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)  # --> layer 16
    x = Conv2D(512, (3, 3), activation='relu', padding='same', weights=vgg16.layers[17].get_weights())(x)  # --> layer 17
    x = Conv2D(512, (3, 3), activation='relu', padding='same', weights=vgg16.layers[18].get_weights())(x)  # --> layer 18
    x = Conv2D(512, (3, 3), activation='relu', padding='same', weights=vgg16.layers[19].get_weights())(x)  # --> layer 19
    x = Conv2D(512, (3, 3), activation='relu', padding='same', weights=vgg16.layers[20].get_weights())(x)  # --> layer 20
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)  # --> layer 21

    model = Model(img_input, x, name='vgg16')
    return model
Example #10
    def __init__(self, stop_layer):
        _vgg19model = vgg19.VGG19(include_top=False)
        vgg19_output = _vgg19model.get_layer(stop_layer).output

        super(Vgg19TruncatedModel, self).__init__(inputs=_vgg19model.input,
                                                  outputs=vgg19_output,
                                                  name="vgg19")
Example #11
def pretrained_model(model):
    if model == 'densenet':
        base_model = densenet.DenseNet121(include_top=False,
                                          weights='imagenet',
                                          input_shape=(IMG_SIZE, IMG_SIZE, 3))
    elif model == 'inception':
        base_model = inception_v3.InceptionV3(include_top=False,
                                              weights='imagenet',
                                              input_shape=(IMG_SIZE, IMG_SIZE,
                                                           3))
    elif model == 'mobilenet':
        base_model = mobilenet.MobileNet(include_top=False,
                                         weights='imagenet',
                                         input_shape=(IMG_SIZE, IMG_SIZE, 3))
    elif model == 'vgg':
        base_model = vgg19.VGG19(include_top=False,
                                 weights='imagenet',
                                 input_shape=(IMG_SIZE, IMG_SIZE, 3))
    elif model == 'resnet':
        base_model = resnet50.ResNet50(include_top=False,
                                       weights='imagenet',
                                       input_shape=(IMG_SIZE, IMG_SIZE, 3))
    elif model == 'xception':
        base_model = xception.Xception(include_top=False,
                                       weights='imagenet',
                                       input_shape=(IMG_SIZE, IMG_SIZE, 3))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = Flatten()(x)
    x = Dense(150, activation='relu')(x)
    x = Dropout(0.2)(x)
    predictions = Dense(1, activation='sigmoid')(x)

    return models.Model(base_model.input, predictions)
Example #12
def transfer_model(shape):
    input_tensor = Input(shape=shape)

    base_model = vgg19.VGG19(include_top=False, weights='imagenet', input_tensor=input_tensor)

    for layer in base_model.layers:
        layer.trainable = False

    # Creating dictionary that maps layer names to the layers
    layer_dict = dict([(layer.name, layer) for layer in base_model.layers])

    # Getting output tensor of the last VGG layer that we want to include
    x = layer_dict['block4_pool'].output
    
    # Build a head similar to the from-scratch model on top of the truncated base
    x = Conv2D(filters=1, kernel_size=1, strides=(1, 1), padding='same', activation='relu')(x)
    x = Flatten()(x)
    x = Dense(64, activation='sigmoid')(x)
    x = Dropout(rate=0.5)(x)
    output_layer = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=base_model.input, outputs=output_layer)
    print(model.summary())

    return model
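
A minimal training sketch for transfer_model, with random placeholder data and an illustrative optimizer/loss choice for the sigmoid output:

import numpy as np

model = transfer_model((224, 224, 3))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
X = np.random.rand(8, 224, 224, 3).astype('float32')
y = np.random.randint(0, 2, size=(8, 1))
model.fit(X, y, epochs=1, batch_size=4)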
Example #13
    def __init__(self, input_image: np.ndarray, style_image: np.ndarray, args: Namespace) -> None:
        self.__logger = logging.getLogger(__name__)
        self.__style_layers: List[str] = ["block{}_conv1".format(i) for i in range(1, 6)]
        self.__args = args
        self.__input_img = input_image
        self.__style_img = style_image
        self.__loss_value = None
        self.__gradient_values = None
        self.__channels = 3
        self.__batch_size = 1

        self.__img_placeholder = backend.placeholder((self.__batch_size,
                                                      self.__input_img.shape[1],
                                                      self.__input_img.shape[2],
                                                      self.__channels))
        self.__img_preprocessed = vgg19.preprocess_input(self.__img_placeholder)
        self.__model = vgg19.VGG19(input_tensor=self.__img_preprocessed, weights='imagenet', include_top=False)
        self.__model_outputs = backend.function([self.__img_placeholder], [self.__model.outputs[0]])
        if self.__args.verbose:
            self.__model.summary()

        # Function to fetch the values of the current loss and the current gradients
        loss = self.__total_variation()
        self.__fetch_loss_and_grads = backend.function(
            [self.__img_placeholder], [loss, backend.gradients(loss, self.__img_placeholder)[0]])
Example #14
    def __init__(self, input_size=224):

        input_shape = (input_size, input_size, 3)

        xception_model = xception.Xception(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        inceptionv3_model = inception_v3.InceptionV3(weights='imagenet',
                                                     include_top=False,
                                                     pooling='max',
                                                     input_shape=input_shape)
        resnet50_model = resnet50.ResNet50(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        vgg19_model = vgg19.VGG19(weights='imagenet',
                                  include_top=False,
                                  pooling='max',
                                  input_shape=input_shape)

        self.input_size = input_size
        self.models = [
            xception_model, inceptionv3_model, resnet50_model, vgg19_model
        ]
        self.graph = tf.get_default_graph()
Example #15
def get_model(input_shape, output_shape, last_activation='softmax'):
    """Get model ready to use."""
    model = vgg19.VGG19(
        include_top=False, weights='imagenet', input_shape=input_shape
    )
    model = _set_readonly(model, 18)
    model = _classification(model, output_shape, last_activation)
    return model
Example #16
def build_vgg19(classes):
    model = vgg19.VGG19(include_top=True,
                        weights=None,
                        input_tensor=None,
                        input_shape=None,
                        pooling=None,
                        classes=classes)
    return model
Example #17
    def load_network(self):
        model = vgg19.VGG19(input_tensor=self.input_tensor,
                            weights='imagenet', include_top=False)
        print('Model loaded.')

        # get the symbolic outputs of each "key" layer (we gave them unique names).
        self.outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
Example #18
def neural_style_transfer(target_folder_path, style_reference_image_path,
                          output_path):
    # get_ipython().system('wget "https://3.bp.blogspot.com/-gG2TK3WUCeE/WEwqlahXgkI/AAAAAAAADLY/SRCcdZn0yeUKDFrTDGgLaVnRHwjQcAabgCLcB/s1600/mariposa.jpg" -O mariposas.jpg')
    #target_folder_path = 'datasetTransformar'
    #style_reference_image_path = 'datasetOriginal/100.jpg'
    # target_image and combination_image are assumed to be defined at module
    # scope (a K.constant and a K.placeholder, as in the standard recipe)
    style_reference_image = K.variable(
        preprocess_image(style_reference_image_path))
    input_tensor = K.concatenate(
        [target_image, style_reference_image, combination_image], axis=0)
    model = vgg19.VGG19(input_tensor=input_tensor,
                        weights='imagenet',
                        include_top=False)
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
    content_layer = 'block5_conv2'
    style_layers = [
        'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
        'block5_conv1'
    ]
    total_variation_weight = 1e-4
    style_weight = 1.
    content_weight = 0.025
    loss = K.variable(0.)
    layer_features = outputs_dict[content_layer]
    target_image_features = layer_features[0, :, :, :]
    combination_features = layer_features[2, :, :, :]
    loss += content_weight * content_loss(target_image_features,
                                          combination_features)
    for layer_name in style_layers:
        layer_features = outputs_dict[layer_name]
        style_reference_features = layer_features[1, :, :, :]
        combination_features = layer_features[2, :, :, :]
        sl = style_loss(style_reference_features, combination_features)
        loss += (style_weight / len(style_layers)) * sl
    loss += total_variation_weight * total_variation_loss(combination_image)
    grads = K.gradients(loss, combination_image)[0]
    fetch_loss_and_grads = K.function([combination_image], [loss, grads])
    evaluator = Evaluator(fetch_loss_and_grads)
    # result_prefix = 'my_result'
    iterations = 100
    imgs = os.listdir(target_folder_path)
    for im in [target_folder_path + "/" + img for img in imgs]:
        x = preprocess_image(im)
        x = x.flatten()
        for i in range(iterations):
            print('Start of iteration', i)
            start_time = time.time()
            x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                             x,
                                             fprime=evaluator.grads,
                                             maxfun=20)
            print('Current loss value:', min_val)
            img = x.copy().reshape((img_height, img_width, 3))
            img = deprocess_image(img)
            fname = im[im.rfind('/'):]
            imageio.imwrite(output_path + fname, img)
            print('Image saved as', fname)
            end_time = time.time()
            print('Iteration %d completed in %ds' % (i, end_time - start_time))
Example #19
def VGG19_AvgPool(input_tensor):
    # we want to account for features across the entire image; note that
    # pooling='avg' only appends a global average-pooling layer at the output
    # and does not replace the internal max-pooling layers (see the sketch below)
    vgg = vgg19.VGG19(input_tensor=input_tensor,
                      weights='imagenet',
                      include_top=False,
                      pooling="avg")

    return vgg
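
If the intent really is to replace every max pool with an average pool, the applications API cannot do that directly; a standard rebuild along these lines works in Keras 2 (a hedged sketch, not part of the original snippet):

from keras import models, layers
from keras.applications import vgg19

def vgg19_avg_pool(shape=(224, 224, 3)):
    vgg = vgg19.VGG19(input_shape=shape, weights='imagenet', include_top=False)
    model = models.Sequential()
    for layer in vgg.layers:
        if isinstance(layer, layers.MaxPooling2D):
            # swap each 2x2 max pool for an average pool of the same geometry
            model.add(layers.AveragePooling2D())
        else:
            model.add(layer)
    return model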
Example #20
def compute_descriptor(batch):
    if not hasattr(compute_descriptor, "vgg_model"):
        compute_descriptor.vgg_model = vgg19.VGG19(include_top=False,
                                                   input_shape=(None, None, 3))
    descriptors = []
    for shape in [(200, 200), (250, 250), (300, 300)]:
        # resize from the original input each time instead of overwriting it,
        # so later scales are not computed from an already-resized image
        resized = resize(batch, shape, preserve_range=True)
        descriptor = vgg19.preprocess_input(np.expand_dims(resized, axis=0))
        descriptors.append(compute_descriptor.vgg_model.predict(descriptor))
    return descriptors
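
A usage sketch for compute_descriptor, assuming the `resize` in scope is skimage.transform.resize and the input is a single HxWx3 image (placeholder data below):

import numpy as np

img = np.random.rand(320, 240, 3) * 255.0
descriptors = compute_descriptor(img)
for d in descriptors:
    print(d.shape)  # one VGG19 feature map per scale: 200, 250, 300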
Example #21
def build_model():
    model = vgg19.VGG19(weights='imagenet',
                        include_top=False)

    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
    content_layer = 'block5_pool'
    layer_features = outputs_dict[content_layer]

    activation_model = models.Model(inputs=model.input, outputs=layer_features)
    return activation_model
Example #22
    def VGG19(self, output_layer='fc2'):
        """VGG19() - fc2 - output_shape = 4096"""
        base_model = vgg19.VGG19(weights='imagenet')
        _model = Model(inputs=base_model.input,
                       outputs=base_model.get_layer(output_layer).output)
        _model.summary()
        self.imageSize = (224, 224)
        self.model = _model
        self.preinput = vgg19.preprocess_input
        return self._process()
Example #23
def vgg_layers(layer_names):
    # Initialize VGG19 network, don't train the weights
    vgg = vgg19.VGG19(include_top=False, weights='imagenet')
    vgg.trainable = False

    outputs = [vgg.get_layer(name).output for name in layer_names]

    # Use the layers to create a new Keras model
    model = tf.keras.Model([vgg.input], outputs)
    return model
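
A minimal style-extraction sketch for vgg_layers, following the tf.keras style of the snippet (the layer names are the standard VGG19 ones; the input is a random placeholder):

import numpy as np
from tensorflow.keras.applications import vgg19

style_extractor = vgg_layers(['block1_conv1', 'block2_conv1', 'block3_conv1'])
img = np.random.rand(1, 224, 224, 3).astype('float32') * 255.0
style_outputs = style_extractor.predict(vgg19.preprocess_input(img))
for out in style_outputs:
    print(out.shape)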
Example #24
def compute_embeddings_pretrained(parameters, train_X, val_X, test=None):
    if parameters['cnn'] == 'vgg':
        cnn_model = vgg19.VGG19(weights='imagenet', pooling='avg')
        cnn = Model(inputs=cnn_model.input,
                    outputs=cnn_model.get_layer('fc2').output)

    if parameters['cnn'] == 'resnet':
        #cnn = resnet50.ResNet50(weights='imagenet',include_top=False,pooling=parameters['pool'])
        #cnn = Model(inputs=cnn_model.input, outputs=cnn_model.get_layer('').output)
        cnn = resnet.ResNet101(include_top=False,
                               weights='imagenet',
                               pooling='avg')

    if parameters['cnn'] == 'inc_resnet':
        cnn = inception_resnet_v2.InceptionResNetV2(weights='imagenet',
                                                    include_top=False,
                                                    pooling=parameters['pool'])
        #cnn = Model(inputs=cnn_model.input, outputs=cnn_model.get_layer('').output)

    if parameters['cnn'] == 'densenet':
        cnn = densenet.DenseNet121(weights='imagenet',
                                   include_top=False,
                                   pooling=parameters['pool'])

    if parameters['cnn'] == 'xception':
        cnn = xception.Xception(weights='imagenet',
                                include_top=False,
                                pooling=parameters['pool'])
    '''
    if parameters['freeze']:
        for layer in cnn.layers:
            layer.trainable = False  
        print ('TRAINABLE LAYERS: ')
        for layer in cnn.layers:
            print(layer, layer.trainable)
    '''
    embeddings_train = cnn.predict(train_X)
    embeddings_val = cnn.predict(val_X)
    print('Dim embeddings train: %s' % str(embeddings_train.shape))
    print('Dim embeddings val: %s' % str(embeddings_val.shape))
    if test is not None:
        embeddings_test = {}
        for k, v in test.items():
            if parameters['cnn'] == 'resnet':
                embeddings_test[k] = cnn.predict(v)  #.reshape((2048,-1))
            elif parameters['cnn'] == 'inc_resnet':
                embeddings_test[k] = cnn.predict(v)  #.reshape((1536,-1))
            elif parameters['cnn'] == 'xception':
                embeddings_test[k] = cnn.predict(v)  #.reshape((,-1))
            else:
                embeddings_test[k] = cnn.predict(v)  #.reshape((1024,-1))
        with open(parameters['embeddings_pretrained_test'], 'wb') as f:
            pickle.dump(embeddings_test, f, protocol=pickle.HIGHEST_PROTOCOL)
        print('Dim embeddings test: %s' % str(len(embeddings_test)))
    return embeddings_train.squeeze(), embeddings_val.squeeze()
Example #25
    def __init__(self, model, input_size):

        input_shape = (input_size, input_size, 3)

        if model == 'xception':
            base_model = xception.Xception(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        elif model == 'vgg16':
            base_model = vgg16.VGG16(weights='imagenet',
                                     include_top=False,
                                     pooling='max',
                                     input_shape=input_shape)
        elif model == 'vgg19':
            base_model = vgg19.VGG19(weights='imagenet',
                                     include_top=False,
                                     pooling='max',
                                     input_shape=input_shape)
        elif model == 'inception_v3':
            base_model = inception_v3.InceptionV3(weights='imagenet',
                                                  include_top=False,
                                                  pooling='max',
                                                  input_shape=input_shape)
        elif model == 'mobilenet':
            base_model = mobilenet.MobileNet(weights='imagenet',
                                             include_top=False,
                                             pooling='max',
                                             input_shape=input_shape)
        elif model == 'inception_resnet_v2':
            base_model = inception_resnet_v2.InceptionResNetV2(
                weights='imagenet',
                include_top=False,
                pooling='max',
                input_shape=input_shape)
        elif model == 'resnet50':
            base_model = resnet50.ResNet50(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        elif model == 'nasnetlarge':
            base_model = nasnet.NASNetLarge(weights='imagenet',
                                            include_top=False,
                                            pooling='max',
                                            input_shape=input_shape)
        else:
            base_model = nasnet.NASNetMobile(weights='imagenet',
                                             include_top=False,
                                             pooling='max',
                                             input_shape=input_shape)

        self.input_size = input_size
        self.model = base_model
        self.graph = tf.get_default_graph()
        base_model.summary()
Example #26
def breed_detector(img):
    keras.backend.clear_session()
    vgg19_model = vgg19.VGG19(weights='imagenet', include_top=False)
    img = img.resize((224, 224), PIL.Image.NEAREST)
    img_array = image.img_to_array(img)
    img_array_4d = np.expand_dims(img_array, axis=0)
    x = vgg19.preprocess_input(img_array_4d)
    x_features = vgg19_model.predict(x)
    model = load_model(os.path.join(script_dir, 'dog_breed_classifier.h5'))
    prediction = np.argmax(model.predict(x_features))
    return dog_breed_list[prediction]
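
A hedged call sketch for breed_detector, assuming PIL is imported and the classifier file referenced above exists on disk (the image path is illustrative):

import PIL.Image

img = PIL.Image.open('some_dog.jpg')  # hypothetical input image
print(breed_detector(img))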
Example #27
def make_vgg(s, n_filters=None):
    if s == 'M':
        base = vgg16.VGG16(include_top=False)
        x = base.get_layer('block5_conv3').output
    elif s == 'D':
        base = vgg19.VGG19(include_top=False)
        x = base.get_layer('block5_conv4').output
        
    if n_filters is not None:
        x = layers.Conv2D(n_filters, 1, kernel_initializer='orthogonal', name='conv1x1')(x)

    return models.Model(inputs=base.input, outputs=x)
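
A quick shape check for make_vgg, assuming the same `layers`/`models` imports as the snippet:

m = make_vgg('D', n_filters=64)
print(m.output_shape)  # (None, None, None, 64) after the 1x1 projection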
Example #28
def VGG19_NinaProDB5(input_shape,
                     classes,
                     dropout_rate=0,
                     dense1=0,
                     dense2=0,
                     batch_norm=False):

    # Load the pretrained VGG19 model
    vgg19_model = vgg19.VGG19(weights='imagenet', include_top=False)

    # Keep only the layers of the original VGG19 up to block3_pool
    vgg19_model = Model(vgg19_model.input,
                        vgg19_model.get_layer('block3_pool').output)

    layers = [l for l in vgg19_model.layers]
    freeze_index, keep_index = 0, 0
    for i in range(0, len(layers)):
        if 'block1' in layers[i].name:
            freeze_index = i  # freeze through block1; fine-tune block2-block3
        if 'block3' in layers[i].name:
            keep_index = i

    assert (keep_index >= freeze_index), '{} {}'.format(
        keep_index, freeze_index)

    # Define input
    x_input = Input(input_shape)
    x = x_input
    for i in range(1, len(layers)):
        # Freeze layers
        if i <= freeze_index:
            layers[i].trainable = False
        # Fine-tune layers
        elif i > freeze_index and i <= keep_index:
            layers[i].trainable = True
        else:
            break
        x = layers[i](x)

    # Classifier
    if batch_norm is True:
        x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(dense1, activation='relu')(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(dense2, activation='relu')(x)
    x = Dense(classes, activation='softmax')(x)
    model = Model(inputs=x_input, outputs=x, name='VGG19_DB5_NinaPro')
    model.summary()

    print('The model is set up for fine-tuning')
    return model
Example #29
def get_model(input_shape, output_shape, readonly_until):
    """Get the model."""
    # load the VGG19 model
    model = vgg19.VGG19(weights='imagenet',
                        include_top=False,
                        input_shape=input_shape)
    # make them readonly
    model = _set_readonly(model, readonly_until)
    # add new classification layers
    model = _classification(model, output_shape)

    return model
Example #30
def getFetch_loss_and_grads():
    # 1. Build the model
    originnal_img = K.constant(preprocess_image(originnal_img_path))  # constant
    style_reference_img = K.constant(
        preprocess_image(style_reference_img_path))
    generated_img = K.placeholder((1, img_height, img_width, 3))  # placeholder

    input_tensor = K.concatenate(
        [originnal_img, style_reference_img, generated_img],
        axis=0)  # combine the three images into a single batch

    model = vgg19.VGG19(
        input_tensor=input_tensor,  # build VGG19 on the three-image batch
        weights='imagenet',  # load the pretrained ImageNet weights
        include_top=False  # without the classifier top
    )

    # 2. Build the loss
    outputs_dict = dict([(layer.name, layer.output) for layer in model.layers
                         ])  # dict of <layer_name, layer_output>
    content_layer = 'block5_conv2'  # layer used for the content loss
    style_layers = [
        'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
        'block5_conv1'
    ]  # layers used for the style loss
    total_variation_weight = 1e-4
    style_weight = 1.
    content_weight = 0.025

    loss = K.variable(0.)
    layer_features = outputs_dict[content_layer]
    target_img_features = layer_features[
        0, :, :, :]  # activations of image 0 (the original image) at the content layer
    combination_features = layer_features[
        2, :, :, :]  # activations of image 2 (the generated image) at the content layer
    loss = loss + content_weight * content_loss(
        target_img_features, combination_features)  # add the content loss to the total

    for layer_name in style_layers:
        layer_features = outputs_dict[layer_name]
        style_img_features = layer_features[1, :, :, :]
        combination_features = layer_features[2, :, :, :]
        sl = style_loss(style_img_features, combination_features)
        loss = loss + (style_weight / len(style_layers)) * sl  # each layer contributes a share
    loss = loss + total_variation_weight * total_variation_loss(generated_img)

    # 3. Set up the gradient computation
    grads = K.gradients(loss, generated_img)[0]  # gradient of the loss w.r.t. the generated image
    fetch_loss_and_grads = K.function(
        [generated_img],
        [loss, grads])  # maps generated_img to [loss, grads]
    return fetch_loss_and_grads
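
A hedged optimization-loop sketch for the function above, following the standard recipe; the Evaluator wrapper, preprocess_image, and the image paths are assumed from the surrounding script, as in Example #18:

from scipy.optimize import fmin_l_bfgs_b

fetch_loss_and_grads = getFetch_loss_and_grads()
evaluator = Evaluator(fetch_loss_and_grads)
x = preprocess_image(originnal_img_path).flatten()
for i in range(20):
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x,
                                     fprime=evaluator.grads, maxfun=20)
    print('Iteration %d, loss: %.2f' % (i, min_val))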