Example #1
def get_model():
    # Download the pretrained Xception model; include_top=False drops the 1000-class head
    base_model = xception.Xception(weights='imagenet', include_top=False)

    # Add a few layers on top of the base model
    x = base_model.output
    # Batch normalization
    x = BatchNormalization()(x)
    # Global pooling
    x = GlobalAveragePooling2D()(x)
    # Dropout to improve generalization
    x = Dropout(0.5)(x)
    # Fully connected layer to add capacity
    x = Dense(1024, activation='relu')(x)
    # Another dropout
    x = Dropout(0.5)(x)
    # Classify into NUM_CLASSES classes
    predictions = Dense(NUM_CLASSES, activation='softmax')(x)

    # Define the full model from its input and output
    model = Model(inputs=base_model.input, outputs=predictions)

    # Freeze the earlier layers (their weights will not be updated)
    for layer in base_model.layers:
        layer.trainable = False

    # Specify the optimizer and loss function, then compile
    optimizer = RMSprop(lr=0.001, rho=0.9)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=["accuracy"])
    # Print the model architecture
    # model.summary()
    return model
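How this builder is driven is not shown in the example; the sketch below is one plausible training loop, assuming a class-per-subfolder image layout. The directory path, batch size, and epoch count are illustrative, and older Keras versions would call fit_generator instead of fit.

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Hypothetical driver for get_model(); the data path and hyperparameters are illustrative.
train_gen = ImageDataGenerator(
    preprocessing_function=xception.preprocess_input).flow_from_directory(
        'data/train',                  # hypothetical directory, one subfolder per class
        target_size=(299, 299),        # Xception's default input resolution
        batch_size=32,
        class_mode='categorical')

model = get_model()
model.fit(train_gen, epochs=5)         # only the new head trains; the Xception base is frozen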
Example #2
def img2vec_xception(imagedir):

    image_paths = glob.glob(str(imagedir) + '/*.jpg')

    _IMAGE_NET_TARGET_SIZE = (299, 299)
    model = xception.Xception(weights='imagenet')
    layer_name = 'avg_pool'
    intermediate_layer_model = Model(
        inputs=model.input, outputs=model.get_layer(layer_name).output)

    image_vectors = {}
    global image_path
    for c, image_path in enumerate(image_paths):
        print("\r" + str(c + 1) + "/" + str(len(image_paths)), end="")
        img = image.load_img(image_path, target_size=_IMAGE_NET_TARGET_SIZE)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = xception.preprocess_input(x)
        intermediate_output = intermediate_layer_model.predict(x)
        vector = intermediate_output[0]
        image_vectors[image_path] = vector

    embeddings = np.stack(list(image_vectors.values()))
    with open('xception.pkl', 'wb') as f:
        pickle.dump(embeddings, f)
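A hedged follow-up showing how the pickled embeddings might be used for similarity search. Only the file name comes from the snippet above; the query index and the cosine-similarity ranking are illustrative.

import pickle
import numpy as np

with open('xception.pkl', 'rb') as f:
    embeddings = pickle.load(f)               # shape (n_images, 2048)

# rank all images by cosine similarity to the first one
query = embeddings[0]
norms = np.linalg.norm(embeddings, axis=1) * np.linalg.norm(query)
scores = embeddings @ query / np.maximum(norms, 1e-12)
print(np.argsort(-scores)[:5])                # indices of the 5 most similar images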
    def __init__(self, 
        model='inception_v3', 
        weights = 'imagenet',
        include_top = False,
        pooling=None, 
        n_channels=None,  
        clf_head_dense_dim = 1024,
    ):
        ''' Creates ImageNet base model for featurization or classification and corresponding image
            preprocessing function
                :param model: options are xception, inception_v3, and mobilenet_v2
                :param weights: 'imagenet' or filepath
                :param include_top: whether to include original ImageNet classification head with 1000 classes
                :param pooling: 'avg', 'max', or None
                :param n_channels: number of channels to keep if performing featurization
                :param clf_head_dense_dim: dimension of dense layer before softmax classification (only applies
                    if `include_top` is false)
        '''

        self.include_top = include_top  # determines if used for classification or featurization
        self.n_channels = n_channels
        self.pooling = pooling
        self.clf_head_dense_dim = clf_head_dense_dim

        if model == 'xception':
            self.model = xception.Xception(weights=weights, include_top=include_top, pooling=pooling)
            self.preprocess = xception.preprocess_input
            self.target_size = (299, 299)
            if include_top:
                self.decode = xception.decode_predictions
            else:
                self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 10**2)
        elif model == 'inception_v3':
            self.model = inception_v3.InceptionV3(weights=weights, include_top=include_top, pooling=pooling)
            self.preprocess = inception_v3.preprocess_input
            self.target_size = (299, 299)
            if include_top:
                self.decode = inception_v3.decode_predictions
            else:
                self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 8**2)
        elif model == 'mobilenet_v2':
            self.model = mobilenetv2.MobileNetV2(weights=weights, include_top=include_top, pooling=pooling)
            self.preprocess = mobilenetv2.preprocess_input
            self.target_size = (224, 224)
            if include_top:
                self.decode = mobilenetv2.decode_predictions
            else:
                self.output_dim = (n_channels if n_channels else 1280) * (1 if pooling else 7**2)
        else:
            raise Exception('model option not implemented')
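Only the constructor is shown, so the class name is unknown; the usage sketch below calls it ImageFeaturizer as a placeholder and feeds it a hypothetical file.

import numpy as np
from tensorflow.keras.preprocessing import image

# `ImageFeaturizer` is a placeholder name for the class whose __init__ is shown above.
featurizer = ImageFeaturizer(model='xception', include_top=False, pooling='avg')

img = image.load_img('example.jpg', target_size=featurizer.target_size)  # hypothetical file
x = featurizer.preprocess(np.expand_dims(image.img_to_array(img), axis=0))
features = featurizer.model.predict(x)        # shape (1, 2048) with pooling='avg'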
Example #4
def xception(input_shape,
             frozen_layers=0,
             weights=None,
             pooling='avg',
             **kwargs):
    # create the base model
    base_model = keras_xception.Xception(weights=weights,
                                         include_top=False,
                                         input_shape=input_shape,
                                         pooling=pooling)
    predictions = add_classifier(base_model.output, **kwargs)
    model = models.Model(inputs=base_model.input, outputs=predictions)

    # Freeze some layers:
    freeze_layers(model, frozen_layers)

    return model
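The add_classifier and freeze_layers helpers are not part of this snippet; a minimal sketch of what they might look like, assuming the base model is built with pooling='avg' so its output is already a 2-D feature vector.

from tensorflow.keras import layers

def add_classifier(features, num_classes=2, dropout=0.5, **kwargs):
    # hypothetical classifier head on top of the pooled Xception features
    x = layers.Dropout(dropout)(features)
    return layers.Dense(num_classes, activation='softmax')(x)

def freeze_layers(model, frozen_layers):
    # hypothetical helper: freeze the first `frozen_layers` layers of the assembled model
    for layer in model.layers[:frozen_layers]:
        layer.trainable = False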
train_datagen = ImageDataGenerator(rotation_range=45,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.25,
                                   horizontal_flip=True,
                                   fill_mode='nearest')

train_generator = train_datagen.flow(x_train, ytr, batch_size=batch_size)

valid_datagen = ImageDataGenerator()

valid_generator = valid_datagen.flow(x_valid, yv, batch_size=batch_size)

# create the base pre-trained model
base_model = xception.Xception(weights='imagenet', include_top=False)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional Xception layers
for layer in base_model.layers:
    layer.trainable = False

# add a global spatial average pooling layer
x = base_model.output
x = BatchNormalization()(x)
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dropout(0.5)(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
# and a logistic layer and set it to the number of breeds we want to classify,
predictions = Dense(num_classes, activation='softmax')(x)
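The snippet stops right after the new head is defined; a minimal continuation sketch, assuming Model and the generators above are in scope (the optimizer and epoch count are illustrative).

model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_generator,
          validation_data=valid_generator,
          epochs=10)                 # epoch count is illustrative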
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from IPython.display import Image, display

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.applications import xception

# Size of the input image
img_size = (299, 299, 3)

# Load Xception model with imagenet weights
model = xception.Xception(weights="imagenet")

# The local path to our target image
img_path = keras.utils.get_file("elephant.jpg",
                                "https://i.imgur.com/Bvro0YD.png")
display(Image(img_path))
"""
## Integrated Gradients algorithm
"""


def get_img_array(img_path, size=(299, 299)):
    # `img` is a PIL image of size 299x299
    img = keras.preprocessing.image.load_img(img_path, target_size=size)
    # `array` is a float32 Numpy array of shape (299, 299, 3)
    array = keras.preprocessing.image.img_to_array(img)
    # add a batch dimension so the array has shape (1, 299, 299, 3)
    return np.expand_dims(array, axis=0)
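The heading introduces Integrated Gradients, but the computation itself is not part of this excerpt. The sketch below is one compact way it could be written with the model, img_size, and preprocessing already loaded above; the step count and the black-image baseline are assumptions.

def get_integrated_gradients(img_array, top_pred_idx, baseline=None, num_steps=50):
    # img_array: output of get_img_array(), shape (1, 299, 299, 3)
    img = img_array[0].astype("float32")
    if baseline is None:
        baseline = np.zeros(img_size, dtype="float32")      # black-image baseline

    # 1. Interpolate between the baseline and the input image
    alphas = np.linspace(0.0, 1.0, num_steps + 1)[:, None, None, None]
    interpolated = baseline + alphas * (img - baseline)
    interpolated = xception.preprocess_input(interpolated.copy())

    # 2. Gradient of the target-class score w.r.t. each interpolated image
    images = tf.convert_to_tensor(interpolated)
    with tf.GradientTape() as tape:
        tape.watch(images)
        top_class = model(images)[:, top_pred_idx]
    grads = tape.gradient(top_class, images).numpy()

    # 3. Trapezoidal average of the gradients, scaled by (input - baseline)
    avg_grads = (grads[:-1] + grads[1:]) / 2.0
    return (img - baseline) * avg_grads.mean(axis=0)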
            image = preprocess_input(image)
            model = resnet50.ResNet50(weights='imagenet', include_top=True)
            # model.summary()

        if Model == 3:
            from tensorflow.keras.applications.inception_resnet_v2 import preprocess_input

            image = preprocess_input(image)
            model = inception_resnet_v2.InceptionResNetV2(weights='imagenet', include_top=True)
            # model.summary()

        if Model == 4:
            from tensorflow.keras.applications.xception import preprocess_input

            image = preprocess_input(image)
            model = xception.Xception(weights='imagenet', include_top=True)
            # model.summary()

        prediction = model.predict(image)

        from tensorflow.keras.applications.imagenet_utils import decode_predictions

        decoded_prediction = decode_predictions(prediction)
        # decoded_prediction = np.array(decoded_prediction)
        print("The prediction of the model %s is %s with a probability of %.2f%%" % (
            models[Model], decoded_prediction[0][0][1], decoded_prediction[0][0][2] * 100))

    plt.figure(1)
    plt.imshow(image.img_to_array(img) / 255)
    plt.show()
Example #8
# model = efn.EfficientNetB0(classes=n_class)
if args.arch == 'xception':
    model = keras.models.Sequential()
    model.add(keras.layers.InputLayer(input_shape=input_shape))
    model.add(tf.keras.layers.Lambda(
        lambda image: tf.image.resize(
            image,
            (96, 96),
            method=tf.image.ResizeMethod.BILINEAR,
            preserve_aspect_ratio=True
        )
    ))
    model.add(xception.Xception(
        weights=None,
        include_top=True,
        classes=n_class,
        input_shape=(96, 96, 3)
    ))
else:
    if args.arch == 'vgg19':
        arch = vgg19.VGG19
    elif args.arch == 'vgg16':
        arch = vgg16.VGG16
    elif args.arch == 'mobilenetv2':
        arch = mobilenet_v2.MobileNetV2
    elif args.arch == 'resnet2':
        arch = resnet_v2.ResNet50V2
    elif args.arch == 'densenet':
        arch = densenet.DenseNet121
    else:
        raise Exception("Unsupported architecture type")
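    # Hypothetical continuation (not part of the original snippet): instantiate the
    # selected constructor the same way the Xception branch does above.
    model = keras.models.Sequential()
    model.add(keras.layers.InputLayer(input_shape=input_shape))
    model.add(arch(weights=None, include_top=True,
                   classes=n_class, input_shape=input_shape))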
Example #9

def feature_table_creator(image_bytes):
    image_size = tuple((224, 224))
    image_bytes = cv2.resize(image_bytes, image_size)
    feature_table = {'xception': None}
    # feature_table['vgg'] = vgg(image_bytes)
    feature_table['xception'] = Xception(image_bytes)
    return feature_table


if __name__ == "__main__":
    # vgg_model = tf.keras.applications.VGG16(weights='imagenet')
    # vgg_extractor = tf.keras.models.Model(inputs=vgg_model.input, outputs=vgg_model.get_layer("fc2").output)
    xception_extractor = xception.Xception(weights='imagenet',
                                           include_top=False,
                                           input_shape=(224, 224, 3))
    img_dir_path = input('[INPUT] image dir path : ')
    features = {'img': [], 'xception': [], 'cluster': []}
    pics_num = os.listdir(img_dir_path)
    bar = progressbar.ProgressBar(maxval=len(pics_num), \
    widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    for i, img_path in enumerate(pics_num):
        img_path = img_dir_path + img_path
        with open(img_path, 'rb') as f:
            img_bytes = f.read()
        Image = cv2.imdecode(np.frombuffer(img_bytes, np.uint8),
                             cv2.IMREAD_UNCHANGED)
        Image = Image[:, :, :3]
        single_feature_table = feature_table_creator(Image)
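The Xception(image_bytes) helper called inside feature_table_creator is not defined in this excerpt; a minimal sketch, assuming it wraps the xception_extractor created in __main__ and returns a flat feature vector.

def Xception(image_bytes):
    # hypothetical helper: turn a (224, 224, 3) image array into a flat Xception feature vector
    x = xception.preprocess_input(np.expand_dims(image_bytes.astype('float32'), axis=0))
    return xception_extractor.predict(x).flatten()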
Example #10
    def __init__(self):

        model = xception.Xception(weights='imagenet', pooling='avg')
        layer_name = 'avg_pool'
        self.intermediate_layer_model = keras.Model(
            inputs=model.input, outputs=model.get_layer(layer_name).output)
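Only the constructor of this featurizer is shown; a hedged companion-method sketch follows, where the method name and the use of keras.preprocessing are assumptions.

    def vectorize(self, img_path):
        # hypothetical method: embed a single image with the intermediate model built above
        img = keras.preprocessing.image.load_img(img_path, target_size=(299, 299))
        x = keras.preprocessing.image.img_to_array(img)[None, ...]
        x = xception.preprocess_input(x)
        return self.intermediate_layer_model.predict(x)[0]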
vgg16_model = vgg16.VGG16(weights="imagenet", include_top=True)
model = Model(inputs=vgg16_model.input,
              outputs=vgg16_model.get_layer("fc2").output)
preprocessor = vgg16.preprocess_input
vectorize_images(IMAGE_DIR, IMAGE_SIZE, preprocessor, model, VECTOR_FILE)

IMAGE_SIZE = 299
VECTOR_FILE = os.path.join(DATA_DIR, "inception-vectors.tsv")
inception_model = inception_v3.InceptionV3(weights="imagenet",
                                           include_top=True)
model = Model(inputs=inception_model.input,
              outputs=inception_model.get_layer("avg_pool").output)
preprocessor = inception_v3.preprocess_input
vectorize_images(IMAGE_DIR, IMAGE_SIZE, preprocessor, model, VECTOR_FILE)

IMAGE_SIZE = 224
VECTOR_FILE = os.path.join(DATA_DIR, "resnet-vectors.tsv")
resnet_model = resnet50.ResNet50(weights="imagenet", include_top=True)
model = Model(inputs=resnet_model.input,
              outputs=resnet_model.get_layer("avg_pool").output)
preprocessor = resnet50.preprocess_input
vectorize_images(IMAGE_DIR, IMAGE_SIZE, preprocessor, model, VECTOR_FILE)

IMAGE_SIZE = 299
VECTOR_FILE = os.path.join(DATA_DIR, "xception-vectors.tsv")
xception_model = xception.Xception(weights="imagenet", include_top=True)
model = Model(inputs=xception_model.input,
              outputs=xception_model.get_layer("avg_pool").output)
preprocessor = xception.preprocess_input
vectorize_images(IMAGE_DIR, IMAGE_SIZE, preprocessor, model, VECTOR_FILE)
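The vectorize_images helper these calls rely on is defined elsewhere in the original script; a minimal sketch, assuming it writes one filename<TAB>comma-separated-vector line per image.

import os
import numpy as np
from tensorflow.keras.preprocessing import image

def vectorize_images(image_dir, image_size, preprocessor, model, vector_file, batch_size=32):
    # hypothetical implementation of the helper used above
    paths = sorted(os.listdir(image_dir))
    with open(vector_file, 'w') as fout:
        for start in range(0, len(paths), batch_size):
            batch_paths = paths[start:start + batch_size]
            batch = np.stack([
                image.img_to_array(image.load_img(os.path.join(image_dir, p),
                                                  target_size=(image_size, image_size)))
                for p in batch_paths])
            vectors = model.predict(preprocessor(batch))
            for p, vec in zip(batch_paths, vectors):
                fout.write(p + '\t' + ','.join('%.5e' % v for v in vec) + '\n')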
Example #12
def get_siamese_model(name=None, input_shape=(224, 224, 3),
                      embedding_vec_size=512, not_freeze_last=2):
    """
        Model architecture
    """

    if name == "InceptionV3":
        base_model = inception_v3.InceptionV3(
            weights='imagenet', include_top=False)
        model_preprocess_input = inception_v3.preprocess_input

    elif name == "InceptionResNetV2":
        base_model = inception_resnet_v2.InceptionResNetV2(
            weights='imagenet', include_top=False)
        model_preprocess_input = inception_resnet_v2.preprocess_input

    elif name == "DenseNet121":
        base_model = densenet.DenseNet121(
            weights='imagenet', include_top=False)
        model_preprocess_input = densenet.preprocess_input

    elif name == "DenseNet169":
        base_model = densenet.DenseNet169(
            weights='imagenet', include_top=False)
        model_preprocess_input = densenet.preprocess_input

    elif name == "DenseNet201":
        base_model = densenet.DenseNet201(
            weights='imagenet', include_top=False)
        model_preprocess_input = densenet.preprocess_input

    elif name == "MobileNetV2":
        base_model = mobilenet_v2.MobileNetV2(
            weights='imagenet', include_top=False)
        model_preprocess_input = mobilenet_v2.preprocess_input

    elif name == "MobileNet":
        base_model = mobilenet.MobileNet(
            weights='imagenet', include_top=False)
        model_preprocess_input = mobilenet.preprocess_input

    elif name == "ResNet50":
        base_model = resnet50.ResNet50(
            weights='imagenet', include_top=False)
        model_preprocess_input = resnet50.preprocess_input

    elif name == "VGG16":
        base_model = vgg16.VGG16(
            weights='imagenet', include_top=False)
        model_preprocess_input = vgg16.preprocess_input

    elif name == "VGG19":
        base_model = vgg19.VGG19(
            weights='imagenet', include_top=False)
        model_preprocess_input = vgg19.preprocess_input

    elif name == "Xception":
        base_model = xception.Xception(
            weights='imagenet', include_top=False)
        model_preprocess_input = xception.preprocess_input

    # If no base model was selected, return the list of supported names
    if 'base_model' not in locals():
        return ["InceptionV3", "InceptionResNetV2",
                "DenseNet121", "DenseNet169", "DenseNet201",
                "MobileNetV2", "MobileNet",
                "ResNet50",
                "VGG16", "VGG19",
                "Xception"]

    # Freeze every layer except the last `not_freeze_last` ones
    for layer in base_model.layers[:-not_freeze_last]:
        layer.trainable = False

    x = base_model.layers[-1].output
    x = GlobalAveragePooling2D()(x)
    x = Dense(
        embedding_vec_size,
        activation='linear',  # sigmoid? relu?
        name='embedding',
        use_bias=False
    )(x)

    model = Model(
        inputs=base_model.input,
        outputs=x
    )

    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # Generate the encodings (feature vectors) for the two images
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # Add a customized layer to compute the absolute difference between the encodings
    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l, encoded_r])

    # Add a dense layer with a sigmoid unit to generate the similarity score
    prediction = Dense(
        1,
        activation=Activation(gaussian),
        use_bias=False,
        kernel_constraint=NonNeg()
    )(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = Model(
        inputs=[left_input, right_input],
        outputs=prediction
    )

    return {
        "model": siamese_net,
        "preprocess_input": model_preprocess_input
    }
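The gaussian activation wrapped by Activation(...) above is not defined in the snippet; one simple choice, and purely an assumption here, is a zero-centred Gaussian of the L1 distance so that identical embeddings score 1.

def gaussian(x):
    # hypothetical definition: similarity = exp(-distance^2)
    return K.exp(-K.square(x))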
Example #13
targetSize = (Image_Width,Image_Height)
targetSize_withdepth = (Image_Width,Image_Height,Image_Depth)

image_data = image.load_img("../images/73.png",target_size=(100,100))
image_array = image.img_to_array(image_data)
x_train = []
x_train.append(image_array)
x_train = np.array(x_train)
x_train = xception.preprocess_input(x_train)

LAYERS_COUNT = 250
LAYERS_START = 0
layers = []

model = vgg16.VGG16(weights='imagenet',include_top=False,input_shape=targetSize_withdepth)
model = xception.Xception(weights='imagenet',include_top=False,input_shape=targetSize_withdepth)
# summarize feature map shapes
for i in range(len(model.layers)):
    layer = model.layers[i]
    # check for convolutional layer
    if 'conv' not in layer.name:
        continue
    # summarize output shape
    print(i, layer.name, layer.output.shape)
    layers.append(i+1)


#model = Model(inputs=model.inputs, outputs=model.layers[1].output)
layer_outputs = [layer.output for layer in model.layers[LAYERS_START:LAYERS_START+LAYERS_COUNT]] 

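A possible continuation, not in the original snippet: wrap the collected outputs in a multi-output model and inspect the activations of the preprocessed image, assuming targetSize_withdepth matches the (100, 100, 3) image loaded above and that Model is imported as in the commented line.

activation_model = Model(inputs=model.input, outputs=layer_outputs)
activations = activation_model.predict(x_train)   # one feature-map array per selected layer
for act in activations:
    print(act.shape)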
Example #14
    def set_model(self, model_name, top_n=5):
        if model_name == 'densenet':
            self.model = densenet.DenseNet121(include_top=True,
                                              weights='imagenet',
                                              input_tensor=None,
                                              input_shape=None,
                                              pooling=None,
                                              classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: densenet.decode_predictions(x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1608.06993' target='_blank'>
                Densely Connected Convolutional Networks</a> (CVPR 2017 Best Paper Award)</li>
                </ul>
                """

        elif model_name == 'inception_resnet_v2':
            self.model = inception_resnet_v2.InceptionResNetV2(
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_resnet_v2.decode_predictions(
                x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1602.07261' target='_blank'>
                Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</a></li>
                </ul>
                """

        elif model_name == 'inception_v3':
            self.model = inception_v3.InceptionV3(include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  input_shape=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_v3.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1512.00567' target='_blank'>
                Rethinking the Inception Architecture for Computer Vision</a></li>
                </ul>
                """

        elif model_name == 'mobilenet':
            self.model = mobilenet.MobileNet(input_shape=None,
                                             alpha=1.0,
                                             depth_multiplier=1,
                                             dropout=1e-3,
                                             include_top=True,
                                             weights='imagenet',
                                             input_tensor=None,
                                             pooling=None,
                                             classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1704.04861' target='_blank'>
                MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications</a></li>
                </ul>
                """

        elif model_name == 'mobilenet_v2':
            self.model = mobilenet_v2.MobileNetV2(input_shape=None,
                                                  alpha=1.0,
                                                  include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet_v2.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1801.04381' target='_blank'>
                MobileNetV2: Inverted Residuals and Linear Bottlenecks</a></li>
                </ul>
                """

        elif model_name == 'nasnet':
            self.model = nasnet.NASNetLarge(input_shape=None,
                                            include_top=True,
                                            weights='imagenet',
                                            input_tensor=None,
                                            pooling=None,
                                            classes=1000)
            self.target_size = (331, 331)
            self.decoder = lambda x: nasnet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1707.07012' target='_blank'>
                Learning Transferable Architectures for Scalable Image Recognition</a></li>
                </ul>
                """

        elif model_name == 'resnet50':
            self.model = resnet50.ResNet50(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: resnet50.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li>ResNet : 
                <a href='https://arxiv.org/abs/1512.03385' target='_blank'>Deep Residual Learning for Image Recognition
                </a></li>
                </ul>
                """

        elif model_name == 'vgg16':
            self.model = vgg16.VGG16(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg16.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>
            Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'vgg19':
            self.model = vgg19.VGG19(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg19.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'xception':
            self.model = xception.Xception(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: xception.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1610.02357' target='_blank'>Xception: Deep Learning with Depthwise Separable Convolutions</a></li>
            </ul>"""

        else:
            logger.error('Unknown model name!')
               xytext=(0,10),ha='center')
  plt.legend(['training', 'validation'], loc='upper left')

  plt.show()

def print_classification_report(model, data_gen, batch_size):
  true_labels = data_gen.labels
  data_gen.reset()
  predicted_labels = model.predict(data_gen, steps = np.ceil(len(true_labels)/batch_size))
  #print(list( np.argmax(a) for a in predicted_labels), true_labels)
  print(classification_report(true_labels, list( np.argmax(a) for a in predicted_labels)))
  print("Confusion Matrix:\n ",confusion_matrix(true_labels,list( np.argmax(a) for a in predicted_labels)))

"""## **Build the model**"""

xception_base = xception.Xception(weights='imagenet', include_top= False, input_shape= (299, 299, 3))

xception_base.summary()

model = models.Sequential()
model.add(xception_base)
model.add(keras.layers.GlobalAveragePooling2D())
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(512, activation='relu'))
# model.add(keras.layers.Dropout(0.4))
# model.add(keras.layers.Dense(512, activation='relu'))
# model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(7, activation= 'softmax'))
model.summary()
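A possible next step, not shown in the snippet: freeze the Xception base so only the new dense head trains, then compile; the optimizer choice is illustrative.

xception_base.trainable = False                  # freeze the pretrained base
model.compile(optimizer='adam',
              loss='categorical_crossentropy',   # matches the 7-way softmax head above
              metrics=['accuracy'])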
Example #16
"""Functions for Classification tab
"""
import cv2
import numpy as np
import plotly.graph_objects as go
from tensorflow.keras.applications import resnet_v2, vgg16, xception

# module, model, input shape
models = {
    "xception": (xception, xception.Xception(), (299, 299)),
    "vgg16": (vgg16, vgg16.VGG16(), (224, 224)),
    "resnet50": (resnet_v2, resnet_v2.ResNet50V2(), (224, 224)),
}


def make_plotly_plot(pred, model_name):
    """
    Make the plotly plot given the predictions and model name.
    """
    _, y, x = zip(*pred[::-1])
    x = [round(i * 100, 2) for i in x]

    trace1 = go.Bar(
        x=x,
        y=y,
        orientation="h",
        text=[str(i) + "%" for i in x],
        textfont=dict(size=20),
        textangle=0,
        textposition="auto",
    )
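How the models table is consumed is not part of this excerpt; a hedged sketch of a classification helper, where the function name, the bytes-in interface, and the OpenCV decoding are assumptions.

def classify(image_bytes, model_name, top=5):
    # hypothetical glue between the `models` table and make_plotly_plot()
    module, model, input_shape = models[model_name]
    img = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_COLOR)
    img = cv2.cvtColor(cv2.resize(img, input_shape), cv2.COLOR_BGR2RGB)
    x = module.preprocess_input(np.expand_dims(img.astype("float32"), axis=0))
    pred = module.decode_predictions(model.predict(x), top=top)[0]
    return make_plotly_plot(pred, model_name)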
Example #17
image_to_test = "bird1.png"

# Load the model we trained
model = load_model('bird_feature_classifier_model.h5')

# Load image to test, resizing it to 73 pixels (as required by this model)
img = image.load_img(image_to_test, target_size=(73, 73))

# Convert the image to a numpy array
image_array = image.img_to_array(img)

# Add a fourth dimension to the image (since Keras expects a batch of images, not a single image)
images = np.expand_dims(image_array, axis=0)

# Normalize the data
images = xception.preprocess_input(images)

# Use the pre-trained neural network to extract features from our test image (the same way we did to train the model)
feature_extraction_model = xception.Xception(weights='imagenet',
                                             include_top=False,
                                             input_shape=(73, 73, 3))
features = feature_extraction_model.predict(images)

# Given the extracted features, make a final prediction using our own model
results = model.predict(features)

# Since we are only testing one image for a single class, we only need the first element of the first result
single_result = results[0][0]

# Print the result
print(f"Likelihood that {image_to_test} is a bird: {single_result * 100}%")
def get_model(args, loss_function='binary_crossentropy', initial_lr=0.0001, weights="imagenet"):
  """
  Select and build the classification model.
  Parameters:
    args (argparse.Namespace): parsed arguments, including the model name, input size, and optimizer.
    loss_function (str): loss function to use.
    initial_lr (float): initial learning rate for the optimizer.
  Returns the compiled model.
  """

  if args.model=='Xception':

    if args.optm=='Adam':
        optm = Adam(lr=0.00005,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True)  
            
    pre_trained_model = xception.Xception(weights="/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/xception_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    #x=Dropout(rate=0.5)(x)    
    x=Dense(2048, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    x=Dropout(rate=0.2)(x)
    preds=Dense(args.classes,activation='softmax')(x) 
    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in pre_trained_model.layers:
        layer.trainable=False


    for layer in model.layers:
        if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
            layer.trainable = True
            K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
            K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        else:
            layer.trainable = False
            
    for layer in model.layers[129:]:
        layer.trainable=True
    #for layer in model.layers[:129]:
    #    layer.trainable=False            
    #for layer in model.layers[129:]:
    #    layer.trainable=True

    #for layer in model.layers:
    #    if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
    #        layer.trainable = True
    #        K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #        K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        #else:
        #    layer.trainable = False
            
    #for layer in model.layers[129:]:
    #    layer.trainable = True

    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()    
    return model

  if args.model=='Xception_1':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True)  
            
    pre_trained_model = xception.Xception(weights="/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/xception_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    #x=Dropout(rate=0.5)(x)    
    x=Dense(2048, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    x=Dropout(rate=0.2)(x)
    preds=Dense(args.classes,activation='softmax')(x) 
    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in pre_trained_model.layers:
        layer.trainable=True


    for layer in model.layers:
       if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
           layer.trainable = True
           K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
           K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
    #   else:
    #       layer.trainable = False

    #for layer in model.layers[:129]:
    #    layer.trainable=False            
    #for layer in model.layers[129:]:
    #    layer.trainable=True

    #for layer in model.layers:
    #    if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
    #        layer.trainable = True
    #        K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #        K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        #else:
        #    layer.trainable = False
            
    #for layer in model.layers[129:]:
    #    layer.trainable = True

    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()    
    return model


  if args.model=='InceptionV3':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = InceptionV3(input_shape=(args.size, args.size, 3), include_top=False, weights="/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5")

    #last_layer = pre_trained_model.get_layer('mixed10')
    last_output = pre_trained_model.output#last_layer.output
    #x = GlobalMaxPooling2D()(last_output)
 
    x = GlobalAveragePooling2D()(last_output)
    # Add a fully connected layer with 512 hidden units and ReLU activation
    #x=Dropout(rate=0.5)(x)   
    x = Dense(2048, activation='relu')(x)
    #x=Dropout(rate=0.2)(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    #x=Dropout(rate=0.2)(x)
    #x = Dense(512, activation='relu')(x)    
    x=Dropout(rate=0.2)(x)
    # Add a final sigmoid layer for classification
    x = Dense(args.classes, activation='softmax')(x)
    # Configure and compile the model

    model = Model(pre_trained_model.input, x)

    #for layer in pre_trained_model.layers:
    #    layer.trainable = True
    #for layer in model.layers:
    #    layer.trainable = True


    for layer in pre_trained_model.layers:
        layer.trainable=True

    #for layer in model.layers:
    #   if hasattr(layer, 'mixed10') and hasattr(layer, 'mixed10'):
    #       layer.trainable = True
    #       K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #       K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        #else:
        #   layer.trainable = False
    for layer in model.layers[280:]:
        layer.trainable = True
   # for layer in pre_trained_model.layers:
    #    layer.trainable = False
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='InceptionV3_1':

    if args.optm=='Adam':
        optm = Adam(lr=0.00005,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = InceptionV3(input_shape=(args.size, args.size, 3), include_top=False, weights="/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5")

    #last_layer = pre_trained_model.get_layer('mixed10')
    last_output = pre_trained_model.output#last_layer.output
    #x = GlobalMaxPooling2D()(last_output)
 
    x = GlobalAveragePooling2D()(last_output)
    # Add a fully connected layer with 512 hidden units and ReLU activation
    x=Dropout(rate=0.5)(x)   
    #x = Dense(2048, activation='relu')(x)
    x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.2)(x)
    x=Dropout(rate=0.5)(x)    
    x=Dense(1024, activation='relu')(x)
    x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    #x=Dropout(rate=0.2)(x)
    #x = Dense(512, activation='relu')(x)    
    x=Dropout(rate=0.5)(x)
    # Add a final sigmoid layer for classification
    x = Dense(args.classes, activation='softmax')(x)
    # Configure and compile the model

    model = Model(pre_trained_model.input, x)

    #for layer in pre_trained_model.layers:
    #    layer.trainable = True
    #for layer in model.layers:
    #    layer.trainable = True


    for layer in pre_trained_model.layers:
        layer.trainable=False

    #for layer in model.layers:
    #   if hasattr(layer, 'mixed10') and hasattr(layer, 'mixed10'):
    #       layer.trainable = True
    #       K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #       K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        #else:
        #   layer.trainable = False
    #for layer in model.layers[280:]:
    #    layer.trainable = True
   # for layer in pre_trained_model.layers:
    #    layer.trainable = False



    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='Resnet50':

    if args.optm=='Adam':
        optm = Adam(lr=0.00005,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = resnet50.ResNet50(weights="/scratch/parceirosbr/bigoilict/share/Polen/radar_temp/weigths/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    x=Dropout(rate=0.3)(x)
    x=Dense(1024, activation='relu')(x)
    x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    preds=Dense(2,activation='softmax')(x) 

    model=Model(inputs=pre_trained_model.input, outputs=preds)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    #for layer in model.layers:
    #    if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
    #        layer.trainable = True
    #        K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
    #        K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
    #    else:
    #        layer.trainable = False
    for layer in pre_trained_model.layers:
        layer.trainable=True      
    for layer in model.layers[165:]:
        layer.trainable=True           
    #for layer in model.layers[165:]:
    #    layer.trainable=True

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='vgg16':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = vgg16.VGG16(weights="/scratch/parceirosbr/bigoilict/share/Polen/radar_temp/weigths/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    #x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    x=Dropout(rate=0.5)(x)
    x=Dense(1024,activation='relu')(x) #dense layer 2
    x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) #dense layer 3

    preds=Dense(2,activation='softmax')(x) #final layer with softmax activation

    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in pre_trained_model.layers:
        layer.trainable = True

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='MobileNetV2':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = MobileNetV2(weights='/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5',include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    x=Dense(2048,activation='relu')(x)
    x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    #preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
    x=Dense(512,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
    #x=Dropout(rate=0.2)(x)  # dead line: preds is already defined above
    model=Model(inputs=pre_trained_model.input, outputs=preds)
    for layer in pre_trained_model.layers:
        layer.trainable = True

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='MobileNetV2_1':

    if args.optm=='Adam':
        optm = Adam(lr=0.0002,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = MobileNetV2(weights='/scratch/parceirosbr/bigoilict/share/Polen/clasificacion/weigths/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5',include_top=False, input_shape=(args.size, args.size, 3))
    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    x=Dense(1024,activation='relu')(x)
    x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    x=Dense(512,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
    preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in model.layers[:20]:
        layer.trainable=True
    for layer in model.layers[20:]:
        layer.trainable=True

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='inception_resnet_v2':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = InceptionResNetV2(weights='/scratch/parceirosbr/bigoilict/share/Polen/radar_temp/weigths/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5',include_top=False, input_shape=(args.size, args.size, 3))

    for layer in pre_trained_model.layers:
        if hasattr(layer, 'moving_mean') and hasattr(layer, 'moving_variance'):
            layer.trainable = True
            K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))
            K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))
        else:
            layer.trainable = False
    for layer in pre_trained_model.layers:
        layer.trainable = True
        
    last_layer = pre_trained_model.get_layer('conv_7b_ac')
    last_output = last_layer.output
    x = GlobalMaxPooling2D()(last_output)
    # Add a fully connected layer with 512 hidden units and ReLU activation
    #x=Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x = Dense(1024, activation='relu')(x)
    #x=Dropout(rate=0.5)(x)
    x = Dense(512, activation='relu')(x)
    # Add a dropout rate of 0.7
    x = Dropout(0.2)(x)
    # Add a final sigmoid layer for classification
    x = Dense(2, activation='softmax')(x)

    # Configure and compile the model
    model = Model(pre_trained_model.input, x)

    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model

  if args.model=='NASNetLarge':

    if args.optm=='Adam':
        optm = Adam(lr=0.0001,clipnorm=1.) 
    if args.optm=='SGD':
        optm = SGD(lr=0.0001, momentum=0.9, decay=0, nesterov=True,clipnorm=1.)  
            
    pre_trained_model = NASNetLarge(weights='/scratch/parceirosbr/bigoilict/share/Polen/radar_temp/weigths/nasnet_large_no_top.h5',include_top=False, input_shape=(args.size, args.size, 3))


    x=pre_trained_model.output
    x=GlobalAveragePooling2D()(x)
    x=Dropout(rate=0.5)(x)    
    x=Dense(2048, activation='relu')(x)
    x=Dropout(rate=0.5)(x)
    x=Dense(1024, activation='relu')(x)
    x=Dropout(rate=0.5)(x)
    x=Dense(512,activation='relu')(x) 
    #x=Dropout(rate=0.5)(x)
    preds=Dense(2,activation='softmax')(x) 
    model=Model(inputs=pre_trained_model.input, outputs=preds)

    for layer in pre_trained_model.layers:
        layer.trainable=False

    model.compile(loss=loss_function, optimizer=optm,metrics=['accuracy',f1,'AUC','MeanSquaredError'])
    model.summary()
    return model
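The custom f1 metric passed to every model.compile(...) call above is not defined in this excerpt; a common Keras-backend formulation, assumed here, is:

def f1(y_true, y_pred):
    # batch-wise F1 from rounded predictions; K.epsilon() guards against division by zero
    y_pred = K.round(y_pred)
    tp = K.sum(y_true * y_pred)
    precision = tp / (K.sum(y_pred) + K.epsilon())
    recall = tp / (K.sum(y_true) + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())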
Example #19
import time

from keras.utils import *
from keras.losses import *
from keras.layers import *
from keras.metrics import *

from jessica_local_spark_building import sqlContext
from pyspark.sql.types import StructType, StructField, StringType

from pyspark import StorageLevel

from keras.models import *
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications import xception

base_model_Xception = xception.Xception(
    weights='xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
    include_top=False)


def file_json2file_npy(input_json,
                       sqlContext,
                       file_path_column_name=None,
                       x_xception_npy=None,
                       x_document_id_npy=None,
                       y_npy=None,
                       output_json=None):
    start_time = time.time()
    print('loading data from %s' % (input_json))
    input_df = sqlContext.read.json(input_json)
    input_df.registerTempTable('input_df')
    input_df = sqlContext.sql(u"""
print(len(y_train))

# NOW WE LOAD THE PRE_TRAINED MODEL
FEATURE_EXTRACTOR = vgg19.VGG19(weights='imagenet',
                                include_top=False,
                                input_shape=targetSize_withdepth)
model = Sequential()
model.add(FEATURE_EXTRACTOR)
model.add(Flatten())
features_x = model.predict_generator(train_generator)
print(type(features_x).__name__)
print(features_x.shape)
model.save("../model/model.h5", include_optimizer=False)

FEATURE_EXTRACTOR1 = xception.Xception(weights='imagenet',
                                       include_top=False,
                                       input_shape=targetSize_withdepth)
model1 = Sequential()
model1.add(FEATURE_EXTRACTOR1)
model1.add(Flatten())
features_x1 = model1.predict_generator(train_generator)
print(type(features_x1).__name__)
print(features_x1.shape)
model1.save("../model/model1.h5", include_optimizer=False)

FEATURE_EXTRACTOR2 = resnet50.ResNet50(weights='imagenet',
                                       include_top=False,
                                       input_shape=targetSize_withdepth)
model2 = Sequential()
model2.add(FEATURE_EXTRACTOR2)
model2.add(Flatten())