Example #1
def precompute_inception_resnet(img_paths):
    logging.info('precomputing inception resnet v2 inputs')
    inception = inception_resnet_v2.InceptionResNetV2(weights="imagenet",
                                                      include_top=True)

    embeddings = []
    for i, (img_path, image_name) in enumerate(img_paths):
        img = load_image(img_path) / 255
        embedding = calc_inception_embedding(img, inception)
        embeddings.append({"file_name": image_name, "embedding": embedding})
        if (i + 1) % 100 == 0:
            print(i + 1)

    df = pd.DataFrame(embeddings)
    df.to_csv("./precomputed_inputs/inception_res_net_v2.csv")
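# A hypothetical version of calc_inception_embedding, which the original snippet does not
# show. It assumes `import numpy as np` and `import tensorflow as tf`, resizes the
# [0, 1]-scaled image to the 299x299 input InceptionResNetV2 expects, and returns the
# network output for a single image.
def calc_inception_embedding(img, inception):
    resized = tf.image.resize(img, (299, 299)).numpy() * 255.0
    batch = inception_resnet_v2.preprocess_input(resized[np.newaxis, ...])
    return inception.predict(batch)[0]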
Example #2
def preprocess_inception():
    print("creating inception model (this takes a while)...")
    inception = inception_resnet_v2.InceptionResNetV2(include_top=False,
                                                      weights='imagenet',
                                                      input_tensor=None,
                                                      input_shape=(84, 84, 3),
                                                      pooling=None)
    print("extracting features from training images...")
    features_train = inception.predict(x_train_inc, verbose=1)
    print("extracting features from validation images...")
    features_validation = inception.predict(x_validation_inc, verbose=1)
    print("done extracting")
    # reshape, because predict() returns an array with shape ("num samples", 1, 1, "output layer size")
    features_train = np.reshape(
        features_train, (features_train.shape[0], features_train.shape[3]))
    features_validation = np.reshape(
        features_validation,
        (features_validation.shape[0], features_validation.shape[3]))
    return features_train, features_validation
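# Not part of the original: a minimal sketch of how x_train_inc and x_validation_inc might
# be prepared before preprocess_inception() is called, assuming x_train and x_validation
# hold uint8 RGB images already resized to 84x84 (the names are placeholders).
x_train_inc = inception_resnet_v2.preprocess_input(x_train.astype("float32"))
x_validation_inc = inception_resnet_v2.preprocess_input(x_validation.astype("float32"))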
Example #3
####################################
#### Initialize App and Session ####
####################################
app = Flask(__name__)
SESSION_TYPE = 'filesystem'
app.config.from_object(__name__)
Session(app)

#############################
#### Load Trained Models ####
#############################
model_file = "Output/models/ten/LR.sav"
global graph
graph = ops.get_default_graph()
classifier = joblib.load(model_file)
feature_extractor = inception_resnet_v2.InceptionResNetV2(weights='imagenet',
                                                          include_top=False,
                                                          input_shape=(224,
                                                                       224, 3))
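# Hypothetical helper, not part of the original app: one way the loaded InceptionResNetV2
# feature extractor and the pickled classifier could be combined at prediction time
# (assumes `import numpy as np`; img_array is a 224x224x3 float32 RGB array).
def classify_image(img_array):
    batch = inception_resnet_v2.preprocess_input(img_array[np.newaxis, ...])
    features = feature_extractor.predict(batch)
    return classifier.predict(features.reshape(1, -1))[0]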


####################
#### App Routes ####
####################
@app.route('/')
def index():
    """Home route."""
    return render_template('index.html', requested_img={})


@app.route("/upload_image", methods=['POST', 'GET'])
def upload():
    """Reads in user input and displays uploaded image to page."""
Example #4
def get_backbone_model():
    inception_resnet = inception_resnet_v2.InceptionResNetV2(
        include_top=False, weights="imagenet", input_shape=(250, 250, 3))
    output = tf.keras.layers.GlobalAveragePooling2D()(inception_resnet.output)
    base_model = tf.keras.models.Model(inception_resnet.input, output)
    return base_model
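# Not part of the original: a quick shape check of the backbone defined above
# (InceptionResNetV2 with global average pooling yields 1536-dimensional features).
backbone = get_backbone_model()
print(backbone.predict(tf.zeros((1, 250, 250, 3))).shape)  # expected: (1, 1536)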
Example #5
                                              target_size=target_size,
                                              batch_size=batch_size,
                                              shuffle=True)
    return train_datagen, val_datagen


def get_scheduler(start_lr):
    def scheduler(epoch):
        lr = start_lr * np.exp(0.15 * (-epoch))
        print("lr =", lr)
        return lr

    return scheduler


transfer = inception_resnet_v2.InceptionResNetV2(include_top=False)
max_pooling = GlobalMaxPooling2D(name="max_pooling")(transfer.output)
drop_out = Dropout(0.05, name="dropout1")(max_pooling)
outputs = Dense(len(label_columns), activation="sigmoid")(drop_out)
model = Model(inputs=transfer.input, outputs=outputs)
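# Not part of the original snippet: the multi-label head above would typically be compiled
# before the fine-tuning loop below; the optimizer and loss here are assumptions for
# sigmoid multi-label outputs.
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])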

for pipeline in pipelines:
    callback = LearningRateScheduler(get_scheduler(pipeline.start_lr))

    train_datagen, val_datagen = get_gens(pipeline.resolution,
                                          pipeline.batch_size)
    for layersN in [1, 100, 200]:
        print("resolution {} last {} layers".format(pipeline.resolution,
                                                    layersN))
        for layer in model.layers:
            layer.trainable = False
Example #6
        if Model == 1:
            from tensorflow.keras.applications.inception_v3 import preprocess_input

            image = preprocess_input(image)
            model = inception_v3.InceptionV3(weights='imagenet', include_top=True)
            # model.summary()

        if Model == 2:
            from tensorflow.keras.applications.resnet50 import preprocess_input

            image = preprocess_input(image)
            model = resnet50.ResNet50(weights='imagenet', include_top=True)
            # model.summary()

        if Model == 3:
            from tensorflow.keras.applications.inception_resnet_v2 import preprocess_input

            image = preprocess_input(image)
            model = inception_resnet_v2.InceptionResNetV2(weights='imagenet', include_top=True)
            # model.summary()

        if Model == 4:
            from tensorflow.keras.applications.xception import preprocess_input

            image = preprocess_input(image)
            model = xception.Xception(weights='imagenet', include_top=True)
            # model.summary()

        prediction = model.predict(image)

        from tensorflow.keras.applications.imagenet_utils import decode_predictions

        decoded_prediction = decode_predictions(prediction)
        # decoded_prediction = np.array(decoded_prediction)
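        # Not part of the original: a sketch of printing the decoded top-5 ImageNet predictions.
        for _, class_name, prob in decoded_prediction[0]:
            print(class_name, float(prob))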
Example #7
for x, y in train_ds:
    print('Shapes:', x.shape, 'and', y.shape)
    print("Labels: ", y.numpy())

    plt.figure(figsize=(16, 9))
    plot(x.numpy().astype(int), rows=4)
    plt.tight_layout()
    break

# ## Model Definition

# In[9]:

from tensorflow.keras.applications import inception_resnet_v2

encoder = inception_resnet_v2.InceptionResNetV2(
    include_top=False, pooling='avg', input_shape=Config.data.input_shape[1:])
# encoder = Model(encoder.input, encoder.get_layer('block_9_add').output)

# In[10]:


def encoder_pre(x):
    return Lambda(inception_resnet_v2.preprocess_input,
                  name='pre_incresnet')(x)


# In[11]:

from tensorflow.keras.layers import GlobalAveragePooling2D
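# Not part of the original notebook: a sketch of chaining the preprocessing Lambda and the
# encoder defined above (assumes `import tensorflow as tf`; with pooling='avg' the encoder
# outputs 1536-dimensional feature vectors).
inputs = tf.keras.Input(shape=Config.data.input_shape[1:])
features = encoder(encoder_pre(inputs))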

Example #8
def get_siamese_model(name=None, input_shape=(224, 224, 3),
                      embedding_vec_size=512, not_freeze_last=2):
    """
        Model architecture
    """

    if name == "InceptionV3":

        base_model = inception_v3.InceptionV3(

            weights='imagenet', include_top=False)

        model_preprocess_input = inception_v3.preprocess_input

    if name == "InceptionResNetV2":

        base_model = inception_resnet_v2.InceptionResNetV2(

            weights='imagenet', include_top=False)

        model_preprocess_input = inception_resnet_v2.preprocess_input

    if name == "DenseNet121":

        base_model = densenet.DenseNet121(

            weights='imagenet', include_top=False)

        model_preprocess_input = densenet.preprocess_input

    if name == "DenseNet169":

        base_model = densenet.DenseNet169(

            weights='imagenet', include_top=False)

        model_preprocess_input = densenet.preprocess_input

    if name == "DenseNet201":

        base_model = densenet.DenseNet201(

            weights='imagenet', include_top=False)

        model_preprocess_input = densenet.preprocess_input

    if name == "MobileNetV2":

        base_model = mobilenet_v2.MobileNetV2(

            weights='imagenet', include_top=False)

        model_preprocess_input = mobilenet_v2.preprocess_input

    if name == "MobileNet":

        base_model = mobilenet.MobileNet(

            weights='imagenet', include_top=False)

        model_preprocess_input = mobilenet.preprocess_input

    if name == "ResNet50":

        base_model = resnet50.ResNet50(

            weights='imagenet', include_top=False)

        model_preprocess_input = resnet50.preprocess_input

    if name == "VGG16":

        base_model = vgg16.VGG16(

            weights='imagenet', include_top=False)

        model_preprocess_input = vgg16.preprocess_input

    if name == "VGG19":

        base_model = vgg19.VGG19(

            weights='imagenet', include_top=False)

        model_preprocess_input = vgg19.preprocess_input

    if name == "Xception":

        base_model = xception.Xception(

            weights='imagenet', include_top=False)

        model_preprocess_input = xception.preprocess_input

    # Verifica se existe base_model

    if 'base_model' not in locals():

        return ["InceptionV3", "InceptionResNetV2",

                "DenseNet121", "DenseNet169", "DenseNet201",

                "MobileNetV2", "MobileNet",

                "ResNet50",

                "VGG16", "VGG19",

                "Xception"

                ]

    # desativando treinamento

    for layer in base_model.layers[:-not_freeze_last]:

        layer.trainable = False

    x = base_model.layers[-1].output

    x = GlobalAveragePooling2D()(x)

    x = Dense(

        embedding_vec_size,

        activation='linear',  # sigmoid? relu?

        name='embedding',

        use_bias=False

    )(x)

    model = Model(

        inputs=base_model.input,

        outputs=x

    )

    left_input = Input(input_shape)

    right_input = Input(input_shape)

    # Generate the encodings (feature vectors) for the two images

    encoded_l = model(left_input)

    encoded_r = model(right_input)

    # Add a customized layer to compute the absolute difference between the encodings

    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))

    L1_distance = L1_layer([encoded_l, encoded_r])

    # Add a dense layer with a sigmoid unit to generate the similarity score

    prediction = Dense(

        1,

        activation=Activation(gaussian),

        use_bias=False,

        kernel_constraint=NonNeg()

    )(L1_distance)

    # Connect the inputs with the outputs

    siamese_net = Model(

        inputs=[left_input, right_input],

        outputs=prediction

    )

    return {

        "model": siamese_net,

        "preprocess_input": model_preprocess_input

    }
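# Not part of the original: a minimal usage sketch of get_siamese_model (the custom
# `gaussian` activation used above is assumed to be defined elsewhere in the module).
siamese = get_siamese_model(name="InceptionResNetV2", input_shape=(224, 224, 3))
siamese["model"].compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])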
Example #9
    def __init__(
            self,
            cls: keras_hierarchicalclassification.KerasHierarchicalClassifier):
        self.cls = cls

        with configuration.ConfigurationContext("KerasIncrementalModel"):
            # Preprocessing
            self.random_crop_to_size = configuration.get(
                "random_crop_to_size", None)
            _channel_mean = configuration.get("channel_mean",
                                              [127.5, 127.5, 127.5])
            self.channel_mean_normalized = np.array(_channel_mean) / 255.0
            _channel_stddev = configuration.get("channel_stddev",
                                                [127.5, 127.5, 127.5])
            self.channel_stddev_normalized = np.array(_channel_stddev) / 255.0

            # Batch size
            self.batchsize_max = configuration.get("batchsize_max", 256)
            self.batchsize_min = configuration.get("batchsize_min", 1)
            self.sequential_training_batches = configuration.get(
                "sequential_training_batches", 1)
            self.autobs_vram = configuration.get(
                "autobs_vram", configuration.get_system("gpu0_vram"))

            # Fine-tuning options
            self.do_train_feature_extractor = configuration.get(
                "train_feature_extractor", False)
            self.use_pretrained_weights = configuration.get(
                "use_pretrained_weights", "ILSVRC2012")

            # Architecture
            self.architecture = configuration.get("architecture",
                                                  "keras::ResNet50V2")

            # Optimization and regularization
            self.l2_regularization = configuration.get("l2_regularization",
                                                       5e-5)
            self.optimizer_name = configuration.get("optimizer", "adam")
            if self.optimizer_name == "sgd":
                self.sgd_momentum = configuration.get("sgd_momentum", 0.9)
            self.lr_schedule_cfg = configuration.get("lr_schedule", {
                "name": "constant",
                "config": {
                    "initial_lr": 0.003
                }
            })
            self.lr_schedule = keras_learningrateschedule.get(
                self.lr_schedule_cfg)

        if self.architecture == "keras::ResNet50V2":
            self.feature_extractor = resnet_v2.ResNet50V2(
                include_top=False,
                input_tensor=None,
                input_shape=None,
                pooling="avg",
                weights="imagenet"
                if self.use_pretrained_weights == "ILSVRC2012" else None,
            )
            self.pixels_per_gb = 1100000

            self._add_regularizers()

        elif self.architecture == "keras::InceptionResNetV2":
            self.feature_extractor = inception_resnet_v2.InceptionResNetV2(
                include_top=False,
                input_tensor=None,
                input_shape=None,
                pooling="avg",
                weights="imagenet"
                if self.use_pretrained_weights == "ILSVRC2012" else None,
            )
            self.pixels_per_gb = 700000

            self._add_regularizers()

        elif self.architecture == "keras::MobileNetV2":
            with configuration.ConfigurationContext("KerasIncrementalModel"):
                self.side_length = configuration.get("side_length",
                                                     no_default=True)
            self.feature_extractor = mobilenet_v2.MobileNetV2(
                include_top=False,
                input_tensor=None,
                input_shape=(self.side_length, self.side_length, 3),
                pooling="avg",
                weights="imagenet"
                if self.use_pretrained_weights == "ILSVRC2012" else None,
            )
            self.pixels_per_gb = 2000000

            self._add_regularizers()

        elif self.architecture == "keras::NASNetMobile":
            with configuration.ConfigurationContext("KerasIncrementalModel"):
                self.side_length = configuration.get("side_length",
                                                     no_default=True)
            self.feature_extractor = nasnet.NASNetMobile(
                include_top=False,
                input_tensor=None,
                input_shape=(self.side_length, self.side_length, 3),
                pooling="avg",
                weights="imagenet"
                if self.use_pretrained_weights == "ILSVRC2012" else None,
            )
            self.pixels_per_gb = 1350000

            self._add_regularizers()

        elif self.architecture == "keras::CIFAR-ResNet56":
            assert (self.do_train_feature_extractor
                    ), "There are no pretrained weights for this architecture!"
            assert (self.use_pretrained_weights is None
                    ), "There are no pretrained weights for this architecture!"

            from chia.methods.common import keras_cifar_resnet

            self.feature_extractor = keras_cifar_resnet.feature_extractor(
                version=2, n=6, l2_norm=self.l2_regularization)
            self.pixels_per_gb = 200000

        else:
            raise ValueError(f'Unknown architecture "{self.architecture}"')

        if self.optimizer_name == "adam":
            self.optimizer = tf.keras.optimizers.Adam(self.lr_schedule(0))
        else:
            self.optimizer = tf.keras.optimizers.SGD(
                learning_rate=self.lr_schedule(0), momentum=self.sgd_momentum)
        self.augmentation = keras_dataaugmentation.KerasDataAugmentation()

        if (self.use_pretrained_weights is not None
                and self.use_pretrained_weights != "ILSVRC2012"):
            print(
                f"Loading alternative pretrained weights {self.use_pretrained_weights}"
            )
            self.feature_extractor.load_weights(self.use_pretrained_weights)

        if not self.do_train_feature_extractor:
            for layer in self.feature_extractor.layers:
                layer.trainable = False

        self.reported_auto_bs = False

        # State here
        self.current_step = 0
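# Not part of the original: an illustrative configuration for the options read above under
# ConfigurationContext("KerasIncrementalModel"); the values are arbitrary examples.
example_configuration = {
    "architecture": "keras::InceptionResNetV2",
    "use_pretrained_weights": "ILSVRC2012",
    "train_feature_extractor": False,
    "batchsize_max": 256,
    "batchsize_min": 1,
    "sequential_training_batches": 1,
    "l2_regularization": 5e-5,
    "optimizer": "sgd",
    "sgd_momentum": 0.9,
    "lr_schedule": {"name": "constant", "config": {"initial_lr": 0.003}},
}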
Example #10
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Flatten, Dropout, Conv2D, MaxPool2D

from tensorflow.keras.applications import resnet_v2, inception_resnet_v2

import tensorflow_datasets as tfds
from tensorflow.keras.callbacks import EarlyStopping

from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.metrics import TopKCategoricalAccuracy



### HERE WE JUST PREPARE THE DS THE SAME WAY WE DID BEFORE

dataset, info = tfds.load(name="stanford_dogs", with_info=True)
resnet_inception = inception_resnet_v2.InceptionResNetV2()

IMG_LEN = 299
IMG_SHAPE = (IMG_LEN, IMG_LEN, 3)
N_BREEDS = 120

training_data = dataset['train']
test_data = dataset['test']

def preprocess(ds_row):
    image = tf.image.convert_image_dtype(ds_row['image'], dtype=tf.float32)
    image = tf.image.resize(image, (IMG_LEN, IMG_LEN), method='nearest')
    label = tf.one_hot(ds_row['label'], N_BREEDS)
    return image, label
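# Not part of the original: a sketch of applying preprocess() to build batched input
# pipelines (the batch size is an arbitrary choice).
train_ds = training_data.map(preprocess).shuffle(1024).batch(32).prefetch(tf.data.AUTOTUNE)
test_ds = test_data.map(preprocess).batch(32).prefetch(tf.data.AUTOTUNE)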
Example #11
    def set_model(self, model_name, top_n=5):
        if model_name == 'densenet':
            self.model = densenet.DenseNet121(include_top=True,
                                              weights='imagenet',
                                              input_tensor=None,
                                              input_shape=None,
                                              pooling=None,
                                              classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: densenet.decode_predictions(x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1608.06993' target='_blank'>
                Densely Connected Convolutional Networks</a> (CVPR 2017 Best Paper Award)</li>
                </ul>
                """

        elif model_name == 'inception_resnet_v2':
            self.model = inception_resnet_v2.InceptionResNetV2(
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_resnet_v2.decode_predictions(
                x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1602.07261' target='_blank'>
                Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</a></li>
                </ul>
                """

        elif model_name == 'inception_v3':
            self.model = inception_v3.InceptionV3(include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  input_shape=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_v3.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1512.00567' target='_blank'>
                Rethinking the Inception Architecture for Computer Vision</a></li>
                </ul>
                """

        elif model_name == 'mobilenet':
            self.model = mobilenet.MobileNet(input_shape=None,
                                             alpha=1.0,
                                             depth_multiplier=1,
                                             dropout=1e-3,
                                             include_top=True,
                                             weights='imagenet',
                                             input_tensor=None,
                                             pooling=None,
                                             classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1704.04861' target='_blank'>
                MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications</a></li>
                </ul>
                """

        elif model_name == 'mobilenet_v2':
            self.model = mobilenet_v2.MobileNetV2(input_shape=None,
                                                  alpha=1.0,
                                                  include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet_v2.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1801.04381' target='_blank'>
                MobileNetV2: Inverted Residuals and Linear Bottlenecks</a></li>
                </ul>
                """

        elif model_name == 'nasnet':
            self.model = nasnet.NASNetLarge(input_shape=None,
                                            include_top=True,
                                            weights='imagenet',
                                            input_tensor=None,
                                            pooling=None,
                                            classes=1000)
            self.target_size = (331, 331)
            self.decoder = lambda x: nasnet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1707.07012' target='_blank'>
                Learning Transferable Architectures for Scalable Image Recognition</a></li>
                </ul>
                """

        elif model_name == 'resnet50':
            self.model = resnet50.ResNet50(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: resnet50.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li>ResNet : 
                <a href='https://arxiv.org/abs/1512.03385' target='_blank'>Deep Residual Learning for Image Recognition
                </a></li>
                </ul>
                """

        elif model_name == 'vgg16':
            self.model = vgg16.VGG16(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg16.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>
            Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'vgg19':
            self.model = vgg19.VGG19(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg19.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'xception':
            self.model = xception.Xception(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: xception.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1610.02357' target='_blank'>Xception: Deep Learning with Depthwise Separable Convolutions</a></li>
            </ul>"""

        else:
            logger.error('No such model name: %s', model_name)
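# Not part of the original: a hypothetical usage sketch, assuming `clf` is an instance of
# the surrounding class, `img` is a PIL image, and `import numpy as np` is available.
clf.set_model('inception_resnet_v2', top_n=3)
resized = img.resize(clf.target_size)
batch = inception_resnet_v2.preprocess_input(np.array(resized, dtype='float32')[np.newaxis, ...])
print(clf.decoder(clf.model.predict(batch)))  # top-3 decoded ImageNet predictions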
Example #12
# Class #10 = otheractivities
# Class #11 = pplnoactivity
# Class #12 = rock climbing
# Class #13 = swimming
# Class #14 = trailrunning

##### build our classifier model based on pre-trained InceptionResNetV2:

# Load the base pre-trained model

# do not include the top fully-connected layer
# 1. we don't include the top (fully connected) layers of InceptionResNetV2

model = inception_resnet_v2.InceptionResNetV2(include_top=False,
                                              weights='imagenet',
                                              input_tensor=None,
                                              input_shape=(img_width,
                                                           img_height, 3))
# Freeze the layers which you don't want to train. Here I am freezing all the layers,
# i.e. freeze all InceptionResNetV2 layers
# model.aux_logits=False

# The new dataset is small and similar to the original dataset:
# There is a risk of over-fitting if we try to train the entire network. Since the data is
# similar to the original data, we expect the higher-level ConvNet features to be relevant
# to this dataset as well, so the best idea is to train a linear classifier on the CNN codes.
# So let's freeze all the layers and train only the classifier

# first: train only the top layers

# for layer in net_final.layers[:FREEZE_LAYERS]:
#     layer.trainable = False
# for layer in net_final.layers[FREEZE_LAYERS:]:
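# Not part of the original: a minimal sketch of the idea described in the comments above:
# freeze every InceptionResNetV2 layer and train only a small classification head.
# The 15-class softmax and the usual keras.layers imports are assumptions.
for layer in model.layers:
    layer.trainable = False

x = GlobalAveragePooling2D()(model.output)
outputs = Dense(15, activation='softmax')(x)
net_final = Model(inputs=model.input, outputs=outputs)
net_final.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])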