Example #1
def define_model(config, dset):
    # Set session
    configuration = tf.ConfigProto()
    configuration.gpu_options.visible_device_list = config["train"]["gpus"]
    configuration.gpu_options.allow_growth = True
    sess = tf.Session(config=configuration)
    K.set_session(sess)

    # Load InceptionResnetV2 base architecture
    if config['train']["pretrained"]:
        weights = None
        input_tensor = Input(
            (
                config['train']["sampling"]["box_size"],  # height
                config['train']["sampling"]["box_size"],  # width
                config['prepare']['images']["channel_repeats"]  # channels
            ),
            name='input')
        base = inception_resnet_v2.InceptionResNetV2(include_top=True,
                                                     input_tensor=input_tensor)
        base.get_layer(index=-2).name = "global_{}_pool".format(
            config["train"]["model"]["params"]["pooling"])
        # Define model
        model = base
    else:
        weights = None
        input_tensor = Input(
            (
                config["train"]["sampling"]["box_size"],  # height
                config["train"]["sampling"]["box_size"],  # width
                len(config['prepare']['images']["channels"])  # channels
            ),
            name='input')
        base = inception_resnet_v2.InceptionResNetV2(
            include_top=False,
            weights=weights,
            input_tensor=input_tensor,
            pooling=config["train"]["model"]["params"]["pooling"],
            classes=dset.targets[0].shape[1])
        base.get_layer(index=-1).name = "global_{}_pool".format(
            config["train"]["model"]["params"]["pooling"])
        # Create output embedding for each target
        class_outputs = []
        for t in dset.targets:
            y = Dense(t.shape[1], activation="softmax",
                      name=t.field_name)(base.output)
            class_outputs.append(y)
        # Define model
        model = Model(input_tensor, class_outputs)

    # Define optimizer and loss
    optimizer = Adam(lr=config['train']['model']['params']['learning_rate'])
    loss = 'categorical_crossentropy'

    return model, optimizer, loss
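
The function above pulls everything from a nested config dict and a dataset object; a minimal sketch of the fields it reads follows (all values are illustrative assumptions, not the original project's defaults).

config = {
    "train": {
        "gpus": "0",
        "pretrained": False,
        "sampling": {"box_size": 96},
        "model": {"params": {"pooling": "avg", "learning_rate": 1e-3}},
    },
    "prepare": {
        "images": {"channels": ["DNA", "RNA", "ER"], "channel_repeats": 3},
    },
}
# dset is expected to expose dset.targets, where each target t provides a
# one-hot label matrix (t.shape[1] = number of classes) and a t.field_name.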
Example #2
def build_bbx_model():

    #    cnn = resnet50.ResNet50(weights='imagenet', include_top=True)

    #    cnn = inception_v3.InceptionV3(weights='imagenet',
    #                    input_shape = (img_h, img_w, 3),
    #                    include_top=False,
    #                    pooling='avg')

    cnn = inception_resnet_v2.InceptionResNetV2(weights='imagenet',
                                                input_shape=(img_h, img_w, 3),
                                                include_top=False,
                                                pooling='avg')

    for layer in cnn.layers:
        layer.trainable = True

    cnn.trainable = True
    x = cnn.output
    x = Dropout(drop_out_ratio)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(drop_out_ratio)(x)
    x = Dense(256, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    Output = Dense(8, activation='linear')(x)
    model = Model(cnn.input, Output)

    return model
Example #3
    def __init__(self, model, input_size):

        input_shape = (input_size, input_size, 3)

        if model == 'xception':
            base_model = xception.Xception(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        elif model == 'vgg16':
            base_model = vgg16.VGG16(weights='imagenet',
                                     include_top=False,
                                     pooling='max',
                                     input_shape=input_shape)
        elif model == 'vgg19':
            base_model = vgg19.VGG19(weights='imagenet',
                                     include_top=False,
                                     pooling='max',
                                     input_shape=input_shape)
        elif model == 'inception_v3':
            base_model = inception_v3.InceptionV3(weights='imagenet',
                                                  include_top=False,
                                                  pooling='max',
                                                  input_shape=input_shape)
        elif model == 'mobilenet':
            base_model = mobilenet.MobileNet(weights='imagenet',
                                             include_top=False,
                                             pooling='max',
                                             input_shape=input_shape)
        elif model == 'inception_resnet_v2':
            base_model = inception_resnet_v2.InceptionResNetV2(
                weights='imagenet',
                include_top=False,
                pooling='max',
                input_shape=input_shape)
        elif model == 'resnet50':
            base_model = resnet50.ResNet50(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        elif model == 'nasnetlarge':
            base_model = nasnet.NASNetLarge(weights='imagenet',
                                            include_top=False,
                                            pooling='max',
                                            input_shape=input_shape)
        else:
            base_model = nasnet.NASNetMobile(weights='imagenet',
                                             include_top=False,
                                             pooling='max',
                                             input_shape=input_shape)

        self.input_size = input_size
        self.model = base_model
        self.graph = tf.get_default_graph()
        base_model.summary()
Example #4
def get_extractor(layer='conv_7b'):
    """Returns pretrained InceptionResNetV2 model from bottom
    to choosen layer (conv_7b by default).
    
    Returns:
        keras.Model
    """
    base_model = inception_resnet_v2.InceptionResNetV2(include_top=False)
    extractor = Model(inputs=base_model.inputs,
                      outputs=base_model.get_layer(layer).output)
    return extractor
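
A minimal usage sketch for the extractor above (the image path and preprocessing calls are assumptions, not part of the original snippet):

import numpy as np
from keras.preprocessing import image
from keras.applications import inception_resnet_v2

extractor = get_extractor()  # defaults to the 'conv_7b' layer
img = image.load_img("example.jpg", target_size=(299, 299))  # hypothetical path
x = np.expand_dims(image.img_to_array(img), axis=0)
x = inception_resnet_v2.preprocess_input(x)
features = extractor.predict(x)  # (1, 8, 8, 1536) for a 299x299 input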
Example #5
def define_model(config, dset):

    # Load InceptionResnetV2 base architecture
    if config["profile"]["use_pretrained_input_size"]:
        input_tensor = Input((299, 299, 3), name="input")
        model = inception_resnet_v2.InceptionResNetV2(
            include_top=True,
            input_tensor=input_tensor,
            weights='imagenet',
            pooling="avg")
        model.summary()
    else:
        input_tensor = Input(
            (
                config["dataset"]["locations"]["box_size"],  # height
                config["dataset"]["locations"]["box_size"],  # width
                len(config["dataset"]["images"]["channels"])  # channels
            ),
            name="input")
        base = inception_resnet_v2.InceptionResNetV2(
            include_top=False,
            weights=None,
            input_tensor=input_tensor,
            pooling="avg",
            classes=dset.targets[0].shape[1])
        # Create output embedding for each target
        class_outputs = []
        for t in dset.targets:
            y = Dense(t.shape[1], activation="softmax",
                      name=t.field_name)(base.output)
            class_outputs.append(y)
        # Define model
        model = Model(input_tensor, class_outputs)

    # Define optimizer and loss
    optimizer = Adam(lr=config["train"]["model"]["params"]["learning_rate"])
    loss = "categorical_crossentropy"

    return model, optimizer, loss
Example #6
def resnet_embedding(batch):
    resnet = inception_resnet_v2.InceptionResNetV2(include_top=True,
                                                   weights='imagenet')
    resnet.graph = tf.get_default_graph()
    batch_resized = []
    for i in batch:
        i = resize(i, (299, 299, 3), mode='constant')
        batch_resized.append(i)
    batch_resized = np.array(batch_resized, dtype=np.float16)
    batch_resized = inception_resnet_v2.preprocess_input(batch_resized)
    with resnet.graph.as_default():
        embed = resnet.predict(batch_resized)
    return embed
Example #7
    def inception_resnet_v2_classificator(self, image_path):
        inception_resnet_v2_model = inception_resnet_v2.InceptionResNetV2(
            weights='imagenet')
        filename = image_path
        original = load_img(filename, target_size=(WIDTH, HEIGHT))
        plt.imshow(original)
        numpy_image = img_to_array(original)
        plt.imshow(np.uint8(numpy_image))
        image_batch = np.expand_dims(numpy_image, axis=0)
        plt.imshow(np.uint8(image_batch[0]))
        processed_image = inception_resnet_v2.preprocess_input(
            image_batch.copy())
        predictions = inception_resnet_v2_model.predict(processed_image)
        label = decode_predictions(predictions)
        return sorted(label[0], key=lambda x: x[2], reverse=True)
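
The method relies on module-level WIDTH and HEIGHT constants; for InceptionResNetV2 the canonical input size is 299x299, so a plausible setup and call (the wrapper class name and image path are hypothetical) would be:

WIDTH, HEIGHT = 299, 299  # assumed constants used by target_size above

# classifier = ImageClassifier()  # hypothetical class owning the method above
# top_predictions = classifier.inception_resnet_v2_classificator("example.jpg")
# print(top_predictions[0])       # (class_id, class_name, probability)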
Example #8
def incepres2(input_image, **kwargs):
    from keras.applications import inception_resnet_v2
    model = inception_resnet_v2.InceptionResNetV2(input_tensor=input_image,
                                                  include_top=False,
                                                  **kwargs)
    model.summary()  # summary() prints directly and returns None
    return [
        KL.ZeroPadding2D(p)(model.get_layer(name=n).output)
        for p, n in [
            (((1, 1), (1, 1)), 'conv2d_1'),
            (((1, 2), (1, 2)), 'max_pooling2d_1'),
            (((1, 2), (2, 1)), 'max_pooling2d_2'),
            (((1, 1), (1, 1)), 'max_pooling2d_3'),
            (((1, 1), (1, 1)), 'max_pooling2d_4'),
        ]
    ]
Example #9
    def load_imagenet_model(self):
        """
        Initialize the pre-trained model architecture and load the model weights.
        The downloaded weights contains only the convolution base. It does not
        contain the top two dense layers. We will have to manually define the top
        two dense layers. The size_dict dictionary object will hold the input sizes
        for various models, which will be further used to train the respective models
        with the given input image dimensions.
        
        Arguments:                    
    
            -model_name : Name of the model, for example - vgg16, inception_v3, resnet50 etc
    
        """

        if (self.input_params['model_name'] == "vgg16"):
            base_model = vgg16.VGG16(weights=None, include_top=False)
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5")
        elif (self.input_params['model_name'] == "inceptionv3"):
            base_model = inception_v3.InceptionV3(weights=None,
                                                  include_top=False)
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5")
        elif (self.input_params['model_name'] == "resnet50"):
            base_model = resnet.ResNet50(weights=None, include_top=False)
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
        elif (self.input_params['model_name'] == "inception_resnet"):
            base_model = inception_resnet_v2.InceptionResNetV2(
                weights=None, include_top=False)
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5"
            )
        elif (self.input_params['model_name'] == "nasnet"):
            base_model = nasnet.NASNetLarge(weights=None, include_top=False)
            base_model.load_weights(self.path_dict["weights_path"] +
                                    "NASNet-large-no-top.h5")
        elif (self.input_params['model_name'] == "xception"):
            base_model = xception.Xception(weights=None, include_top=False)
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "xception_weights_tf_dim_ordering_tf_kernels.h5")
        return base_model
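
As the docstring notes, the returned base contains only the convolutional blocks; a sketch of stacking the dense top on it (the layer width and class count are assumptions, not values from the original project):

from keras.applications import inception_resnet_v2
from keras.models import Model
from keras.layers import GlobalAveragePooling2D, Dense

# stands in for the base returned by load_imagenet_model()
base_model = inception_resnet_v2.InceptionResNetV2(weights=None, include_top=False)
num_classes = 10                                        # assumed
x = GlobalAveragePooling2D()(base_model.output)
x = Dense(512, activation='relu')(x)                    # assumed width
outputs = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=outputs)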
Example #10
def build_inceptionvresnetv2_model():
    # get the base model without the dense classification top
    base_model = inceptionresnetv2.InceptionResNetV2(weights='imagenet',
                                                     include_top=False)
    new_dense = base_model.output
    # add the new denses to classify the hate images
    new_dense = Dense(1024, activation='relu')(new_dense)
    predictions = Dense(2, activation='softmax')(new_dense)
    model = Model(inputs=base_model.input, outputs=predictions)
    # we will only train the new denses for the baseline
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=["accuracy"])
    return model
Example #11
def create_model(img_size, model_type, base_name):

    if model_type == 0:
        print("Creating MobileNet model")
        base = mobilenet.MobileNet(input_shape=img_size,
                                   include_top=False,
                                   weights='imagenet')

    elif model_type == 1:
        print("Creating InceptionV3 model")
        base = inception_v3.InceptionV3(input_shape=img_size,
                                        include_top=False,
                                        weights='imagenet')

    elif model_type == 2:
        print("Creating Resnet50 model")
        base = resnet50.ResNet50(input_shape=img_size,
                                 include_top=False,
                                 weights='imagenet')

    elif model_type == 3:
        print("Creating InceptionResNet-V2 model")
        base = inception_resnet_v2.InceptionResNetV2(input_shape=img_size,
                                                     include_top=False,
                                                     weights='imagenet')

    top = base.output
    top = GlobalAveragePooling2D()(top)

    top = Dense(units=2048,
                activation='relu',
                kernel_regularizer=None,
                name='fc_1')(top)
    predictions = Dense(units=n_classes,
                        activation='softmax',
                        kernel_regularizer=l2(l=wd),
                        name='softmax')(top)

    model_combined = Model(inputs=base.input,
                           outputs=predictions,
                           name=base_name)

    path_to_weights = 'weights/' + weights_filename
    print('Loading weights from ' + path_to_weights)
    model_combined.load_weights(filepath=path_to_weights, by_name=True)

    return model_combined
Example #12
def load_dcn_model(model_name = 'VGG16'):
    """
    Returns a pretrained Keras DCN model. For each model we return both the complete
    model (last-layer activations) and an intermediate model (penultimate-layer activations).
    model_name can be:
        - VGG16
        - VGG19
        - xception
        - inception_resnet_v2
        - resnet50
        - inceptionV3
        - densenet
    """

    print "Loading model "+model_name

    if(model_name == 'VGG16'):
        model = vgg16.VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
        int_model = Model(inputs=model.input, outputs = model.get_layer('fc2').output)
    elif(model_name == 'VGG19'):
        model = vgg19.VGG19(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
        int_model = Model(inputs=model.input, outputs = model.get_layer('fc2').output)
    elif(model_name == 'xception'):
        model = xception.Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
        int_model = Model(inputs=model.input, outputs = model.get_layer('avg_pool').output)
    elif(model_name == 'inception_resnet_v2'):
        model = inception_resnet_v2.InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
        int_model = Model(inputs=model.input, outputs = model.get_layer('avg_pool').output)
    elif(model_name == 'resnet50'):
        model = resnet50.ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
        int_model = Model(inputs=model.input, outputs = model.get_layer('flatten_1').output)
    elif(model_name == 'inceptionV3'):
        model = inception_v3.InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
        int_model = Model(inputs=model.input, outputs = model.get_layer('avg_pool').output)
    elif(model_name == 'densenet'):
        model = densenet.DenseNet121(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
        int_model = Model(inputs=model.input, outputs = model.get_layer('avg_pool').output)
    else:
        print "Model not found."
        return None
    
    print "Model loaded."

    return (model, int_model)
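
A usage sketch for the pair of models returned above (the image path and preprocessing are assumptions):

import numpy as np
from keras.preprocessing import image
from keras.applications import inception_resnet_v2

model, int_model = load_dcn_model('inception_resnet_v2')
img = image.load_img('example.jpg', target_size=(299, 299))  # hypothetical path
x = inception_resnet_v2.preprocess_input(
    np.expand_dims(image.img_to_array(img), axis=0))
probs = model.predict(x)         # full model: 1000-way ImageNet softmax
features = int_model.predict(x)  # intermediate model: 'avg_pool' activations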
Example #13
def main(args):
    
    file_trainval = 'Paintings_Classification_trainval.csv'
    file_test = 'Paintings_Classification_test.csv'
    demonet = 'inception_resnet_v2'
    if demonet == 'inception_resnet_v2':
        base_model = inception_resnet_v2.InceptionResNetV2(include_top=False, weights='imagenet', input_tensor=None, input_shape=None, pooling=None)
    print('Base model loaded')
    outputs = Dense(10, activation='sigmoid')(base_model.output)
    model = Model(base_model.inputs, outputs)
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    batch_size = 16
    print('Before compiling')
    model.compile(loss='binary_crossentropy', optimizer=sgd,
                  metrics=[metrics.categorical_accuracy, average_precision])
    print('model compiled')
    steps_per_epoch = 5000 // batch_size  # integer number of steps
    model.fit_generator(imageLoader(file_trainval, batch_size),
                        steps_per_epoch=steps_per_epoch, epochs=20,
                        validation_data=imageLoader(file_test, batch_size))
    print('End training')
    return 0
Example #14
def get_model_v2():
    baseModel = inception_resnet_v2.InceptionResNetV2(
        include_top=False,
        weights='imagenet',
        input_tensor=Input(shape=(SIZEINPUT, SIZEINPUT, 3)))
    for layer in baseModel.layers:
        layer.trainable = False

    features = baseModel.output
    x = GlobalAveragePooling2D(name="avg_pool")(features)
    #x = Flatten()(x)  # Note: GlobalAveragePooling2D already yields a 2-D tensor, so Flatten is unnecessary
    x = Dropout(0.5)(x)
    x = Dense(12, activation='softmax')(x)
    model = Model(inputs=baseModel.inputs, outputs=x)

    sgd = SGD(lr=LR, momentum=0.9)
    model.compile(optimizer=sgd,
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model
Example #15
def create_base_model(input_shape, base_id):

    # Inception-v3
    if base_id == 0:
        base = inception_v3.InceptionV3(input_shape=input_shape,
                                        weights='imagenet',
                                        include_top=False)
        base_name = 'Inception-V3'

    # MobileNet
    elif base_id == 1:
        base = mobilenet.MobileNet(input_shape=input_shape,
                                   weights='imagenet',
                                   include_top=False)
        base_name = 'MobileNet'

    #Inception-ResNet-v2
    elif base_id == 2:
        base = inception_resnet_v2.InceptionResNetV2(input_shape=input_shape,
                                                     weights='imagenet',
                                                     include_top=False)
        base_name = 'InceptionResNet-v2'

    #ResNet50
    elif base_id == 3:
        base = resnet50.ResNet50(input_shape=input_shape,
                                 weights='imagenet',
                                 include_top=False)
        base_name = 'ResNet50'

    print("\nBase Network: %s" % base_name)

    top = GlobalAveragePooling2D()(base.output)

    # freeze all layers in the base network
    for layer in base.layers:
        layer.trainable = False

    model = Model(inputs=base.input, outputs=top, name='base_model')

    return model
Example #16
def create_model(feature_extraction_method, num_classes, path_cnn_pre_trained,
                 input_size):

    if (feature_extraction_method == 'fine_tuning_lenet'):
        model = load_model(path_cnn_pre_trained)
        input_image = input_size
    elif (feature_extraction_method == 'fine_tuning_vgg16'):
        model = vgg16.VGG16(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'fine_tuning_vgg19'):
        model = vgg19.VGG19(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'fine_tuning_xception'):
        model = xception.Xception(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'fine_tuning_resnet'):
        model = resnet.ResNet50(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 224
    elif (feature_extraction_method == 'fine_tuning_inception_resnet'):
        model = inception_resnet.InceptionResNetV2(weights='imagenet',
                                                   include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'fine_tuning_nasnet'):
        model = nasnet.NASNetLarge(weights='imagenet', include_top=True)
        #layer_name = 'global_average_pooling2d_1'
        input_image = 331

    #Removing the last layer
    model.layers.pop()
    new_layer = Dense(num_classes, activation='softmax', name='predictions')
    model = Model(model.input, new_layer(model.layers[-1].output))

    model.summary()

    return model, input_image
Example #17
def build_model_Inception_Resnet():

    inception_resnet = inception_resnet_v2.InceptionResNetV2(
        weights='imagenet',
        include_top=False,
        input_shape=(224, 224, 3),
    )

    model = Sequential()
    model.add(inception_resnet)
    model.add(layers.GlobalMaxPooling2D())
    model.add(layers.Dropout(0.5))
    
    model.add(layers.Dense(2, activation='softmax'))  # softmax pairs with the categorical_crossentropy loss below
    
    model.compile(
        loss='categorical_crossentropy',
        optimizer=Adam(lr=0.0005),
        metrics=['accuracy']
    )
    
    return model
Example #18
def create_model(feature_extraction_method, path_cnn_pre_trained, input_size):

    if (feature_extraction_method == 'pretrained_lenet'):
        model = load_model(path_cnn_pre_trained)
        input_image = input_size
    elif (feature_extraction_method == 'pretrained_vgg16'):
        model = vgg16.VGG16(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'pretrained_vgg19'):
        model = vgg19.VGG19(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'pretrained_xception'):
        model = xception.Xception(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'pretrained_resnet'):
        model = resnet.ResNet50(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 224
    elif (feature_extraction_method == 'pretrained_inception_resnet'):
        model = inception_resnet.InceptionResNetV2(weights='imagenet',
                                                   include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'pretrained_nasnet'):
        model = nasnet.NASNetLarge(weights='imagenet', include_top=True)
        #layer_name = 'global_average_pooling2d_1'
        input_image = 331

    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=model.layers[-2].output)

    model.summary()

    return intermediate_layer_model, input_image
Example #19
def inception_res(input_shape=(224, 224, 3), freeze=0):
    # Note: the freeze argument is not used in this snippet.
    model = inception_resnet_v2.InceptionResNetV2(weights='imagenet',
                                                  input_shape=input_shape,
                                                  include_top=False)
    return model
Example #20
#     classlabel.append(CLASS_INDEX[str(i)][1])
# classes = np.array(classlabel)

num_classes = len(classes)

##### Predict

# Load the retrained CNN model

# Model reconstruction from JSON file
# with open(model_json, 'r') as f:
#    model_trained = model_from_json(f.read())

model_trained = inception_resnet_v2.InceptionResNetV2(include_top=False,
                                                      weights='imagenet',
                                                      input_tensor=None,
                                                      input_shape=(img_width,
                                                                   img_height,
                                                                   3))
x = model_trained.output
x = GlobalAveragePooling2D()(x)  # before dense layer
x = Dense(1024, activation='relu')(x)
predictions_new = Dense(num_classes, activation='softmax', name='softmax')(x)
model_trained = Model(inputs=model_trained.input, outputs=predictions_new)

# Load weights into the new model
model_trained.load_weights(trainedweights_name)

# model_final = multi_gpu_model(model_final, gpus=2, cpu_merge=True, cpu_relocation=False)

# https://www.kaggle.com/giuseppemerendino/deep-mushroom-keras-t-sne
Example #21
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    ssl._create_default_https_context = _create_unverified_https_context



#Load the Inception_V3 model
# inception_v3_model = inception_v3.InceptionV3(weights='imagenet')

#Load the Inception_V4_resnetv2 model
inceptionResnet_v2_model = inception_resnet_v2.InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
# inceptionResnet_v2_model = inception_resnet_v2.InceptionResNetV2( weights='imagenet')
# nasnet_model = nasnet.NASNetLarge(input_shape=None, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
# xception_model = xception.Xception(include_top=True, weights='imagenet')


from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.imagenet_utils import decode_predictions
import matplotlib.pyplot as plt
%matplotlib osx
# or qt5.
# PyQt5 or similar required.. (https://stackoverflow.com/questions/52346254/importerror-failed-to-import-any-qt-binding-python-tensorflow)


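The snippet stops right after the imports; a minimal continuation that actually runs a prediction with the loaded model could look like this (the image file is an assumption):

import numpy as np

original = load_img('example.jpg', target_size=(299, 299))  # hypothetical image
image_batch = np.expand_dims(img_to_array(original), axis=0)
processed = inception_resnet_v2.preprocess_input(image_batch.copy())
predictions = inceptionResnet_v2_model.predict(processed)
print(decode_predictions(predictions, top=3))
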
Example #22
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
    return


x_train, y_train = load_data(
    '/media/tang/code/tianchi/data/trainset',
    '/media/tang/code/tianchi/data/trainset/Annotations/label.csv')

width = 299

base_model = inception_resnet_v2.InceptionResNetV2(weights='imagenet',
                                                   input_shape=(width, width,
                                                                3),
                                                   include_top=False,
                                                   pooling='avg')

input_tensor = Input((width, width, 3))
x = input_tensor
x = Lambda(inception_resnet_v2.preprocess_input)(x)
x = base_model(x)
x = Dropout(0.5)(x)
x = [
    Dense(count, activation='softmax', name=name)(x)
    for name, count in label_count.items()
]

model = Model(input_tensor, x)
model.compile(optimizer='adam',
Example #23
def main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("train_class", type=str, choices=classes.keys())
    parser.add_argument("data_dir", type=str)
    parser.add_argument('-e', "--epoch",
                        required=False,
                        type=int,
                        default=64,
                        dest="epoch")
    parser.add_argument('-ef', "--epoch-fine-tune",
                        required=False,
                        type=int,
                        default=200,
                        dest="epoch_fine_tune")
    parser.add_argument('-b', '--batch-size',
                        required=False,
                        default=1024,
                        type=int,
                        dest="batch")
    parser.add_argument('-lr', '--learning-rate',
                        required=False,
                        default=1e-4,
                        type=float,
                        dest="lr")
    parser.add_argument('-decay', '--learning-rate-decay',
                        required=False,
                        default=1e-6,
                        type=float,
                        dest="decay")
    # Caveat: argparse's type=bool treats any non-empty string (even "False") as True.
    parser.add_argument('-ignore-npz', '--ignore-precomputed-learning-file',
                        required=False,
                        default=False,
                        type=bool,
                        dest="ignore_npz")
    parser.add_argument('-ri', '--use-random-weight-initialisation',
                        required=False,
                        default=False,
                        type=bool,
                        dest="random_init")
    parser.add_argument('-ua', '--unfroze-all-convolution-layer-directly',
                        required=False,
                        default=False,
                        type=bool,
                        dest="unfroze_all")
    parser.add_argument('-m', '--model-name',
                        required=False,
                        default="MobileNetV2",
                        type=str,
                        dest="model_name")
    parser.add_argument('-d', '--dense-layer-size',
                        required=False,
                        nargs="*",
                        default=[],
                        type=int,
                        dest="dense_size")
    parser.add_argument('-is', '--input-size',
                        required=False,
                        default=96,
                        type=int,
                        dest="input_size")
    parser.add_argument('-viz', '--data-visualisation',
                        required=False,
                        default=False,
                        type=bool,
                        dest="data_visualisation")
    args = parser.parse_args()
    batch_size = args.batch

    class_name = args.train_class
    out_classes = classes[class_name]["signs_classes"]
    rotation_and_flips = classes[class_name]["rotation_and_flips"]
    h_symmetry_classes = classes[class_name]["h_symmetry"]
    try:
        merge_sign_classes = classes[class_name]["merge_sign_classes"]
    except KeyError:
        merge_sign_classes = None

    mapping = {c: i for i, c in enumerate(out_classes)}
    mapping_id_to_name = {i: c for c, i in mapping.items()}

    os.makedirs(class_name, exist_ok=True)

    x_train, y_train, x_test, y_test = get_data_for_master_class(class_name=class_name,
                                                                 mapping=mapping,
                                                                 mapping_id_to_name=mapping_id_to_name,
                                                                 rotation_and_flips=rotation_and_flips,
                                                                 data_dir=args.data_dir,
                                                                 merge_sign_classes=merge_sign_classes,
                                                                 h_symmetry_classes=h_symmetry_classes,
                                                                 image_size=(args.input_size, args.input_size),
                                                                 ignore_npz=args.ignore_npz,
                                                                 out_classes=out_classes)
    if args.data_visualisation:
        preprocess_input = lambda x: x
        model = None
    else:
        if args.random_init:
            weights = None
        else:
            weights = 'imagenet'
        if args.model_name == "MobileNetV2":
            preprocess_input = mobilenetv2.preprocess_input
            base_model = mobilenetv2.MobileNetV2(weights=weights,
                                                 include_top=False,
                                                 input_shape=(args.input_size, args.input_size, 3),
                                                 pooling='avg')
        elif args.model_name == "InceptionResNetV2":
            preprocess_input = inception_resnet_v2.preprocess_input
            base_model = inception_resnet_v2.InceptionResNetV2(weights=weights,
                                                               include_top=False,
                                                               input_shape=(args.input_size, args.input_size, 3),
                                                               pooling='avg')
        elif args.model_name == "NASNetLarge":
            preprocess_input = nasnet.preprocess_input
            base_model = nasnet.NASNetLarge(weights=weights,
                                            include_top=False,
                                            input_shape=(args.input_size, args.input_size, 3),
                                            pooling='avg')
        else:
            raise ValueError("unknown model name {}, should be one of {}".format(args.model_name,
                                                                                 ["MobileNetV2", "InceptionResNetV2",
                                                                                  "NASNetLarge"]))
        predictions = base_model.outputs[0]
        for s in args.dense_size:
            predictions = Dense(s, activation='relu')(predictions)
        predictions = Dense(len(out_classes), activation='softmax')(predictions)
        model = Model(inputs=base_model.input, outputs=predictions)

    # model.summary()
    # blocks = {}
    # for i, layer in enumerate(base_model.layers):
    #     s = layer.name.split('_')
    #     if s[0] == "block":
    #         b = int(s[1])
    #         if b not in blocks:
    #             blocks[b] = [i]
    #         else:
    #             blocks[b].append(i)
    # exit(0)

    callbacks = [ModelCheckpoint(filepath="{}/checkpoint.h5".format(class_name),
                                 monitor="val_loss",
                                 mode='min',
                                 verbose=0,
                                 save_best_only="True",
                                 save_weights_only=False,
                                 period=1),
                 EarlyStopping(monitor='val_acc',
                               mode='max',
                               min_delta=0.001,
                               patience=40,
                               verbose=1,
                               restore_best_weights=True)
                 ]

    x_test = np.stack([preprocess_input(i) for i in x_test])
    datagen = ImageDataGenerator(featurewise_center=False,
                                 featurewise_std_normalization=False,
                                 rotation_range=10,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 brightness_range=(0.5, 1.4),
                                 shear_range=3.0,
                                 zoom_range=(0.7, 1.1),
                                 fill_mode='nearest',
                                 horizontal_flip=False,
                                 vertical_flip=False,
                                 preprocessing_function=preprocess_input)
    datagen.fit(x_train)

    if args.data_visualisation:
        for b in datagen.flow(x_train, y_train, batch_size=1):
            im, im_class = b[0][0], b[1][0]
            im_class = int(np.argmax(im_class))
            plt.imshow(im.astype(int))
            plt.title(out_classes[im_class])
            plt.show()
        return

    if not args.random_init:
        # if the network is not randomly initialized, we first fine tune the last layers
        for layer in base_model.layers:
            layer.trainable = False
        model.compile(optimizer=rmsprop(lr=args.lr, decay=args.decay),
                      loss='categorical_crossentropy', metrics=["accuracy"])
        history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                      steps_per_epoch=ceil(len(x_train) / batch_size),
                                      epochs=args.epoch,
                                      verbose=1,
                                      validation_data=(x_test, y_test),
                                      use_multiprocessing=True,
                                      callbacks=callbacks)
        plot_history(history, "{0}/{1}_{0}_dense_".format(class_name, args.model_name))
        model.save("{0}/{1}_{0}_dense.h5".format(class_name, args.model_name), overwrite=True)

        if not args.unfroze_all:
            # unfreeze the last 3 blocks of the base network
            for layer in model.layers[:113]:
                layer.trainable = False
            for layer in model.layers[113:]:
                layer.trainable = True
            model.compile(optimizer=SGD(lr=args.lr, momentum=0.9, decay=args.decay),
                          loss='categorical_crossentropy', metrics=["accuracy"])
            history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                          steps_per_epoch=ceil(len(x_train) / batch_size),
                                          epochs=args.epoch_fine_tune,
                                          verbose=1,
                                          validation_data=(x_test, y_test),
                                          use_multiprocessing=True,
                                          callbacks=callbacks)
            plot_history(history, "{0}/{1}_{0}_fine_tuning_1_".format(class_name, args.model_name))

            model.save("{0}/{1}_{0}_1.h5".format(class_name, args.model_name), overwrite=True)

            # unfreeze the last 6 blocks of the base network
            for layer in model.layers[:87]:
                layer.trainable = False
            for layer in model.layers[87:]:
                layer.trainable = True
            model.compile(optimizer=SGD(lr=args.lr, momentum=0.9, decay=args.decay),
                          loss='categorical_crossentropy', metrics=["accuracy"])
            history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                          steps_per_epoch=ceil(len(x_train) / batch_size),
                                          epochs=args.epoch_fine_tune,
                                          verbose=1,
                                          validation_data=(x_test, y_test),
                                          use_multiprocessing=True,
                                          callbacks=callbacks)
            plot_history(history, "{0}/{1}_{0}_fine_tuning_2_".format(class_name, args.model_name))

            model.save("{0}/{1}_{0}_2.h5".format(class_name, args.model_name), overwrite=True)

    # unfreeze the whole model
    for layer in model.layers:
        layer.trainable = True
    model.compile(optimizer=SGD(lr=args.lr, momentum=0.9, decay=args.decay),
                  loss='categorical_crossentropy', metrics=["accuracy"])
    history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                  steps_per_epoch=ceil(len(x_train) / batch_size),
                                  epochs=args.epoch_fine_tune,
                                  verbose=1,
                                  validation_data=(x_test, y_test),
                                  use_multiprocessing=True,
                                  callbacks=callbacks)
    plot_history(history, "{0}/{1}_{0}_fine_tuning_f_".format(class_name, args.model_name))

    model.save("{0}/{1}_{0}_final.h5".format(class_name, args.model_name), overwrite=True)
Example #24
def get_model():

    num_classes = 10
    input_shape = (MODELS[MODEL]['size'], MODELS[MODEL]['size'], 3)
    #preprocess = imagenet_utils.preprocess_input

    input_image = Input(shape=input_shape)

    if MODEL == "densenet121":
        base_model = densenet.DenseNet121(include_top=False,
                                          pooling=None,
                                          weights='imagenet',
                                          input_shape=input_shape)
    elif MODEL == "densenet169":
        base_model = densenet.DenseNet169(include_top=False,
                                          pooling=None,
                                          weights='imagenet',
                                          input_shape=input_shape)
    elif MODEL == "densenet201":
        base_model = densenet.DenseNet201(include_top=False,
                                          pooling=None,
                                          weights='imagenet',
                                          input_shape=input_shape)
    elif MODEL == "inceptionresnet":
        base_model = inception_resnet_v2.InceptionResNetV2(
            include_top=False,
            pooling=None,
            weights='imagenet',
            input_shape=input_shape)
    elif MODEL == "inception":
        base_model = inception_v3.InceptionV3(include_top=False,
                                              pooling=None,
                                              weights='imagenet',
                                              input_shape=input_shape)
    elif MODEL == "mobilenet":
        base_model = mobilenet.MobileNet(include_top=False,
                                         pooling=None,
                                         weights='imagenet',
                                         input_shape=input_shape)
    elif MODEL == "resnet":
        base_model = resnet50.ResNet50(include_top=False,
                                       pooling=None,
                                       weights='imagenet',
                                       input_shape=input_shape)
    elif MODEL == "vgg16":
        base_model = vgg16.VGG16(include_top=False,
                                 pooling=None,
                                 weights='imagenet',
                                 input_shape=input_shape)
    elif MODEL == "vgg19":
        base_model = vgg19.VGG19(include_top=False,
                                 pooling=None,
                                 weights='imagenet',
                                 input_shape=input_shape)
    else:
        print("Bad model type:", MODEL)
        sys.exit(-1)

    x = input_image
    x = base_model(x)
    x = Reshape((-1, ))(x)
    #x = Dropout(rate=?)(x)
    x = Dense(512, activation='relu', name='fc1')(x)
    x = Dropout(0.3, name='dropout_fc1')(x)
    x = Dense(128, activation='relu', name='fc2')(x)
    x = Dropout(0.3, name='dropout_fc2')(x)
    prediction = Dense(num_classes, activation="softmax", name="predictions")(x)

    # this is the model we will train
    my_model = Model(inputs=input_image, outputs=prediction)

    # compile the model (should be done *after* setting layers to non-trainable)
    opt = optimizers.Adam(lr=1e-4)
    my_model.compile(optimizer=opt,
                     loss='categorical_crossentropy',
                     metrics=['acc'])

    my_model.summary()
    return my_model
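
get_model reads the module-level MODEL and MODELS globals; a minimal stand-in (the sizes are assumptions) could be:

MODEL = "inceptionresnet"              # selects the backbone branch above
MODELS = {
    "inceptionresnet": {"size": 299},  # assumed input size per backbone
    "inception": {"size": 299},
    "resnet": {"size": 224},
}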
Example #25
            #modelo.summary()
        if (Modelo == 1):
            from keras.applications.inception_v3 import preprocess_input
            imagen = preprocess_input(imagen)
            modelo = inception_v3.InceptionV3(weights='imagenet',
                                              include_top=True)
            #modelo.summary()
        if (Modelo == 2):
            from keras.applications.resnet50 import preprocess_input
            imagen = preprocess_input(imagen)
            modelo = resnet50.ResNet50(weights='imagenet', include_top=True)
            #modelo.summary()
        if (Modelo == 3):
            from keras.applications.inception_resnet_v2 import preprocess_input
            imagen = preprocess_input(imagen)
            modelo = inception_resnet_v2.InceptionResNetV2(weights='imagenet',
                                                           include_top=True)
            #modelo.summary()
        if (Modelo == 4):
            from keras.applications.xception import preprocess_input
            imagen = preprocess_input(imagen)
            modelo = xception.Xception(weights='imagenet', include_top=True)
            #modelo.summary()

        prediccion = modelo.predict(imagen)

        from keras.applications.imagenet_utils import decode_predictions

        prediccion_decodificada = decode_predictions(prediccion)
        #prediccion_decodificada=np.array(prediccion_decodificada)
        print("La prediccion del modelo %s es %s con una probabilidad de %f" %
              (modelos[Modelo], prediccion_decodificada[0][0][1],
Example #26
y[12500:] = 1

from sklearn.model_selection import train_test_split
# Does the dataset need to be shuffled here? (train_test_split shuffles by default)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2)

# from keras.applications.resnet50 import ResNet50

# base_model = ResNet50(input_tensor=Input((img_height, img_width, 3)), weights='imagenet', include_top=False)

i_input_tensor = Input((img_height, img_width, 3))
# x_input_tensor = inception_resnet_v2.preprocess_input(i_input_tensor)  # wrong: don't call preprocess_input on the symbolic tensor directly
x_input_tensor = Lambda(inception_resnet_v2.preprocess_input)(i_input_tensor)

base_model = inception_resnet_v2.InceptionResNetV2(input_tensor=x_input_tensor,
                                                   weights='imagenet',
                                                   include_top=False)
# The input_tensor above isn't preprocessed, is it? The ResNet50 reference example doesn't use preprocessing.
# But the version above couldn't find the data.
# The version below does run, but the results... training set is decent (loss: 0.16, acc: 0.93),
# yet on the validation set: loss: 5.9197, acc: 0.5622.
# base_model = inception_resnet_v2.InceptionResNetV2(weights='imagenet', include_top=False)

for layer in base_model.layers:
    layer.trainable = False

i_output = GlobalAveragePooling2D()(base_model.output)
i_output = Dropout(0.5)(i_output)
i_predictions = Dense(1, activation='sigmoid')(i_output)
model = Model(base_model.input, i_predictions)

model.compile(optimizer='adadelta',
Example #27
    non_trainable_count = np.sum([K.count_params(w) for w in model.non_trainable_weights])
    return trainable_count, non_trainable_count
    #print('Total params: {:,}'.format(trainable_count + non_trainable_count))
    #print('Trainable params: {:,}'.format(trainable_count))
    #print('Non-trainable params: {:,}'.format(non_trainable_count))

paramcount_list = []
for model_name in ["vgg16", "nasnet", "inception_resnet", "inceptionv3", "xception"]:
    if model_name == "vgg16":
        base_model = vgg16.VGG16(weights=None, include_top=False)
    elif model_name == "inceptionv3":
        base_model = inception_v3.InceptionV3(weights=None, include_top=False)
    elif model_name == "resnet50":
        base_model = resnet.ResNet50(weights=None, include_top=False)
    elif model_name == "inception_resnet":
        base_model = inception_resnet_v2.InceptionResNetV2(weights=None, include_top=False)
    elif model_name == "nasnet":
        base_model = nasnet.NASNetLarge(weights=None, include_top=False)
    elif model_name == "xception":
        base_model = xception.Xception(weights=None, include_top=False)
    print("model name is:", model_name)
    paramcount_list.append((model_name, "base", count_params(base_model)))
    #Adding a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    customlayers = customlayer()
    output_layer = customlayers(x)
    model_stg1 = Model(inputs=base_model.input, outputs=output_layer)
    model_json = model_stg1.to_json()
    modelpath = liby + model_name + ".json"
    with open(modelpath, "w") as json_file:
Example #28
from keras.models import Model
from keras.applications import inception_v3, inception_resnet_v2
from keras.layers import Dense, ELU, Dropout, BatchNormalization
filename = "finalEnsemble/inception_v4_difftop_832.h5"
model = inception_resnet_v2.InceptionResNetV2(include_top=False,
                                              input_shape=(256, 256, 3),
                                              pooling='avg')
x = model.output
x = Dense(512)(x)
x = BatchNormalization(name="lol")(x)
x = ELU()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
pred = Dense(18, activation='softmax')(x)

new_model = Model(inputs=model.input, outputs=pred)
new_model.load_weights(filename)
new_model.save(filename)
Example #29
    def __init__(self):
        self.extractor = inception_resnet_v2.InceptionResNetV2(
            weights='imagenet', include_top=False, pooling="avg")
        self.classifier = inception_resnet_v2.InceptionResNetV2(
            weights='imagenet')
Example #30
print('trainX.shape:', trainX.shape, ' valX.shape:', valX.shape)

# augmenting the dataset
aug = ImageDataGenerator(rotation_range=25,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")
print("loading the model")
# set the model
model = None
if model_name == 'inception':
    model = inception_resnet_v2.InceptionResNetV2(include_top=False,
                                                  weights='imagenet',
                                                  input_shape=img_dims)
elif model_name in ['vgg16', 'VGG16']:
    model = vgg16.VGG16(include_top=False,
                        weights='imagenet',
                        input_shape=img_dims)
elif model_name in ['vgg19', 'VGG19']:
    model = vgg19.VGG19(include_top=False,
                        weights='imagenet',
                        input_shape=img_dims)
elif model_name in ['resnet50', 'ResNet50']:
    model = ResNet50(include_top=False,
                     weights='imagenet',
                     input_shape=img_dims)
elif model_name in ['resnet101', 'ResNet101']:
    model = resnet.ResNet101(include_top=False,