Code Example #1
from keras.models import Model
from keras_vggface.vggface import VGGFace


def main():

    architecture = 'resnet50'
    # There is no need to load the full checkpoint here
    vggface = VGGFace(include_top=False,
                      model=architecture,
                      input_shape=(224, 224, 3)
                     )
    # TODO: if we want to load the checkpoint of a given farm after
    # training, it could be done via load_model()

    # Add classification layers on top of the convolutional network
    if architecture == 'vgg16':
        last_layer = vggface.get_layer('flatten').output
    elif architecture == 'resnet50':
        last_layer = vggface.get_layer('flatten_1').output
    elif architecture == 'senet50':
        last_layer = vggface.get_layer('flatten_1').output

    model = Model(inputs=vggface.input, outputs=last_layer)
    print('Model loaded.')
    model.summary()

    # the name of the layer we want to visualize
    # (see model definition at keras/applications/vgg16.py)
    LAYER_NAME = 'conv5_3'

    # TODO: clean up the script a bit.
    # vgg = vgg16.VGG16(weights='imagenet', include_top=False)
    # vgg.summary()

    # example function call
    visualize_layer(model, LAYER_NAME)
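A minimal usage sketch for the feature extractor above, assuming `model` is the truncated Model built in main() and `face.jpg` is a hypothetical input image (keras_vggface preprocessing uses version=2 for resnet50/senet50 and version=1 for vgg16):

import numpy as np
from keras.preprocessing import image
from keras_vggface import utils

img = image.load_img('face.jpg', target_size=(224, 224))  # hypothetical file
x = np.expand_dims(image.img_to_array(img), axis=0)
x = utils.preprocess_input(x, version=2)  # version=2 matches the resnet50 backbone
features = model.predict(x)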
Code Example #2
 def __init__(self, layer='fc6', model='vgg16'):
     self.model = model
     self.layer = layer
     # Get model with pretrained weights. model: vgg16, resnet50
     vgg_model = VGGFace(model=model)
     if self.model == 'vgg16':
         # We'll extract features at the fc6 layer
         self.model = Model(inputs=vgg_model.input,
                            outputs=vgg_model.get_layer(layer).output)
     elif self.model == 'resnet50':
         resnet_out = vgg_model.get_layer(layer).output
         out = Flatten(name='flatten')(resnet_out)
         self.model = Model(inputs=vgg_model.input, outputs=out)
Code Example #3
    def initialize_networkmodel(self):
        """
        Funkcja inicjalizuje model sieci. Model użyty to VGGFace bez trzech ostatnich warstw, które
        zostały zamienione na warstwy o 64, 32 neuronach, z ostatnią warstwą o ilości neuronów
        równej ilości klas. Następuje inicjalizacja parametrów sieci takich jak momentum czy wskaźnik
        uczenia się. Określane są metody ewaluacji precyzji i straty.
        """

        ###### Model structure ######
        vgg_model = VGGFace(include_top=False,
                            weights='vggface',
                            input_shape=(180, 180, 3))
        last_layer = vgg_model.get_layer('pool5').output
        # Add layers
        x = Flatten(name='flatten')(last_layer)
        x = Dense(64, activation='relu', name='fc6')(x)
        x = Dense(32, activation='relu', name='fc7')(x)
        out = Dense(self.nb_class, activation='softmax', name='fc8')(x)
        self.custom_vgg_model = Model(vgg_model.input, out)
        # Freeze all layers except the last three, which were newly added
        layer_count = 0
        for layer in self.custom_vgg_model.layers:
            layer_count = layer_count + 1
        for l in range(layer_count - 3):
            self.custom_vgg_model.layers[l].trainable = False

        ###### Compile the model and the training parameters (learning rate, momentum) ######
        sgd = optimizers.SGD(lr=5e-3, decay=1e-6, momentum=0.9, nesterov=True)
        self.custom_vgg_model.compile(optimizer=sgd,
                                      loss='categorical_crossentropy',
                                      metrics=['accuracy'])
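The layer-counting loop above can be written more compactly; an equivalent sketch that freezes everything except the three newly added layers:

for layer in self.custom_vgg_model.layers[:-3]:
    layer.trainable = False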
Code Example #4
File: Age_Model.py  Project: Nitin-Mane/FaceAgingGANs
def ageModel(input_shape=(224, 224, 3)):

    vgg_model = VGGFace(include_top=False,
                        model='vgg16',
                        weights='vggface',
                        input_shape=input_shape)
    print("Base VGG model summary.")
    vgg_model.summary()

    # add top layers
    last_layer = vgg_model.get_layer('pool5').output
    for layer in vgg_model.layers:
        layer.trainable = False

    mid_layer = Conv2D(4096, (7, 7), activation='relu')(last_layer)
    mid_layer = Dropout(0.5)(mid_layer)
    mid_layer = Conv2D(4096, (1, 1), activation='relu')(mid_layer)
    mid_layer = Dropout(0.5)(mid_layer)  # prevent overfitting
    mid_layer = Conv2D(101, (1, 1), name="prediction")(mid_layer)
    mid_layer = Flatten(name='flatten')(mid_layer)
    output = Activation('softmax')(mid_layer)

    faceage_vgg_model = Model(vgg_model.input, output)

    print("Face age model summary.")
    faceage_vgg_model.summary()
    return faceage_vgg_model
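One common way to turn the 101-way softmax above into a scalar age estimate is the expected value over the class probabilities; a minimal sketch, assuming `x` is a preprocessed batch of shape (N, 224, 224, 3):

import numpy as np

model = ageModel()
probs = model.predict(x)          # shape (N, 101), one probability per age bin
ages = probs.dot(np.arange(101))  # expected age per image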
Code Example #5
 def vggface_architecture(self):
     vgg_model = VGGFace(model='resnet50', include_top=False,
                         input_shape=(200, 200, 3))
     last_layer = vgg_model.get_layer('avg_pool').output
     x = Flatten(name='flatten')(last_layer)
     out = Dense(7, activation='softmax', name='classifier')(x)
     return Model(vgg_model.input, out)
Code Example #6
 def vggfaces_features(self):
     print('using vggfaces network for feature extraction')
     # Convolution Features
     features = np.zeros([tf.flags.FLAGS.no_of_images, 2048])
 #    vgg_model_conv = VGGFace(include_top=False, input_shape=(224, 224, 3), pooling='avg') # pooling: None, avg or max
     # FC7 Features
     vgg_model = VGGFace() # pooling: None, avg or max
     out = vgg_model.get_layer('fc7').output
     vgg_model_fc7 = Model(vgg_model.input, out)
 
     feature_filename = '%s-feature-vggfaces-%d.p'%(
             tf.flags.FLAGS.image_path.split('/')[-2], tf.flags.FLAGS.no_of_images)
     if os.path.exists(feature_filename):
         with open(feature_filename, 'rb') as f:
             features, self.image_names = pickle.load(f)
     else:
         features = np.zeros([tf.flags.FLAGS.no_of_images, 4096])
         for i, name in enumerate(self.image_names):
             img = image.load_img(name, target_size=(224, 224))
             x = image.img_to_array(img)
             x = np.expand_dims(x, axis=0)
             x = utils.preprocess_input(x)
             print('image name: %s progress: %d/%d'%(
                     name, i, tf.flags.FLAGS.no_of_images))
             features[i, :] = vgg_model_fc7.predict(x)
         with open(feature_filename, 'wb') as f:
             pickle.dump((features, self.image_names), f)
     return features
Code Example #7
    def build_network(self):
        # VGG16 Facenet (v1)
        print('[+] Building CNN')

        vgg_notop = VGGFace(include_top=False,
                            input_shape=(224, 224, 3),
                            pooling='avg')
        last_layer = vgg_notop.get_layer('pool5').output
        x = Conv2D(filters=64, kernel_size=1, activation='relu')(last_layer)
        x = BatchNormalization()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=1)(x)
        x = Conv2D(filters=64, kernel_size=2, activation='relu')(x)
        x = BatchNormalization()(x)
        x = Flatten(name='flatten')(x)
        x = Dense(1024, activation='relu', name='fc6')(x)
        x = Dense(2048, activation='relu', name='fc7')(x)
        print("Emotions count", len(EMOTIONS))

        out = Dense(6, activation='softmax', name='classifier')(x)

        custom_vgg_model = Model(vgg_notop.input, out)

        # adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)

        custom_vgg_model.compile(optimizer='adam',
                                 loss='categorical_crossentropy',
                                 metrics=['accuracy'])
        plot_model(custom_vgg_model, to_file='model2.png', show_shapes=True)
        self.model = custom_vgg_model
Code Example #8
def resnet_model_build(input_shape=(224, 224, 3), weights="vggface"):
    print("Building resnet-50 ", input_shape, "- num_classes 1 - weights",
          weights)

    from keras_vggface.vggface import VGGFace
    # create a vggface2 model

    vgg_model = VGGFace(model='resnet50',
                        weights=weights,
                        input_shape=input_shape,
                        include_top=False)
    last_layer = vgg_model.get_layer('avg_pool').output
    x = Flatten(name='flatten')(last_layer)
    middle = Dense(512,
                   kernel_initializer='normal',
                   activation='relu',
                   name='features')(x)
    out = Dense(1,
                kernel_initializer='normal',
                activation='linear',
                name='age_estimate')(middle)
    custom_vgg_model = keras.models.Model(vgg_model.input, out)

    # Fine tuning all the net
    for layer in custom_vgg_model.layers:
        layer.trainable = True
    '''
    # Train last 2 layers
    custom_vgg_model.layers[-1].trainable = True
    custom_vgg_model.layers[-2].trainable = True
    '''
    features = custom_vgg_model.layers[-4].output

    return custom_vgg_model, features
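A sketch of compiling the regression head returned above; the optimizer and loss are assumptions, not taken from the source:

model, features = resnet_model_build()
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
# model.fit(...) then trains against scalar age targets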
Code Example #9
File: train.py  Project: raulperod/FaceLock
    def build_model(self, nb_classes=2, hidden_dim=512):
        """ KERAS MODEL WITH IMAGENET
        self.model = Sequential()
        self.model.add(InceptionResNetV2(include_top=False, pooling='avg', weights="imagenet", input_shape=(IMAGE_SIZE,IMAGE_SIZE,3)))
        self.model.add(VGG19(include_top=False, pooling='avg', weights="imagenet", input_shape=(IMAGE_SIZE,IMAGE_SIZE,3)))
        self.model.add(VGG16(include_top=False, pooling='avg', weights="imagenet", input_shape=(IMAGE_SIZE,IMAGE_SIZE,3)))
        self.model.add(Dense(nb_classes))
        self.model.add(Activation('softmax'))

        # Say not to train first layer (ResNet) model. It is already trained
        self.model.layers[0].trainable = False
        self.model.summary() 
        """
        """  VGG16 
        vgg_model = VGGFace(include_top=False, input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
        last_layer = vgg_model.get_layer('pool5').output
        x = Flatten(name='flatten')(last_layer)
        x = Dense(hidden_dim, activation='relu', name='fc6')(x)
        x = Dense(hidden_dim, activation='relu', name='fc7')(x)
        out = Dense(nb_classes, activation='softmax', name='fc8')(x)
        custom_vgg_model = keras.engine.Model(vgg_model.input, out)
        """
        # RESNET50
        vgg_model = VGGFace(include_top=False,
                            model='resnet50',
                            input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
        last_layer = vgg_model.get_layer('avg_pool').output
        x = Flatten(name='flatten')(last_layer)
        x = Dense(nb_classes)(x)
        out = Activation('softmax')(x)
        custom_vgg_model = keras.engine.Model(vgg_model.input, out)

        self.model = custom_vgg_model

        self.model.summary()
Code Example #10
def loadModel():
    vgg_model = VGGFace(include_top=True,
                        model='vgg16',
                        input_shape=(224, 224, 3))
    embeddings = vgg_model.get_layer('fc7').output
    output = Lambda(lambda x: tf.math.l2_normalize(x, axis=1))(embeddings)
    return Model(vgg_model.input, output)
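Because the embeddings above are L2-normalized, cosine similarity reduces to a dot product. A minimal verification sketch, assuming `x1` and `x2` are two preprocessed face batches of shape (1, 224, 224, 3):

import numpy as np

model = loadModel()
emb1, emb2 = model.predict(x1), model.predict(x2)
similarity = float(np.dot(emb1[0], emb2[0]))  # in [-1, 1]; higher means more alike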
Code Example #11
    def __init__(self, image_shape=(299, 299, 3), weights=None):
        """Either load pretrained from imagenet, or load our saved
        weights from our own training."""

        self.weights = weights  # so we can check elsewhere which model

        input_tensor = Input(image_shape)
        # Get model with pretrained weights.
        # base_model = InceptionV3(
        #     input_tensor=input_tensor,
        #     weights='imagenet',
        #     include_top=True
        # )

        # base_model = InceptionResNetV2(
        #     input_tensor=input_tensor,
        #     weights='imagenet',
        #     include_top=True
        # ) # 2622

        base_model = VGGFace()
        # print(base_model.summary())
        # sys.exit()

        # We'll extract features at the final pool layer.
        self.model = Model(
            inputs=base_model.input,
            outputs=base_model.get_layer(
                'fc8').output  # VGGFace uses fc8; the rest use avg_pool
        )
Code Example #12
File: face_fit.py  Project: pmmf/EMO-INESC
def myVGGFACE(inputs, model='vgg16', n_dense=2, hidden_dim=512, l2_reg=1e-04, layer_name='pool5', train_mode=None):

    vgg_model = VGGFace(include_top=False, model=model, weights='vggface', input_shape=(96, 96, 3))
    out = vgg_model.get_layer(layer_name).output
    vgg_model_new = Model(vgg_model.input, out)
        
    for layer in vgg_model_new.layers:
        layer.trainable = False
        # print(layer.name, layer.trainable)

    vgg_shared_streams = [vgg_model_new(i) for i in inputs]
    last_layer = Add()(vgg_shared_streams)
    # x = Flatten(name='flatten')(last_layer)
    x = GlobalAveragePooling2D()(last_layer)
    for i in range(n_dense):
        name='fc' + str(i + 6)
        x = Dense(hidden_dim, activation='relu', kernel_regularizer=l2(l2_reg), name=name)(x)
        x = Dropout(0.4)(x)
    out_arousal = Dense(1, activation='sigmoid', name='out_arousal')(x)
    out_valence = Dense(1, activation='tanh', name='out_valence')(x)
    if __EMOTIONS__:
        out_categorical = Dense(__N_CLASSES__, activation='softmax', name='out_categorical')(x)
        custom_vgg_model = Model(inputs, [out_arousal, out_valence, out_categorical])
    else:
        custom_vgg_model = Model(inputs, [out_arousal, out_valence])

    if train_mode is not None:
        for layer in vgg_model_new.layers[1:]:
            layer.trainable = True
            print(layer.name, layer.trainable)

    return custom_vgg_model
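A usage sketch: myVGGFACE expects a list of Input tensors, one per stream. The stream count of 3 is an assumption, and the module-level __EMOTIONS__/__N_CLASSES__ flags must already be set:

from keras.layers import Input

inputs = [Input(shape=(96, 96, 3)) for _ in range(3)]
model = myVGGFACE(inputs, model='vgg16', n_dense=2)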
Code Example #13
def model(shape, weights='vggface'):
    landmarks = Input(shape=(20, 2), name='landmarks')
    vgg_model = VGGFace(include_top=False,
                        input_shape=(224, 224, 3),
                        weights=weights)
    last_layer = vgg_model.get_layer('pool5').output

    # DYNAMIC REGIONS OF INTEREST
    roi_layer = RoiLayer()
    roi_layer.split(layer=last_layer,
                    landmarks=landmarks,
                    original_size=shape,
                    region_height=REGION_HEIGHT,
                    region_width=REGION_WIDTH)

    # regions operations
    roi_layer.add(_region_operation)

    # regions concat
    fcs_concatenated = roi_layer.concatenate_fully_connected()
    drop1 = Dropout(0.5)(fcs_concatenated)

    fc1 = Dense(2000, activation='relu', name='fc6')(drop1)
    drop2 = Dropout(0.5)(fc1)

    fc2 = Dense(2000, activation='relu', name='fc7')(drop2)

    au_detection_fc8 = Dense(10, activation='sigmoid',
                             kernel_initializer='normal',
                             name=AU_OCCURRENCE_OUTPUT_NAME)(fc2)

    model = Model(inputs=[vgg_model.input, landmarks],
                  outputs=[au_detection_fc8])

    return model
Code Example #14
    def __init__(self, weights=None):
        """Either load pretrained from imagenet, or load our saved
        weights from our own training."""

        self.weights = weights  # so we can check elsewhere which model

        if weights is None:
            # Get model with pretrained weights.

            #base_model = VGG16(weights='imagenet', include_top=True)
            base_model = VGGFace(include_top=True, input_shape=(224, 224, 3))

            # We'll extract features at the final pool layer.

            self.model = Model(inputs=base_model.input,
                               outputs=base_model.get_layer('flatten').output)
            #self.model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)

        else:
            # Load the model first.
            self.model = load_model(weights)

            # Then remove the top so we get features not predictions.
            # From: https://github.com/fchollet/keras/issues/2371
            self.model.layers.pop()
            self.model.layers.pop()  # two pops to get to pool layer
            self.model.outputs = [self.model.layers[-1].output]
            self.model.output_layers = [self.model.layers[-1]]
            self.model.layers[-1].outbound_nodes = []
Code Example #15
def vggfacewithoutTOP():
    from keras.engine import Model
    from keras.layers import Flatten, Dense, Input, Lambda
    from keras_vggface.vggface import VGGFace
    import tensorflow as tf  # needed for tf.math.l2_normalize in the Lambda
    #custom parameters
    #hidden_dim=512
    hidden_dim = 1024
    #hidden_dim2 = 512
    #hidden_dim3 = 90
    vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))
    last_layer = vgg_model.get_layer('pool5').output
    x = Flatten(name='flatten')(last_layer)
    x = Dense(hidden_dim, activation=None, name="fc6")(x)
    #x = Dense(hidden_dim1, activation='relu', name='fc6')(x)
    #x = Dense(hidden_dim2, activation='relu', name='fc7')(x)
    #x = Dense(hidden_dim3, activation=None, name='fc8')(x)
    out = Lambda(lambda x: tf.math.l2_normalize(x, axis=1))(x)
    model = Model(vgg_model.input, out)
    print("Architecture custom")
    print(model.summary())
    for layer in model.layers[:-2]:
        layer.trainable = False
    for layer in model.layers:
        print(layer, layer.trainable)
    return model
Code Example #16
def VGGface_structure():  # VGGFace architecture
    # Convolution Features

    base_model = VGGFace(include_top=False,
                         input_shape=(48, 48, 3),
                         pooling='avg',
                         weights='vggface')
    last_layer = base_model.get_layer('pool5').output  #

    # New classifier
    x = Flatten(name='flatten')(last_layer)
    x = Dense(512, activation='relu', name='fc6')(x)
    #x=  Dropout(0.25)(x)
    #x = Dense(512, activation='relu', name='fc7')(x) #later try 256
    #x=  Dropout(0.25)(x)
    out = Dense(7, activation='softmax', name='fc7')(x)
    model = Model(base_model.input, out)

    #base_model.trainable=False

    # Freeze the weights
    for layer in base_model.layers:
        layer.trainable = False

    model.summary()

    return model
Code Example #17
File: VisionModels.py  Project: katebell483/samples
    def build_new_facial_recognition_model(self, num_categories):
        image_input = Input(shape=(224, 224, 3))

        hidden_dim = 512  # todo: no idea why 512. figure out

        vgg_model = VGGFace(input_tensor=image_input, include_top=False)

        # only train the classifier
        #for layer in vgg_model.layers:
        #    layer.trainable = False

        last_layer = vgg_model.get_layer('pool5').output
        x = Flatten(name='flatten')(last_layer)
        x = Dense(hidden_dim, activation='relu', name='fc6')(x)
        x = Dense(hidden_dim, activation='relu', name='fc7')(x)
        out = Dense(num_categories, activation='softmax', name='fc8')(x)
        final_model = Model(image_input, out)

        # compile model using stochastic gradient descent
        sgd = SGD(lr=1e-3, momentum=0.9)
        final_model.compile(optimizer=sgd,
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])

        return final_model
Code Example #18
def vggface_vgg16_gender(input_shape=(224, 224, 3), model_type='vgg16'):

    vggface_model = VGGFace(include_top=False, model=model_type,
                            weights='vggface', input_shape=input_shape)

    print("Base VGG model summary.")
    vggface_model.summary()
    # add top layers, fix other layers
    last_layer = vggface_model.get_layer('pool5').output
    for layer in vggface_model.layers:
        layer.trainable = False

    mid_layer = Conv2D(4096, (7, 7), activation='relu')(last_layer)
    mid_layer = Dropout(0.5)(mid_layer)
    mid_layer = Conv2D(4096, (1, 1), activation='relu')(mid_layer)
    mid_layer = Dropout(0.5)(mid_layer)  # prevent overfitting
    mid_layer = Conv2D(2, (1, 1), name="prediction")(mid_layer)
    mid_layer = Flatten(name='flatten')(mid_layer)
    output = Activation('softmax')(mid_layer)

    facegender_vgg16_model = Model(vggface_model.input, output)

    print("face gender (VGG_16) model summary.")
    facegender_vgg16_model.summary()
    return facegender_vgg16_model
Code Example #19
def train():
    # Load the arrays saved by data_prep.py
    data = np.load('face_data.npz')
    x, y = data['x'], data['y']
    # One-hot encode the labels
    y = keras.utils.to_categorical(y, 6)
    # Use transfer learning to cut down the time needed to train the model
    resnet = VGGFace(model='resnet50', input_shape=(224, 224, 3))

    layer_name = resnet.layers[-2].name
    # Add our own classification layer so the model works on our dataset
    out = resnet.get_layer(layer_name).output
    out = Dense(6, activation='softmax')(out)
    resnet_4 = Model(resnet.input, out)
    # Freeze every layer except the newly added classifier
    for layer in resnet_4.layers[:-1]:
        layer.trainable = False

    resnet_4.compile(loss='categorical_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
    # Check the assembled model
    print(resnet_4.summary())
    # Train the model on our own dataset
    resnet_4.fit(x, y, batch_size=10, epochs=10, shuffle=True)
    # Save the trained model for later use
    resnet_4.save("C:\\Users\\hseth\\Desktop\\face recogination\\model_save_face.h5")
    # Evaluate on the training data only, since the dataset is very small
    scores = resnet_4.evaluate(x, y, verbose=1)
    print('Test accuracy:', scores[1])
Code Example #20
    def __init__(self):
        self.input_size = 224
        input_shape = (224, 224, 3)
        image1_batch = Input(shape=input_shape, name='in_t1')
        image2_batch = Input(shape=input_shape, name='in_t2')

        base = VGGFace(include_top=False,
                       model='vgg16',
                       weights='vggface',
                       input_shape=(224, 224, 3))
        last_layer = base.get_layer('pool5').output
        top_layer = Flatten(name='flatten')(last_layer)
        top_layer = Dense(1024, name='common_fc', activation='relu')(top_layer)

        inter_model = Model(inputs=base.input, outputs=top_layer)
        common1_feat = inter_model(image1_batch)
        common2_feat = inter_model(image2_batch)

        emotion_FC = Dense(128, name='emotion_FC_1',
                           activation='relu')(common1_feat)
        emotion_out = Dense(7, name='emotion_prediction',
                            activation='softmax')(emotion_FC)

        # gender_FC = Dense(128, name='gender_FC_1', activation='relu')(common2_feat)
        # gender_out = Dense(2, name='gender_prediction', activation='softmax')(gender_FC)

        age_FC = Dense(128, name='age_FC_1', activation='relu')(common2_feat)
        age_out = Dense(70, name='age_prediction',
                        activation='softmax')(age_FC)

        super().__init__(inputs=[image1_batch, image2_batch],
                         outputs=[emotion_out, age_out],
                         name='two_output_VGGFacenet')
Code Example #21
    def __init__(self):
        self.input_size = 224
        base = VGGFace(include_top=False,
                       model='vgg16',
                       weights='vggface',
                       input_shape=(224, 224, 3))
        last_layer = base.get_layer('pool5').output
        x = Flatten(name='flatten')(last_layer)

        x = Dense(1024, name='common_fc', activation='relu')(x)

        emotion_FC = Dense(128, name='emotion_FC_1', activation='relu')(x)
        emotion_out = Dense(8, name='emotion_prediction',
                            activation='softmax')(emotion_FC)

        gender_FC = Dense(128, name='gender_FC_1', activation='relu')(x)
        gender_out = Dense(2, name='gender_prediction',
                           activation='softmax')(gender_FC)

        age_FC = Dense(128, name='age_FC_1', activation='relu')(x)
        age_out = Dense(101, name='age_prediction',
                        activation='softmax')(age_FC)

        super().__init__(inputs=base.input,
                         outputs=emotion_out,
                         name='EmotionNetVGGFace_vgg16')
Code Example #22
File: newfacemodel.py  Project: conantsai/3W_Analysis
def NewFaceModel(classes):
    base_model = VGGFace(include_top=False,
                         input_shape=(image_size, image_size, 3),
                         model='resnet50')

    for layer in base_model.layers:
        layer.trainable = True

    train_layer = Flatten(name='flatten')(
        base_model.get_layer('avg_pool').output)
    train_layer = Dense(2048, activation='relu', name='fc1')(train_layer)
    train_layer = Dropout(0.5, name='drop_fc1')(train_layer)

    train_layer = Dense(2048, activation='relu', name='fc2')(train_layer)
    train_layer = Dropout(0.5, name='drop_fc2')(train_layer)

    train_layer = Dense(1024, activation='relu', name='fc3')(train_layer)
    train_layer = Dropout(0.5, name='drop_fc3')(train_layer)

    train_layer = Dense(1024, activation='relu', name='fc4')(train_layer)
    train_layer = Dropout(0.5, name='drop_fc4')(train_layer)

    train_layer = Dense(classes, activation='softmax',
                        name="predictions")(train_layer)

    model = keras.models.Model(inputs=base_model.input, outputs=train_layer)

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.RMSprop(lr=0.0001, decay=1e-6),
                  metrics=['accuracy'])

    return model
Code Example #23
def custom_vgg_model(SEQUENCE_LENGTH):
    vggface = VGGFace(model='resnet50',
                      include_top=False,
                      input_shape=(224, 224, 3),
                      pooling='avg')
    model_VGGFace = Model(inputs=vggface.input,
                          outputs=vggface.get_layer('avg_pool').output)
    model_VGGFace.summary()

    input_tensor = Input(shape=(SEQUENCE_LENGTH, 224, 224, 3))
    x = TimeDistributed(model_VGGFace)(input_tensor)
    x = TimeDistributed(Flatten())(x)

    # Give each Dense a distinct name to avoid a Keras duplicate-name error
    x = TimeDistributed(Dense(1500, activation='relu', name='dense_1'))(x)
    # x = Dropout(0.2, name='dropout_1')(x)
    # x = GRU(64, activation='relu', return_sequences=True, name='lstm_1')(x)
    # x = Dropout(0.2, name='dropout_2')(x)
    # x = GRU(64, activation='relu', return_sequences=True, name='lstm_2')(x)

    # x = TimeDistributed(Dense(128, activation='relu'), name='dense_2')(x)
    x = TimeDistributed(Dense(512, activation='relu', name='dense_2'))(x)
    x = TimeDistributed(Dense(128, activation='relu', name='dense_3'))(x)
    x = TimeDistributed(Dense(32, activation='relu', name='dense_4'))(x)
    out1 = TimeDistributed(Dense(2, activation='tanh'), name='out1')(x)

    model = Model(inputs=[input_tensor], outputs=[out1])
    return model
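A usage sketch for the sequence model above; the clip length of 16 frames is an assumption:

model = custom_vgg_model(SEQUENCE_LENGTH=16)
model.summary()  # expects input of shape (batch, 16, 224, 224, 3)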
Code Example #24
File: siamese.py  Project: iamgroot42/A-LINK
 def __init__(self, shape):
     self.shape = shape + (3, )
     vgg_model = VGGFace(model='resnet50',
                         include_top=False,
                         input_shape=self.shape)
     last_layer = vgg_model.get_layer('avg_pool').output
     out = Flatten(name='flatten')(last_layer)
     self.model = Model(vgg_model.input, out)
Code Example #25
 def vggface_architecture(self):
     vgg_model = VGGFace(include_top=False, input_shape=(48, 48, 3))
     last_layer = vgg_model.get_layer('pool5').output
     x = Flatten(name='flatten')(last_layer)
     x = Dense(4096, activation='relu', name='fc6')(x)
     x = Dense(4096, activation='relu', name='fc7')(x)
     out = Dense(7, activation='softmax', name='fc8')(x)
     return Model(vgg_model.input, out)
Code Example #26
File: model.py  Project: iamgroot42/A-LINK
 def __init__(self, shape, out_dim):
     super(SENET50, self).__init__(shape, out_dim, "SENET50")
     vgg_model = VGGFace(model='senet50',
                         include_top=False,
                         input_shape=self.shape)
     last_layer = vgg_model.get_layer('avg_pool').output
     x = Flatten(name='flatten')(last_layer)
     out = Dense(self.out_dim, activation='softmax', name='classifier')(x)
     self.model = Model(vgg_model.input, out)
Code Example #27
File: models.py  Project: ahn19/face2bmi
    def define_model(self,
                     hidden_dim=128,
                     drop_rate=0.0,
                     freeze_backbone=True):

        if self.model_type == 'vgg16_fc6':
            vgg_model = VGGFace(model='vgg16',
                                include_top=True,
                                input_shape=(224, 224, 3))
            last_layer = vgg_model.get_layer('fc6').output
            flatten = Activation('relu')(last_layer)
        else:
            vgg_model = VGGFace(model=self.model_type,
                                include_top=False,
                                input_shape=(224, 224, 3))
            last_layer = vgg_model.output
            flatten = Flatten()(last_layer)

        if freeze_backbone:
            for layer in vgg_model.layers:
                layer.trainable = False

        def block(flatten, name):
            x = Dense(hidden_dim, name=name + '_fc1')(flatten)
            x = BatchNormalization(name=name + '_bn1')(x)
            x = Activation('relu', name=name + '_act1')(x)
            x = Dropout(drop_rate)(x)
            x = Dense(hidden_dim, name=name + '_fc2')(x)
            x = BatchNormalization(name=name + '_bn2')(x)
            x = Activation('relu', name=name + '_act2')(x)
            x = Dropout(drop_rate)(x)
            return x

        x = block(flatten, name='bmi')
        out_bmi = Dense(1, activation='linear', name='bmi')(x)

        x = block(flatten, name='age')
        out_age = Dense(1, activation='linear', name='age')(x)

        x = block(flatten, name='sex')
        out_sex = Dense(1, activation='sigmoid', name='sex')(x)

        custom_vgg_model = Model(vgg_model.input, [out_bmi, out_age, out_sex])
        custom_vgg_model.compile(optimizer='adam',
                                 loss={'bmi': 'mae',
                                       'age': 'mae',
                                       'sex': 'binary_crossentropy'},
                                 metrics={'sex': 'accuracy'},
                                 loss_weights={'bmi': 0.8,
                                               'age': 0.1,
                                               'sex': 0.1})

        self.model = custom_vgg_model
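A usage sketch for the three-headed model; `m` (the class instance after define_model() has run) and `faces` (a preprocessed batch of shape (N, 224, 224, 3)) are assumptions:

bmi_pred, age_pred, sex_prob = m.model.predict(faces)  # one array per output head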
Code Example #28
    def __init__(self, layer='fc6'):
        logging.info('loading VGG face')
        self.layer = layer

        vgg_face = VGGFace()

        self.model = Model(inputs=vgg_face.layers[0].input,
                           outputs=vgg_face.get_layer(self.layer).output)

        session = K.get_session()
        K.set_session(session)
Code Example #29
 def __init__(self, layer='fc6', model='vgg16'):
     self.model = model
     self.layer = layer
     # Get model with pretrained weights. model: vgg16, resnet50
     vgg_model = VGGFace(model=model)
     if self.model == 'vgg16':
         # We'll extract features at the fc6 layer
         if layer in ('fc6', 'fc7'):
             self.model = Model(inputs=vgg_model.input,
                                outputs=vgg_model.get_layer(layer).output)
         else:
             # convolution layer output needs to be flattened
             vgg_out = vgg_model.get_layer(layer).output
             out = Flatten(name='flatten')(vgg_out)
             self.model = Model(inputs=vgg_model.input, outputs=out)
     elif self.model == 'resnet50':
         # layer=avg_pool
         resnet_out = vgg_model.get_layer(layer).output
         out = Flatten(name='flatten')(resnet_out)
         self.model = Model(inputs=vgg_model.input, outputs=out)
Code Example #30
File: FaceDemo.py  Project: linxi1213/ExamSystem
def build_model():
    vgg_model = VGGFace(include_top=False, input_shape=(64, 64, 3))
    last_layer = vgg_model.get_layer('pool5').output
    x = Flatten(name='flatten')(last_layer)
    x = Dense(HIDDEN_DIM, activation='relu', name='fc6')(x)
    #x = Dense(HIDDEN_DIM, activation='relu', name='fc7')(x)
    x = Dropout(0.4)(x)
    out = Dense(NB_CLASS, activation='softmax', name='fc8')(x)
    custom_vgg_model = Model(vgg_model.input, out)
    custom_vgg_model.summary()
    return custom_vgg_model
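A training sketch for the model above; the optimizer, data variables, and hyperparameters are assumptions, not from the source:

model = build_model()
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)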