Example #1
def generate_features(self):
    # run the raw inputs through the pretrained VGGFace (VGG16) model
    base_model = VGGFace(model='vgg16')
    self.features_raw = base_model.predict(self.X)
# change the range of the target class labels to 1..n
for i in range(0, y_train.shape[0]):
    y_train[i] = train_set_ids.index(y_train[i]) + 1
for i in range(0, y_test.shape[0]):
    y_test[i] = train_set_ids.index(y_test[i]) + 1

temp = np.zeros((y_train.shape[0], int(np.max(y_train))))
temp[np.arange(y_train.shape[0]), y_train.astype(int) - 1] = 1
y_train = temp

temp = np.zeros((y_test.shape[0], int(np.max(y_test))))
temp[np.arange(y_test.shape[0]), y_test.astype(int) - 1] = 1
y_test = temp
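The two one-hot blocks above can be written more compactly with keras.utils.to_categorical; a sketch of the equivalent call, using the training matrix width for both splits so the test matrix cannot end up narrower than the train matrix:

from keras.utils import to_categorical

# to_categorical expects 0-based class indices, hence the -1
y_train = to_categorical(y_train.astype(int) - 1)
y_test = to_categorical(y_test.astype(int) - 1, num_classes=y_train.shape[1])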

if newLayers == 1:
    model = VGGFace(model='vgg16', pooling='max')
    last_layer = model.get_layer('fc7/relu').output
    x = Dense(target_class, name='fc8-2')(last_layer)
    out = Activation('softmax', name='fc8-2/softmax')(x)
    custom_model = Model(model.input, out)
elif newLayers == 2:
    model = VGGFace(model='vgg16', pooling='max')
    last_layer = model.get_layer('fc7/relu').output
    x = Dense(4096, name='fc8-2')(last_layer)
    x = Activation('relu', name='fc8-2/relu')(x)
    x = Dense(target_class, name='fc8-2-1')(x)
    out = Activation('softmax', name='fc8-2/softmax')(x)
    custom_model = Model(model.input, out)
elif newLayers == 3:
    model = VGGFace(model='vgg16', pooling='max')
    last_layer = model.get_layer('fc7/relu').output
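The third branch is cut off in the example; whichever head gets attached, the usual next step is to compile and fit the resulting model. A minimal sketch, assuming the one-hot labels prepared above and hypothetical X_train / X_test image arrays:

custom_model.compile(optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
# X_train / X_test are hypothetical image arrays matching y_train / y_test
custom_model.fit(X_train, y_train,
                 validation_data=(X_test, y_test),
                 epochs=10, batch_size=32)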
import tensorflow as tf

physical_devices = tf.config.experimental.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

import json
import zmq
import pymongo
import numpy as np
from PIL import Image
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input

connection = pymongo.MongoClient('localhost', 27017)

database = connection["supermarket"]

collection = database["internal_camera_recognition"]
entry_collection = database["entry_gate"]

face_verification_model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')

context = zmq.Context()
receiver = context.socket(zmq.PULL)
receiver.bind("tcp://*:8081")


def get_embedding(frame, x1, y1, x2, y2, required_size=(224, 224)):
    # crop the detected face from the frame and resize it for the network
    face = frame[y1:y2, x1:x2]
    image = Image.fromarray(face.astype(np.uint8))
    image = image.resize(required_size)
    face_array = np.asarray(image)

    # preprocess for the resnet50 VGGFace variant
    samples = np.asarray([face_array], 'float32')
    samples = preprocess_input(samples, version=2)
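    # (completion sketch, not in the original: run the preprocessed face through the
    #  resnet50 model created above and return its 2048-d embedding)
    embedding = face_verification_model.predict(samples)
    return embedding[0]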
Example #4
import pickle

from PIL import Image
from keras.models import load_model
import UsolDeepCore

# assuming the dlib-based detector class lives in the UsolDeepCore module
usolDlib = UsolDeepCore.usolDlib()
myModel = load_model('hs_model.h5')
with open('knn_model_resnet50vgg_max_0511.pkl', 'rb') as f:
    knn = pickle.load(f)
'''
vgg_conv = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(224, 224, 3))
vgg_conv.summary()
'''
resnet_vgg = VGGFace(model='resnet50',
                     include_top=False,
                     input_shape=(224, 224, 3),
                     pooling=None,
                     weights='vggface')
resnet_vgg.summary()

test = np.load('testXY_resnet50vgg_max_0511.npz')
testFeatures, testLabel = test['x'], test['y']
#idx2label = usolUtil.makeIndexToLabelFromDir('../dataset/pre/people/validation')
with open('hs_model_label.pkl', 'rb') as f:
    idx2label = pickle.load(f)
print(idx2label)


class WSHandler(tornado.websocket.WebSocketHandler):
    clients = []
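A sketch of how the loaded pieces might be combined to identify a face: flatten the resnet50 feature map and classify it with the pickled kNN model. The preprocessing call and the idx2label lookup are assumptions about how the kNN training features were produced:

import numpy as np
from keras_vggface.utils import preprocess_input

def identify(face_array):
    # face_array: a (224, 224, 3) face crop already resized for the network
    x = np.asarray([face_array], 'float32')
    x = preprocess_input(x, version=2)          # version=2 matches the resnet50 weights
    features = resnet_vgg.predict(x)            # (1, 1, 1, 2048) with pooling=None
    features = features.reshape(1, -1)          # flatten to a 2048-d vector
    return idx2label[int(knn.predict(features)[0])]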
from keras.engine import Model
from keras_vggface.vggface import VGGFace
from keras.preprocessing import image
import numpy as np
from keras_vggface import utils
import scipy.io
import glob
import os

# Layer Features
model = 'vgg16'
layer_name = 'fc7'
vgg_model = VGGFace(model=model)
out = vgg_model.get_layer(layer_name).output
vgg_model_new = Model(vgg_model.input, out)

#rootdir = "C:/Users/Newbie/Desktop/MasterThesis/images/LFW/result/lfw/"
arrsource = ["1235", "1245", "1345", "2345"]  #["1234"]
# arrsource = ["GENDER-FERET"]
# subfolder = "/"

for source in arrsource:
    rootdir = "C:/Users/Newbie/Desktop/FinalMasterThesis/implementasi/datasource/processedrawdata/LFW/7433grayscalebalancedout/"
    print("================================" + source +
          "========================================")
    rootdir = rootdir + source
    trainingfemale_directory = rootdir + "/female/training_set/*.jpg"
    trainingmale_directory = rootdir + "/male/training_set/*.jpg"
    testfemale_directory = rootdir + "/female/test_set/*.jpg"
    testmale_directory = rootdir + "/male/test_set/*.jpg"
    output_directory = rootdir + "/results/cnnfeatures"
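    # (sketch, not in the original: one plausible continuation that extracts fc7
    #  features for the female training images and saves them per image; the same
    #  loop would be repeated for the other three directories, and the .mat file
    #  naming is an assumption)
    os.makedirs(output_directory, exist_ok=True)
    for img_path in glob.glob(trainingfemale_directory):
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = utils.preprocess_input(x, version=1)   # version=1 for the vgg16 variant
        fc7 = vgg_model_new.predict(x)             # (1, 4096) fc7 activations
        out_name = os.path.splitext(os.path.basename(img_path))[0] + '.mat'
        scipy.io.savemat(os.path.join(output_directory, out_name), {'fc7': fc7})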
Example #6
def is_match(known_embedding, candidate_embedding):

    # calculate distance between embeddings
    score = cosine(known_embedding, candidate_embedding)

    return score


thresh = 0.35  # i.e. looking for at least 65% similarity
print("Please note this machine only knows prashant")
group_array = plt.imread(group_photo)
plt.imshow(group_array)
for i in range(0, len(group_embedding)):
    pk_score = is_match(known_embedings[0], group_embedding[i])
    if pk_score < thresh:
        ax = plt.gca()
        x = face_coordinates[i][0]
        y = face_coordinates[i][1]
        width = face_coordinates[i][2]
        height = face_coordinates[i][3]

        rect = Rectangle((x, y), width, height, fill=False, color='red')
        ax.add_patch(rect)
        s = str((100 - pk_score * 100))
        ax.text(x, y, 'Prashant - ' + s, color='yellow')

plt.show()

model = VGGFace()
print(model.summary())
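The embeddings compared above are produced elsewhere; a minimal sketch of one way to obtain them with keras_vggface, assuming aligned 224x224 face crops are already available:

import numpy as np
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input

embedding_model = VGGFace(model='resnet50', include_top=False,
                          input_shape=(224, 224, 3), pooling='avg')

def embed(face_crop):
    # face_crop: (224, 224, 3) array of a single aligned face
    sample = np.expand_dims(face_crop.astype('float32'), axis=0)
    sample = preprocess_input(sample, version=2)   # version=2 for the resnet50 variant
    return embedding_model.predict(sample)[0]      # 2048-d embedding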
Example #7
def train(args):
    """Use transfer learning and fine-tuning to train a network on a new dataset"""
    nb_train_samples = get_nb_files(args.train_dir)
    nb_classes = len(glob.glob(args.train_dir + "/*"))
    nb_val_samples = get_nb_files(args.val_dir)
    nb_epoch = int(args.nb_epoch)
    batch_size = int(args.batch_size)

    # data prep
    train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                       rotation_range=30,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                      rotation_range=30,
                                      width_shift_range=0.2,
                                      height_shift_range=0.2,
                                      shear_range=0.2,
                                      zoom_range=0.2,
                                      horizontal_flip=True)

    train_generator = train_datagen.flow_from_directory(
        args.train_dir,
        target_size=(IM_WIDTH, IM_HEIGHT),
        batch_size=batch_size)

    validation_generator = test_datagen.flow_from_directory(
        args.val_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size)

    # setup model
    base_model = VGGFace(include_top=False,
                         input_tensor=Input(shape=(IM_HEIGHT, IM_WIDTH, 3)))

    model = add_new_last_layer(base_model, nb_classes)

    # transfer learning
    setup_to_transfer_learn(model, base_model)

    history_tl = model.fit_generator(
        train_generator,
        epochs=nb_epoch,
        steps_per_epoch=nb_train_samples // batch_size,
        validation_data=validation_generator,
        validation_steps=nb_val_samples // batch_size,
        class_weight='auto')
    # fine-tuning
    setup_to_finetune(model)

    history_ft = model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=nb_epoch,
        validation_data=validation_generator,
        validation_steps=nb_val_samples // batch_size,
        callbacks=[lr_reducer, early_stopper, csv_logger])

    model.save(args.output_model_file)

    plot_training(history_tl)
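train relies on helpers that are not shown in the example; a minimal sketch of what they typically look like in this transfer-learning pattern (FC_SIZE and the optimizer are assumptions):

from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

FC_SIZE = 1024  # assumed width of the new hidden layer

def add_new_last_layer(base_model, nb_classes):
    # put a small classification head on top of the pretrained convolutional base
    x = GlobalAveragePooling2D()(base_model.output)
    x = Dense(FC_SIZE, activation='relu')(x)
    predictions = Dense(nb_classes, activation='softmax')(x)
    return Model(base_model.input, predictions)

def setup_to_transfer_learn(model, base_model):
    # freeze the pretrained base so only the new head is trained at first
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])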
Example #8
import os

import keras
import tensorflow as tf
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from keras_vggface.vggface import VGGFace
from matplotlib import pyplot as plt
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound
from google.cloud import storage
from google.cloud.exceptions import GoogleCloudError

from face.compare import extract_face, get_embeddings, verify_user

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ['DATABASE_URL']
db = SQLAlchemy(app)
BUCKET_NAME = "pictures_bucket"
keras.backend.clear_session()
tf.get_default_graph()
MODEL = VGGFace(include_top=False, input_shape=(224, 224, 3))


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String)
    face_embeddings = db.Column(db.LargeBinary)
    photos = db.relationship("Photo", backref='user')
    face_recognition_enabled = db.Column(db.Boolean, default=True)


class Photo(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    photo_link = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
Example #9
x_test = resize_shape(x_test)
x_val = resize_shape(x_val)
print(x_train[0].shape)

!pip install keras_vggface

from keras.engine import  Model
from keras.layers import Flatten, Dense, Input, Dropout
from keras_vggface.vggface import VGGFace
from keras.optimizers import Adam,SGD

#custom parameters
nb_class = 12
hidden_dim = 2048

vgg_model = VGGFace(include_top=False, input_shape=(3,96,96))
vgg_model.trainable = True
for layer in vgg_model.layers:
    if layer.name == 'conv5_3':
        break
    else:
        layer.trainable = False
last_layer = vgg_model.get_layer('pool5').output
x = Flatten(name='flatten')(last_layer)
x = Dense(hidden_dim, activation='relu', name='fc6')(x)
x = Dropout(0.5)(x)
x = Dense(hidden_dim, activation='relu', name='fc7')(x)
out = Dense(nb_class, activation='softmax', name='fc8')(x)
custom_vgg_model = Model(vgg_model.input, out)

from keras.models import model_from_json
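The example ends before training; a minimal sketch of compiling and fitting custom_vgg_model on the resized arrays prepared earlier (the y_train / y_val label arrays, learning rate, and epoch count are assumptions):

custom_vgg_model.compile(optimizer=Adam(lr=1e-4),
                         loss='categorical_crossentropy',
                         metrics=['accuracy'])
# y_train / y_val are assumed to be one-hot matrices with nb_class columns
custom_vgg_model.fit(x_train, y_train,
                     validation_data=(x_val, y_val),
                     epochs=20, batch_size=32)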
Example #10
import pandas as pd
from mtcnn.mtcnn import MTCNN
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input
from keras_vggface.utils import decode_predictions
from keras import backend as K
import tables

from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

# create a vggface model
vggFace = VGGFace(model='vgg16')


def prepare_dataset():
    images_folder = 'celeba-dataset/img_align_celeba/img_align_celeba'
    df_attr = pd.read_csv('celeba-dataset/list_attr_celeba.csv')
    no_glasses = df_attr['Eyeglasses'] < 0
    no_hat = df_attr['Wearing_Hat'] < 0
    young = df_attr['Young'] > 0
    df_young = df_attr[young & no_hat & no_glasses]
    df_young = df_young[['image_id']]

    # split data into train, val, and test
    input_train, input_test = split_data(
        df_young['image_id'].values.reshape((df_young.shape[0], 1)), 0.6)
    input_val, input_test = split_data(input_test, 0.5)
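split_data itself is not part of the snippet; a minimal sketch of a helper matching how it is called above (a shuffled split that returns the first fraction of rows and the remainder):

import numpy as np

def split_data(rows, fraction):
    # shuffle, then return (first `fraction` of the rows, the rest)
    idx = np.random.permutation(len(rows))
    cut = int(len(rows) * fraction)
    return rows[idx[:cut]], rows[idx[cut:]]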
Example #11
    if (MODE == "single"):
        it_order = train_it.filenames
        ids_key = []

        for i in it_order:
            aux = int(i.split("/")[0].split("C")[1])
            if aux not in ids_key:
                ids_key.append(aux)

        np.save("ids_key.npy", np.asarray(ids_key))

    ####################################################################################################################
    # PREPARE THE MODEL
    ####################################################################################################################
    if (WEIGHTS == "vggface"):
        resnet_model = VGGFace(model="resnet50",
                               include_top=False,
                               input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
        x = resnet_model.get_layer("avg_pool").output
        x = Flatten(name="flatten")(x)
        out = Dense(NUM_CLASSES, activation="softmax", name="classifier")(x)
        model = Model(resnet_model.input, out)

    else:
        # use a ResNet50 model
        if (TRAIN_FROM_SCRATCH_MODEL == "resnet50"):
            resnet_model = ResNet50(weights=None,
                                    include_top=False,
                                    input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
            x = resnet_model.output
            x = GlobalAveragePooling2D()(x)
            x = Dropout(DROPOUT)(x)
Example #12
        self.kernel = self.add_weight(name='kernel',
                                      shape=(0, 0),
                                      initializer='uniform',
                                      trainable=False)
        super(Reshape_Layer, self).build(input_shape)

    def call(self, x):
        return x[:, 16:240, 16:240, :]

    def compute_output_shape(self, input_shape):
        return (input_shape[0], 224, 224, 3)


from keras.models import Sequential
from keras.layers import Dense, Reshape
from keras_vggface.vggface import VGGFace

model_VGG = VGGFace(model='resnet50')
model_F1 = Sequential()

model_F1.add(model)
model_F1.add(Reshape_Layer((224, 224, 3)))
model_F1.add(model_VGG)
model_true = Sequential()
model_true.add(Reshape((256, 256, 3), input_shape=(256, 256, 3)))
model_true.add(Reshape_Layer((224, 224, 3)))
model_true.add(model_VGG)

y_true = model_F1.predict(X)
y_true = y_true[1]

model_F1_predict = Sequential()
model_F1_predict.add(Dense(152, use_bias=False, input_dim=1))
import os

import numpy as np
import pandas as pd
from tqdm import tqdm

code_path = '/../../'

train_df = pd.read_csv(os.path.join(code_path, "Data/train_relationships.csv"))
test_df = pd.read_csv(os.path.join(code_path, "Data/sample_submission.csv"))

from keras_applications.imagenet_utils import _obtain_input_shape
from keras_vggface.vggface import VGGFace
import ssl

ssl._create_default_https_context = ssl._create_unverified_context
# Convolution Features
vgg_features = VGGFace(include_top=False,
                       input_shape=(160, 160, 3),
                       pooling='avg')
model = vgg_features


def prewhiten(x):
    if x.ndim == 4:
        axis = (1, 2, 3)
        size = x[0].size
    elif x.ndim == 3:
        axis = (0, 1, 2)
        size = x.size
    else:
        raise ValueError('Dimension should be 3 or 4')

    mean = np.mean(x, axis=axis, keepdims=True)
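    # (completion sketch, not in the original: the usual FaceNet-style prewhitening
    #  continues by normalising with an adjusted standard deviation)
    std = np.std(x, axis=axis, keepdims=True)
    std_adj = np.maximum(std, 1.0 / np.sqrt(size))
    return (x - mean) / std_adj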
Example #14
def generate_vggface_data():
    if not os.path.exists(commons.FEATURES_DIR):
        os.mkdir(commons.FEATURES_DIR)

    if not os.path.exists(VGGFACE_DIR):
        os.mkdir(VGGFACE_DIR)

    # if we already got them...
    generated = (os.path.exists(VGGFACE_DIR + '/' + TRAIN_X)
                 and os.path.exists(VGGFACE_DIR + '/' + TRAIN_Y)
                 and os.path.exists(VGGFACE_DIR + '/' + TEST_X)
                 and os.path.exists(VGGFACE_DIR + '/' + TEST_Y)
                 and os.path.exists(VGGFACE_DIR + '/' + TEST_WHO))

    if generated:
        return

    # generate them, save, and return them!
    train_ids = set([])
    with open(commons.TRAIN_FILE, 'r') as f:
        for line in f.readlines():
            train_ids.add(line.strip())

    test_ids = set([])
    with open(commons.TEST_FILE, 'r') as f:
        for line in f.readlines():
            test_ids.add(line.strip())

    # encoding and separate (data, label)
    train_x, train_y, test_x, test_y, test_who = [], [], [], [], []
    labelof = commons.get_label()

    IMAGE_SIZE = 224
    cnn_model = VGGFace(include_top=False,
                        input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                        pooling=None)
    print("Loading baseline data...", datetime.now())

    image_filenames = [
        x for x in sorted(os.listdir(commons.TRAIN_IMAGES_DIR))
        if len(x) > 4 and x[-4:] == '.jpg'
    ]
    for i in range(len(image_filenames)):
        print("{}/{}...".format(i, len(image_filenames)), end='\r')

        image_filename = image_filenames[i]
        imdb_id, character_id, timestamp = image_filename[:-4].split('-')
        character_id = int(character_id)

        # if this movie is not yet labeled
        if imdb_id not in labelof:
            continue

        image = keras.preprocessing.image.load_img(
            os.path.join(commons.TRAIN_IMAGES_DIR, image_filename),
            target_size=(IMAGE_SIZE, IMAGE_SIZE))

        x = keras.preprocessing.image.img_to_array(image)
        x = numpy.expand_dims(x, axis=0)
        x = keras_vggface.utils.preprocess_input(x, version=1)
        x = cnn_model.predict(x)

        y = labelof[imdb_id][character_id]

        if imdb_id in train_ids:
            train_x.append(x)
            train_y.append(y)
        else:
            test_x.append(x)
            test_y.append(y)
            test_who.append(image_filename[:-4])

    print("Done. ", datetime.now())

    with open(VGGFACE_DIR + '/' + TRAIN_X, 'wb') as f:
        pickle.dump(train_x, f)
    with open(VGGFACE_DIR + '/' + TRAIN_Y, 'wb') as f:
        pickle.dump(train_y, f)
    with open(VGGFACE_DIR + '/' + TEST_X, 'wb') as f:
        pickle.dump(test_x, f)
    with open(VGGFACE_DIR + '/' + TEST_Y, 'wb') as f:
        pickle.dump(test_y, f)
    with open(VGGFACE_DIR + '/' + TEST_WHO, 'w') as f:
        for who in test_who:
            f.write("{}\n".format(who))
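For completeness, a small companion sketch of how downstream code could read the cached features back (it simply mirrors the dumps above):

def load_vggface_data():
    # load the feature arrays written by generate_vggface_data()
    with open(VGGFACE_DIR + '/' + TRAIN_X, 'rb') as f:
        train_x = pickle.load(f)
    with open(VGGFACE_DIR + '/' + TRAIN_Y, 'rb') as f:
        train_y = pickle.load(f)
    with open(VGGFACE_DIR + '/' + TEST_X, 'rb') as f:
        test_x = pickle.load(f)
    with open(VGGFACE_DIR + '/' + TEST_Y, 'rb') as f:
        test_y = pickle.load(f)
    return train_x, train_y, test_x, test_y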
                                                    test_size=0.20,
                                                    random_state=42,
                                                    shuffle=True)

x_train = x_train.reset_index(drop=True)
x_test = x_test.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)

train_steps = int(x_train.shape[0] / batch_size)
valid_steps = int(x_test.shape[0] / batch_size)

train_gen = KinImageReader(xdf, ydf, batch_size, height, width)
valid_gen = KinImageReader(x_test, y_test, batch_size, height, width)

vgg_features1 = VGGFace(include_top=False, model='resnet50')
vgg_features2 = VGGFace(include_top=False, model='resnet50')

# Define Pretrained VGG16 Application from Keras
model1 = vgg_features1
model2 = vgg_features2

# Append a suffix to each head's layer names so the merge layer doesn't get confused
for layer in model1.layers:
    layer.name = layer.name + str("_1")
    #layer.trainable = False
    layer.trainable = True

for layer in model2.layers:
    layer.name = layer.name + str("_2")
    #layer.trainable = False
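The snippet breaks off before the two renamed backbones are joined; a minimal sketch of one common way to combine them into a kinship-verification head (the merge strategy, layer sizes, and the height/width values are assumptions):

from keras.layers import Input, GlobalAveragePooling2D, Concatenate, Dense
from keras.models import Model

input1 = Input(shape=(height, width, 3))
input2 = Input(shape=(height, width, 3))

# run each image of the pair through its own renamed resnet50 backbone
feat1 = GlobalAveragePooling2D()(model1(input1))
feat2 = GlobalAveragePooling2D()(model2(input2))

x = Concatenate()([feat1, feat2])
x = Dense(256, activation='relu')(x)
out = Dense(1, activation='sigmoid')(x)   # related / not related

siamese = Model([input1, input2], out)
siamese.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])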
Example #16
def extract_features(speaker,
                     feature_path=FEATURE_DIRECTORY_PATH,
                     video_dir_path=VIDEO_DIRECTORY_PATH,
                     is_demo=False):

    stdscr = curses_init()

    input_tensor = Input(shape=(224, 224, 3))

    #don't include 3 fully connected layers on top
    vgg_model = VGGFace(input_tensor=input_tensor,
                        include_top=False,
                        pooling='avg')

    #create folder for speaker's features
    speaker_feature_dir = os.path.join(feature_path, speaker)
    if not os.path.isdir(speaker_feature_dir):
        os.mkdir(speaker_feature_dir)

    # Get list of all videos to process.
    video_glob = os.path.join(video_dir_path, speaker, '*.mpg')
    video_paths = glob(video_glob)
    num_videos = len(video_paths)
    word_count = 0

    stdscr.addstr(0, 0, f'Extracting facial features for speaker {speaker}.')
    stdscr.refresh()

    try:
        for video_ordinal, video_path in enumerate(video_paths):
            video_name = os.path.basename(video_path)

            progress_msg(stdscr, video_ordinal, word_count, video_name,
                         num_videos)

            word_frames, words_arr = process_video(speaker,
                                                   video_name,
                                                   video_dir_path,
                                                   is_demo=is_demo)
            name_no_ext = video_name.split('.')[0]

            # Process each word's set of frames.
            for i, word_frame in enumerate(word_frames):

                word_count += 1
                progress_msg(stdscr, video_ordinal, word_count, video_name,
                             num_videos)

                # Format of the file name is [video_name]_[word_index]_[word].
                feature_file_name = f'{name_no_ext}_{i}_{words_arr[i]}'

                feature_file_path = os.path.join(speaker_feature_dir,
                                                 feature_file_name)

                if (feature_exists(feature_file_path)):
                    continue

                # Classify the frames and save the features to a file.
                features = vgg_model.predict(word_frame)
                np.save(feature_file_path, features)
    except (KeyboardInterrupt):
        pass
    finally:
        curses_clean_up()
Example #17
# images are fed to the network with 3 channels
img_channels = 3
nb_classes = 8

verbose = 1

# hyperparameters
nb_epoch = 51
batch_size = 128

# set up the callbacks
c1 = CheckpointCallback1()
c2 = MetricsCallback()
callbacks = [c1, c2]
vgg_model = VGGFace(model='vgg16',
                    include_top=False,
                    pooling='max',
                    input_shape=(224, 224, 3))

# Freeze the layers which you don't want to train. Here I am freezing the first 14 layers.
for layer in vgg_model.layers[:14]:
    layer.trainable = False

# Add custom layers
last_layer = vgg_model.get_layer('pool5').output
x = Flatten()(last_layer)
x = Dense(4096, activation="relu")(x)
x = Dense(4096, activation="relu")(x)
predictions = Dense(nb_classes, activation="softmax")(x)

model_final = Model(inputs=vgg_model.input, outputs=predictions)
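A minimal sketch of how model_final could then be compiled and trained with the callbacks defined above (the optimizer choice and the x_train / y_train arrays are assumptions, not part of the snippet):

model_final.compile(optimizer='adam',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

# x_train / y_train are hypothetical arrays of 224x224x3 faces and one-hot labels
model_final.fit(x_train, y_train,
                batch_size=batch_size,
                epochs=nb_epoch,
                callbacks=callbacks,
                verbose=verbose)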
Example #18
new_train_y = np.append(y_train, y_batch, axis=0)

# BRIGHTNESS AUGMENT
datagen = ImageDataGenerator(brightness_range=[0.2, 1.0])
datagen.fit(x_train)

for X_batch, y_batch in datagen.flow(x_train, y_train, batch_size=6000):
    break

new_train_x1 = np.append(new_train_x, X_batch, axis=0)
new_train_y1 = np.append(new_train_y, y_batch, axis=0)

# In[15]:


model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3))
model.summary()
len(model.layers)

print('Inputs: %s' % model.inputs)
print('Outputs: %s' % model.outputs)


# In[16]:


len(model.layers)


# In[17]:
                                              batch_size=4,
                                              color_mode="rgb",
                                              class_mode='categorical')

test_data_dir = "/Users/mrngochuu/Desktop/Dataset/emotion/test"
datagen = ImageDataGenerator(rescale=1. / 255,
                             shear_range=0.3,
                             zoom_range=0.2,
                             horizontal_flip=True)
test_generator = datagen.flow_from_directory(test_data_dir,
                                             target_size=(224, 224),
                                             batch_size=4,
                                             color_mode="rgb",
                                             class_mode='categorical')

vggmodel = VGGFace(model='vgg16', include_top=True)
vggmodel.summary()

for layer in vggmodel.layers[:19]:
    print(layer)
    layer.trainable = False

X = vggmodel.layers[-2].output
predictions = Dense(7, activation="softmax")(X)
model_final = Model(inputs=vggmodel.input, outputs=predictions)

model_final.compile(loss="categorical_crossentropy",
                    optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
                    metrics=["accuracy"])
model_final.summary()
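A minimal sketch of the training call these generators would feed; train_generator is an assumed name for the generator whose flow_from_directory call is cut off at the top of the snippet, and the epoch count is arbitrary:

model_final.fit_generator(train_generator,
                          steps_per_epoch=train_generator.samples // 4,
                          epochs=20,
                          validation_data=test_generator,
                          validation_steps=test_generator.samples // 4)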
Example #20
    def setup(self):
        distorted_A, fake_A, fake_sz64_A, mask_A, self.path_A, self.path_mask_A, self.path_abgr_A, self.path_bgr_A = self.cycle_variables(self.model.netGA)
        distorted_B, fake_B, fake_sz64_B, mask_B, self.path_B, self.path_mask_B, self.path_abgr_B, self.path_bgr_B = self.cycle_variables(self.model.netGB)
        real_A = Input(shape=self.model.img_shape)
        real_B = Input(shape=self.model.img_shape)

        if self.use_lsgan:
            self.loss_fn = lambda output, target : K.mean(K.abs(K.square(output-target)))
        else:
            self.loss_fn = lambda output, target : -K.mean(K.log(output+1e-12)*target+K.log(1-output+1e-12)*(1-target))
        self.loss_fn_bce = lambda output, target : -K.mean(K.log(output+1e-12)*target+K.log(1-output+1e-12)*(1-target))

        # ========== Define Perceptual Loss Model==========
        if self.use_perceptual_loss:
            from keras.models import Model
            from keras_vggface.vggface import VGGFace
            print("Using perceptual loss.")
            vggface = VGGFace(include_top=False, model='resnet50', input_shape=(224, 224, 3))
            vggface.trainable = False
            out_size55 = vggface.layers[36].output
            out_size28 = vggface.layers[78].output
            out_size7 = vggface.layers[-2].output
            vggface_feat = Model(vggface.input, [out_size55, out_size28, out_size7])
            vggface_feat.trainable = False
            netDA_feat = netDB_feat = 0
        else:
            print("Not using perceptual loss.")
            vggface_feat = None
            netDA_feat = netDB_feat = vggface_feat = None

        #TODO check "Tips for mask refinement (optional after >15k iters)" => https://render.githubusercontent.com/view/ipynb?commit=87d6e7a28ce754acd38d885367b6ceb0be92ec54&enc_url=68747470733a2f2f7261772e67697468756275736572636f6e74656e742e636f6d2f7368616f616e6c752f66616365737761702d47414e2f383764366537613238636537353461636433386438383533363762366365623062653932656335342f46616365537761705f47414e5f76325f737a3132385f747261696e2e6970796e62&nwo=shaoanlu%2Ffaceswap-GAN&path=FaceSwap_GAN_v2_sz128_train.ipynb&repository_id=115182783&repository_type=Repository#Tips-for-mask-refinement-(optional-after-%3E15k-iters)
        loss_DA, loss_DA2, loss_GA, loss_DA_feat, loss_DA_code = self.define_loss(self.model.netDA, self.model.netDA2, netDA_feat, self.model.netD_code, self.model.netGA, real_A, fake_A, fake_sz64_A, distorted_A, "A", vggface_feat)
        loss_DB, loss_DB2, loss_GB, loss_DB_feat, loss_DB_code = self.define_loss(self.model.netDB, self.model.netDB2, netDB_feat, self.model.netD_code, self.model.netGB, real_B, fake_B, fake_sz64_B, distorted_B, "B", vggface_feat)

        loss_GA += 3e-3 * K.mean(K.abs(mask_A))
        loss_GB += 3e-3 * K.mean(K.abs(mask_B))

        w_fo = 0.01
        loss_GA += w_fo * K.mean(self.first_order(mask_A, axis=1))
        loss_GA += w_fo * K.mean(self.first_order(mask_A, axis=2))
        loss_GB += w_fo * K.mean(self.first_order(mask_B, axis=1))
        loss_GB += w_fo * K.mean(self.first_order(mask_B, axis=2))

        weightsDA = self.model.netDA.trainable_weights
        weightsDA2 = self.model.netDA2.trainable_weights
        weightsGA = self.model.netGA.trainable_weights
        weightsDB = self.model.netDB.trainable_weights
        weightsDB2 = self.model.netDB2.trainable_weights
        weightsGB = self.model.netGB.trainable_weights
        weightsD_code = self.model.netD_code.trainable_weights

        # Adam(..).get_updates(...)
        """
        # Using the following update function speeds up training time (per iter.) by ~15%.
        training_updates = Adam(lr=lrD, beta_1=0.5).get_updates(weightsDA+weightsDA2+weightsD_code,[],loss_DA+loss_DA2+loss_DA_code)
        netDA_train = K.function([distorted_A, real_A],[loss_DA+loss_DA2+loss_DA_code], training_updates)
        """
        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsDA,[],loss_DA)
        self.netDA_train = K.function([distorted_A, real_A],[loss_DA], training_updates)
        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsDA2,[],loss_DA2)
        self.netDA2_train = K.function([distorted_A, real_A],[loss_DA2], training_updates)
        training_updates = Adam(lr=self.lrG, beta_1=0.5).get_updates(weightsGA,[], loss_GA)
        self.netGA_train = K.function([distorted_A, real_A], [loss_GA], training_updates)

        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsDB,[],loss_DB)
        self.netDB_train = K.function([distorted_B, real_B],[loss_DB], training_updates)
        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsDB2,[],loss_DB2)
        self.netDB2_train = K.function([distorted_B, real_B],[loss_DB2], training_updates)
        training_updates = Adam(lr=self.lrG, beta_1=0.5).get_updates(weightsGB,[], loss_GB)
        self.netGB_train = K.function([distorted_B, real_B], [loss_GB], training_updates)

        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsD_code,[], loss_DA_code)
        self.netDA_code_train = K.function([distorted_A, real_A],[loss_DA_code], training_updates)
        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsD_code,[], loss_DB_code)
        self.netDB_code_train = K.function([distorted_B, real_B],[loss_DB_code], training_updates)
Example #21
def model():

    # VGG model initialization with pretrained weights

    vgg_model_cari = VGGFace(include_top=True, input_shape=(224, 224, 3))
    last_layer_cari = vgg_model_cari.get_layer('pool5').output
    for i in vgg_model_cari.layers[0:7]:
        i.trainable = False
    custom_vgg_model_cari = Model(vgg_model_cari.input, last_layer_cari)

    vgg_model_visu = VGGFace(include_top=True, input_shape=(224, 224, 3))
    last_layer_visu = vgg_model_visu.get_layer('pool5').output
    for i in vgg_model_visu.layers[0:7]:
        i.trainable = False
    custom_vgg_model_visu = Model(vgg_model_visu.input, last_layer_visu)
    # Input of the siamese network : Caricature and Visual images

    caricature = Input(shape=(224, 224, 3), name='caricature')
    visual = Input(shape=(224, 224, 3), name='visual')
    # Get the ouput of the net for caricature and visual images
    caricature_net_out = custom_vgg_model_cari(caricature)
    caricature_net_out = Flatten()(caricature_net_out)
    visual_net_out = custom_vgg_model_visu(visual)
    visual_net_out = Flatten()(visual_net_out)

    # Merge the two networks by taking the transformation P_C, P_V[Unique transformations of visual & Caricature] and W [shared transformation]
    caricature_net_out = Dense(4096, activation="relu")(caricature_net_out)
    visual_net_out = Dense(4096, activation="relu")(visual_net_out)

    # Unique Layer - Caricature
    P_C_layer = Dense(2084, activation="relu", name="P_C_layer")
    P_C = P_C_layer(caricature_net_out)

    # Unique Layer - Visual
    P_V_layer = Dense(2084, activation="relu", name="P_V_layer")
    P_V = P_V_layer(visual_net_out)

    # Shared layers
    W = Dense(
        2084, activation="relu", name="W", kernel_initializer='glorot_uniform')
    W_C = W(caricature_net_out)
    W_V = W(visual_net_out)

    d = keras.layers.Concatenate(axis=-1)([W_C, W_V])
    d_1 = Dense(2048, activation="relu")(d)
    d_2 = Dense(1024, activation="sigmoid")(d_1)
    d_3 = Dense(2, activation="softmax", name='verification')(d_2)

    # Merge Unique and Shared layers for getting the feature descriptor of the image
    feature_caricature = keras.layers.Concatenate(axis=-1)([P_C, W_C])
    feature_visual = keras.layers.Concatenate(axis=-1)([P_V, W_V])

    # CARICATURE Classification Network - Dense layers

    fc1_c = Dense(2048, activation="relu")(feature_caricature)
    drop1_c = Dropout(0.6)(fc1_c)
    fc2_c = Dense(1024, activation="relu")(drop1_c)
    drop2_c = Dropout(0.6)(fc2_c)
    fc3_c = Dense(
        nb_class, activation="softmax",
        name='caricature_classification')(drop2_c)

    # VISUAL Classification Network - Dense layers

    fc1_v = Dense(2048, activation="relu")(feature_visual)
    drop1_v = Dropout(0.6)(fc1_v)
    fc2_v = Dense(1024, activation="relu")(drop1_v)
    drop2_v = Dropout(0.6)(fc2_v)
    fc3_v = Dense(
        nb_class, activation="softmax", name='visual_classification')(drop2_v)

    model = Model([caricature, visual], [d_3, fc3_c, fc3_v])

    return model
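A minimal sketch of compiling the three-output model returned above; the optimizer, metrics, and loss weights are assumptions:

siamese_model = model()
siamese_model.compile(optimizer='adam',
                      loss={'verification': 'categorical_crossentropy',
                            'caricature_classification': 'categorical_crossentropy',
                            'visual_classification': 'categorical_crossentropy'},
                      loss_weights={'verification': 1.0,
                                    'caricature_classification': 0.5,
                                    'visual_classification': 0.5},
                      metrics=['accuracy'])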
import numpy as np
from keras_vggface.vggface import VGGFace
from keras.preprocessing import image
from keras_vggface import utils
from sklearn.neighbors import NearestNeighbors

if __name__ == "__main__":
    model = VGGFace(model='vgg16',
                    include_top=False)  # or VGGFace() as default
    img = image.load_img('matt.jpg', target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = utils.preprocess_input(x, version=1)  # or version=2
    preds = model.predict(x)
    print(preds)
    print(preds.shape)
import cv2
import numpy as np
import pandas as pd
from pathlib import Path
from keras_vggface.vggface import VGGFace

#face_cascade = cv2.CascadeClassifier(str(Path.cwd() / 'haarcascade_frontalface_alt.xml'))
#face_cascade = cv2.CascadeClassifier(str(Path.cwd() / 'lbpcascade_frontalface.xml'))
face_cascade = cv2.CascadeClassifier(
    str(Path.cwd() / 'lbpcascade_frontalface_improved.xml'))

Gesichter = False  # True shows only the cropped celebrity faces, False shows the original (resized) celebrity image
brt = 90  # value could be + or - for brightness or darkness
gray = False
p = 35  # frame size around detected face
width = height = 224  # size of the cropped image. Same as required for network
mitte = np.empty(shape=[0, 0])
mittleres_Gesicht_X = ()

resnet50_features = VGGFace(model='resnet50',
                            include_top=False,
                            input_shape=(224, 224, 3),
                            pooling='avg')  # pooling: None, avg or max

EMBEDDINGS_Celebs = pd.read_json(Path.cwd() / 'EMBEDDINGS_8k.json')

ret, frame = cap.read()
framemitte = np.shape(frame)[1] / 2


def splitDataFrameIntoSmaller(df, chunkSize):
    listOfDf = list()
    numberChunks = len(df) // chunkSize + 1
    for i in range(numberChunks):
        listOfDf.append(df[i * chunkSize:(i + 1) * chunkSize])
    return listOfDf
    features, labels = pickle.load(open(filename, mode='rb'))

    # Return the training data in batches of size <batch_size> or less
    return batch_features_labels(features, labels, batch_size)

# def MyActivationLayer(x):
#     return tf.nn.relu(x) * tf.nn.tanh(x)
# this could also be the output a different Keras model or layer
#input_tensor = Input(shape=(224, 224, 3))  # this assumes K.image_data_format() == 'channels_last'
input_tensor = tf.placeholder(tf.float32, [None, 224, 224, 3])
y = tf.placeholder(tf.float32,[None,85])

nb_class = 85
#hidden_dim = 512

vgg_model = VGGFace(weights='vggface',
                    input_tensor=input_tensor,
                    include_top=False,
                    input_shape=(224, 224, 3),
                    pooling='max')
last_layer = vgg_model.get_layer('pool5').output
x = Flatten(name='flatten')(last_layer)
#x1 = Dense(4096, activation='relu',kernel_initializer=RandomNormal(mean=0.0,stddev=0.01),bias_initializer='zeros')(x)
#x2 = Dropout(0.7)(x1)
#x3 = Dense(2048, activation='relu',kernel_initializer=RandomNormal(mean=0.0,stddev=0.01),bias_initializer='zeros')(x2)
#x4 = Dropout(0.7)(x3)
predictions = Dense(85, activation='softmax')(x)

abs11 = tf.abs(tf.subtract(tf.argmax(predictions, 1), tf.argmax(y, 1)))
test_features1 = test_features[1000:1200]
test_labels1 = test_labels[1000:1200]

sess = tf.Session()  # create the TensorFlow session
K.set_session(sess)
from keras.models import Model, Sequential
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
from PIL import Image
import numpy as np
from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input
from keras.preprocessing import image
import matplotlib.pyplot as plt
import keras
import os
from glob import glob
from random import randint
from keras_vggface.vggface import VGGFace

model = VGGFace(model='resnet50', include_top=False)
model.summary()


def preprocess_image(image_end):
    img = load_img(image_end, target_size=(224, 224))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    return img


def acessa_imgs():
    data = glob(
        '/home/mateus/Documents/visao/face-data/train/*')  # open the dataset folder
    imagens = []
Example #26
import numpy as np
from sklearn.model_selection import train_test_split
from keras_vggface.vggface import VGGFace
from keras.engine import Model
from keras.layers import Input
import keras
from keras.layers import Dense
# extracting file saved by data_prep.py
data = np.load('face_data.npz')
x, y = data['x'], data['y']
#categorical conversion of data label
y = keras.utils.to_categorical(y, 6)
# using transfer learning to reduce the time required to train the algo
resnet = VGGFace(model='resnet50', input_shape=(224, 224, 3))

layer_name = resnet.layers[-2].name
# adding our own custom layers to make the model work on our dataset
out = resnet.get_layer(layer_name).output
out = Dense(6, activation='softmax')(out)
resnet_4 = Model(resnet.input, out)
# freeze all layers except the newly added classification layer
for layer in resnet_4.layers[:-1]:
    layer.trainable = False

resnet_4.compile(loss='categorical_crossentropy',
                 optimizer='adam',
                 metrics=['accuracy'])
# check the final model architecture
print(resnet_4.summary())
# training the model we have created with our own dataset
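A minimal sketch of that training step, using the train_test_split import at the top of the example; the split ratio, epoch count, and batch size are assumptions:

x_train, x_test, y_train, y_test = train_test_split(x, y,
                                                    test_size=0.2,
                                                    random_state=42)
resnet_4.fit(x_train, y_train,
             validation_data=(x_test, y_test),
             epochs=10, batch_size=16)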
Example #27
# tensorflow compiling
# https://stackoverflow.com/a/54048937
#
# https://github.com/fo40225/tensorflow-windows-wheel
# https://github.com/fo40225/tensorflow-windows-wheel/blob/master/1.12.0/py36/CPU/avx2/tensorflow-1.12.0-cp36-cp36m-win_amd64.whl

from keras.applications.vgg16 import VGG16
from keras_vggface.vggface import VGGFace

image_size = 224

face_model = VGGFace(model='vgg16',
                     weights='vggface',
                     input_shape=(224, 224, 3))
face_model.summary()

for layer in face_model.layers:
    layer.trainable = False

from keras.models import Model, Sequential
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation

person_count = 5

last_layer = face_model.get_layer('pool5').output

x = Flatten(name='flatten')(last_layer)
x = Dense(1024, activation='relu', name='fc6')(x)
x = Dense(1024, activation='relu', name='fc7')(x)
out = Dense(person_count, activation='softmax', name='fc8')(x)
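The example stops after defining the new head; a minimal sketch of wrapping it into a trainable model (the optimizer choice is an assumption):

custom_face_model = Model(face_model.input, out)
custom_face_model.compile(optimizer='adam',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
custom_face_model.summary()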
Example #28
import numpy as np
from keras.engine import Model
from keras.layers import Flatten, Dense, Input
from keras_vggface.vggface import VGGFace
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical

#custom parameters
nb_class = 2
hidden_dim = 512

vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))
last_layer = vgg_model.get_layer('pool5').output
x = Flatten(name='flatten')(last_layer)
x = Dense(hidden_dim, activation='relu', name='fc6')(x)
x = Dense(hidden_dim, activation='relu', name='fc7')(x)
print(x.shape)
out = Dense(nb_class, activation='softmax', name='fc8')(x)
print(out.shape)
custom_vgg_model = Model(vgg_model.input, out)

for layer in custom_vgg_model.layers[:-3]:
    layer.trainable = False

print(custom_vgg_model.summary())
custom_vgg_model.compile(loss='binary_crossentropy',
                         optimizer=optimizers.SGD(lr=1e-3, momentum=0.9),
                         metrics=['accuracy'])

train_data_dir = 'cropped_data/train'
RESOURCES = "../resources/"
TRAIN_PATH = RESOURCES + "train_faces.pickle"
TEST_PATH = RESOURCES + "test_faces.pickle"
shape_img = (1, 224, 224, 3)


def generate_embs(pickle_file, vgg):
    with open(pickle_file, "rb") as handle:
        faces = pickle.load(handle)

    face_embs = []
    for face in tqdm(faces):
        face = np.reshape(face, shape_img)
        face_embs.append(vgg.predict(face))

    return face_embs


if __name__ == "__main__":
    model = VGGFace(model="resnet50")
    layer_name = "flatten_1"
    model = Model(model.input, model.get_layer(layer_name).output)

    print(model.summary())

    with open(RESOURCES + "train_embeddings.pickle", "wb") as handle:
        pickle.dump(generate_embs(TRAIN_PATH, model), handle)
    with open(RESOURCES + "test_embeddings.pickle", "wb") as handle:
        pickle.dump(generate_embs(TEST_PATH, model), handle)
Example #30
import os

import time

import numpy as np
from keras.engine import Model
from keras_vggface.vggface import VGGFace
from sklearn.externals import joblib

from helpers import load_image_references, load_image_from_disk
from settings import VGGFACE2_TRAIN_PATH

if __name__ == '__main__':
    layer_name = 'flatten_1'
    vgg_model = VGGFace(model='resnet50')
    out = vgg_model.get_layer(
        layer_name).output  # 2048-dimensional feature vector
    feature_descriptor_model = Model(vgg_model.input, out)

    image_references = load_image_references()

    for celebrity_images in image_references:
        folder_name = celebrity_images[0][0]
        images = []

        load_images_start_time = time.time()

        for image_reference in celebrity_images:
            file_name = image_reference[1]
            file_path = os.path.join(VGGFACE2_TRAIN_PATH, folder_name,
                                     file_name)