Example #1
def __init__(self):
    self.recognizer = InceptionResnetV1(pretrained="vggface2", device=env.device)
    self.recognizer.eval()
Example #2
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier

from facenet_pytorch import InceptionResnetV1
from GAN.models import Discriminator

resnet = InceptionResnetV1(pretrained='casia-webface').eval()  # eval mode for inference

gallery = pd.read_csv('path/to/gallery')

disc = Discriminator()
knn_clf = KNeighborsClassifier(weights='distance', metric='cosine')
knn_clf.fit(gallery.drop('names', axis=1), gallery['names'])


def get_prediction(image_bytes):
    tensor = prepare_image(image_bytes=image_bytes)
    embedding = resnet(tensor).detach().cpu().numpy()
    name = knn_clf.predict(embedding)
    score = disc(tensor).item()

    return name, score
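
# Note: prepare_image() is not defined in this snippet. A minimal sketch of
# what it might look like (an assumption, mirroring facenet-style
# preprocessing; the 160x160 size matches InceptionResnetV1's input):
import io
import torch
from PIL import Image


def prepare_image(image_bytes):
    image = Image.open(io.BytesIO(image_bytes)).convert('RGB').resize((160, 160))
    x = torch.tensor(np.float32(image)).permute(2, 0, 1)  # HWC -> CHW
    # Prewhiten: standardize pixel values as facenet expects
    x = (x - x.mean()) / x.std().clamp(min=1.0 / (float(x.numel()) ** 0.5))
    return x.unsqueeze(0)  # add batch dimension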
Example #3
class FaceDetector:
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)

    mtcnn = MTCNN(keep_all=True, device=device)
    resnet = InceptionResnetV1(pretrained='vggface2').eval()
Example #4
        # Distance based on cosine similarity
        dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
        norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(
            embeddings2, axis=1)
        similarity = dot / norm
        dist = np.arccos(similarity) / math.pi
    else:
        raise ValueError('Undefined distance metric %d' % distance_metric)

    return dist
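
# This appears to be the tail of the standard facenet distance() helper,
# where distance_metric 1 selects the angular distance above. A usage sketch
# (an assumption: the enclosing function is named `distance` and takes two
# (N, 512) embedding arrays):
#
#     emb1 = resnet(batch1).detach().cpu().numpy()
#     emb2 = resnet(batch2).detach().cpu().numpy()
#     d = distance(emb1, emb2, distance_metric=1)  # angular distance in [0, 1]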


device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device = 'cpu'  # override: force CPU for this run
print(device)
resnet = InceptionResnetV1(classify=False, pretrained='vggface2').eval()

# resnet = InceptionResnetV1(classify=False)
# resnet.load_state_dict(torch.load("saved_models\mixed_mask_triplet", map_location=device), strict=False)
mtcnn = MTCNN(image_size=160,
              margin=14,
              selection_method='center_weighted_size')
trans = transforms.Compose(
    [np.float32,
     transforms.ToTensor(), fixed_image_standardization])

resnet.eval()
path1 = "C:\\Users\\david\\Documents\\masked-face-recognition\\mixed_face_dataset_subset\\nini\\0_0_nini_0006.jpg"
path2 = "C:\\Users\\david\\Documents\\masked-face-recognition\\mixed_face_dataset_subset\\sunyaowei\\0_0_2.jpg"
img1 = Image.open(path1)
img2 = Image.open(path2)
Example #5
    for name in list(path.glob('*'))[:class_num]:
        images = list(name.glob('*.jpg'))
        for img in images:
            cropped = Crop.crop(str(img))
            if cropped is not None:
                img = Image.fromarray(cropped).convert('RGB')
                img = transform(img)
                standard.append(img)
                break  # keep only one reference image per class

    matrix = np.zeros((class_num, class_num), dtype=np.int32)
    s_features = []
    # net = GAN(train=False).cuda(3)
    # param = torch.load("/data/chenyangrui/resnetGan/resnetGan.pth")
    # net.netG.load_state_dict(param)
    net = InceptionResnetV1(pretrained='vggface2').cuda(3).eval()

    criterion = nn.MSELoss()

    for each in standard:
        net.eval()
        x = torch.unsqueeze(each, 0).cuda(3)
        # feature, _ = net(x)
        feature = net(x)
        s_features.append(feature)

    print('test start')
    for i, d in enumerate(ds):
        net.eval()
        (imgs, labels) = d
        imgs = imgs.cuda(3)
Example #6
            label = labels_dict[label]
            # Save video embeddings to single file
            df = pd.DataFrame(columns=['filename', 'video_embedding', 'label'])
            filename = os.path.basename(video_path)
            path = os.path.join(dest_path, filename.split('.')[0] + '.csv')
            df.loc[0] = [filename, all_faces_embeddings, label]
            df.to_pickle(path)  # pickled, despite the .csv extension
            # Free up CUDA memory after work is done.
            torch.cuda.empty_cache()


if __name__ == '__main__':
    dest_path = 'dataset\\video_embeddings'
    dataset = VideoDataset('data\\train_data\\to_add.json', 2, check_path=dest_path)
    dataloader = DataLoader(
        dataset,
        # Keep batch size always > 1 since the custom collate function
        # will skip None videos and it will fall back to the rest
        batch_size=6,
        # sampler=Subset[0, 1, 2],
        num_workers=3,
        pin_memory=True,
        collate_fn=collate_fn
    )
    # Load Face detector
    face_detector = MTCNN(margin=14, device=DEVICE).eval()
    # Load facial recognition model
    feature_extractor = InceptionResnetV1(pretrained='vggface2', device=DEVICE).eval()
    # Extract faces and embeddings
    extract_faces_and_embeddings(dataloader, face_detector, feature_extractor, dest_path)
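
# The custom collate_fn used above is not shown. A minimal sketch of a collate
# function that skips None videos (an assumption) is:
#
#     def collate_fn(batch):
#         # Drop samples whose video failed to load; with batch_size > 1 the
#         # rest of the batch still gets processed.
#         return [sample for sample in batch if sample is not None]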
Example #7
import torch
import cv2
from torchvision import datasets
from torch.utils.data import DataLoader
from PIL import Image
from facenet_pytorch import MTCNN, InceptionResnetV1

########################################################################################################################
SRC_VIDEO_PATH = 0  # 0 = default webcam; alternatively "./database/HumanVideo.mp4"
FACE_DATABASE_PATH = '../ImageDatabase/Faces'
########################################################################################################################
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))

mtcnn = MTCNN(image_size=160, margin=0, min_face_size=20,
              device=device)  # initializing mtcnn for face detection
resnet = InceptionResnetV1(pretrained='vggface2').eval()  # initializing resnet for face-to-embedding conversion


def create():
    dataset = datasets.ImageFolder(FACE_DATABASE_PATH)  # photos folder path
    idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}  # map class indices back to people's names (folder names)

    print(idx_to_class)

    def collate_fn(x):
        return x[0]

    loader = DataLoader(dataset, collate_fn=collate_fn)
Example #8
def config_facenet_model():
    ### Facenet for feature extraction
    facenet = InceptionResnetV1(pretrained='vggface2').eval()

    return facenet
Example #9
import torch
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
from scipy.spatial.distance import cosine
from facenet_pytorch import MTCNN, InceptionResnetV1
#%%
device = torch.device(
    "cpu")  #'cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
#%%
mtcnn = MTCNN(device=device)
#%%
resnet = InceptionResnetV1(pretrained='casia-webface').eval().to(device)
#%%
img = Image.open("dataset/matthew2.jpg")
img_cropped1 = mtcnn(img)
plt.imshow(img_cropped1.permute((1, 2, 0)))
#%%
img = Image.open("dataset/matthew3.jpg")
img_cropped2 = mtcnn(img)
plt.imshow(img_cropped2.permute((1, 2, 0)))
#%%
img_embedding1 = resnet(
    img_cropped1.unsqueeze(0).to(device)).cpu().detach().numpy()
img_embedding2 = resnet(
    img_cropped2.unsqueeze(0).to(device)).cpu().detach().numpy()

dist = cosine(img_embedding1[0], img_embedding2[0])  # scipy's cosine expects 1-D vectors
Example #10
        os.mkdir('tmp/' + d)
    except OSError as e:
        print('{} already exists'.format('tmp/' + d))

# Initialize variables
# This is already done in `app.py`, but just in case
app = Flask(__name__, static_url_path="/tmp", static_folder="tmp")
app.secret_key = "secret key"  # Flask requires a secret key
app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024  # 50 MB

ALLOWED_IMAGES = set(['jpg', 'jpeg'])  # allowed image types (check whether PNG would work)
ALLOWED_VIDEOS = set(['mp4', 'mov'])

#GLOBAL VARIABLES
MODEL = InceptionResnetV1(pretrained='vggface2').eval()  # Preload the resnet
COUNTER = 0
DEPLOY = 0


# Handy functions
def allowed_file(filename, allowed):
    '''
    Determine whether the uploaded file has an appropriate extension.
    '''
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in allowed


###############################################################################################################################
# Home page
@app.route('/')
Example #11
transform = transforms.Compose([
    transforms.Resize([img_x, img_y]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

selected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()
# Load face detector
mtcnn = MTCNN(margin=14, keep_all=False, factor=0.5, device=device).eval()
# Define face detection pipeline
detection_pipeline = DetectionPipeline(detector=mtcnn,
                                       batch_size=60,
                                       resize=0.25)

# Load facial recognition model
resnet = InceptionResnetV1(pretrained='vggface2', device=device).eval()
#resnet=InceptionV3(input_shape=img_shape, weights='imagenet', include_top=False, pooling='avg')
#X = torch.zeros([1334,30,3,256,342],dtype=torch.float64).to(device)
X = []
#print(X.shape)
start = time.time()
n_processed = 0
flag1 = 0
with torch.no_grad():
    for i, filename in tqdm(enumerate(filenames), total=len(filenames)):
        try:

            # Load frames and find faces
            faces = detection_pipeline(filename)
            #X[i,:,:,:,:]=faces
            #waste=process_faces(faces, resnet)
Example #12
from PIL import Image

from facenet_pytorch import MTCNN, InceptionResnetV1

import torch

img = Image.open('./1.jpg')

mtcnn = MTCNN()

# Get cropped and prewhitened image tensor
img_cropped = mtcnn(img, save_path='./2.jpg')

model = InceptionResnetV1(pretrained=None, num_classes=10575, device="cuda:0")

state_dict = {}

cached_file1 = "./20180408-102900-casia-webface-logits.pt"
cached_file2 = "20180408-102900-casia-webface-features.pt"

state_dict.update(torch.load(cached_file1))
state_dict.update(torch.load(cached_file2))

model.load_state_dict(state_dict)

#print(model)
model.eval()

#model.classify = True

input_img = img_cropped.cuda()
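
# The snippet ends before the forward pass; computing the embedding would
# presumably continue along these lines:
with torch.no_grad():
    embedding = model(input_img.unsqueeze(0))  # add batch dimension
print(embedding.shape)  # (1, 512) with classify left False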
Example #13
def post_init(self):
    from facenet_pytorch import InceptionResnetV1
    self.model = InceptionResnetV1(pretrained=self.pretrained).eval()
    self.to_device(self.model)
Example #14
    def __init__(self, input_list, target_list, mask_list, optimizer):
        """
        Class initialization with lists of preprocessed inputs, targets, and masks
        There are 3 optimizer options: SGD, Adam, Adamax


        Parameters
        ----------
        input_list : list[PIL.Image]
            list of inputs to train on

        target_list : list[PIL.Image]
            list of targets

        mask_list : list[np.array]
            list of preprocessed masks to attach to the input

        optimizer : str
            takes in either 'sgd', 'adam', or 'adamax'
        """
        self.input_list = input_list
        self.target_list = target_list
        self.mask_list = mask_list

        self.input_tensors = []
        self.input_emb = []
        self.target_emb = []
        self.losses = []

        # Necessary tools for training: normalization, image + delta applier, and
        # facial recognition model
        self.norm = Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
        self.norm.to(device)
        self.apply = Applier()
        self.apply.to(device)
        self.resnet = InceptionResnetV1(pretrained='vggface2').eval()
        self.resnet.to(device)

        # Read all inputs in.  Embeddings will be used for loss calculation, tensors
        # will be used for actual training
        for image, _ in self.input_list:
            self.input_emb.append(
                self.resnet(self.norm(tensorize(image).cuda())))
            self.input_tensors.append(tensorize(image))

        # Create target embeddings for loss calculation
        for image, _ in self.target_list:
            self.target_emb.append(
                self.resnet(self.norm(tensorize(image).cuda())))

        # Fall back to Adam when no known optimizer name is given
        if optimizer == 'sgd':
            self.opt = optim.SGD(self.mask_list,
                                 lr=1e-1,
                                 momentum=0.9,
                                 weight_decay=0.0001)
        elif optimizer == 'adam':
            self.opt = optim.Adam(self.mask_list,
                                  lr=1e-1,
                                  weight_decay=0.0001)
        elif optimizer == 'adamax':
            self.opt = optim.Adamax(self.mask_list,
                                    lr=1e-1,
                                    weight_decay=0.0001)
        else:
            print("No optimizer chosen, reverting to Adam")
            self.opt = optim.Adam(self.mask_list, lr=1e-1, weight_decay=0.0001)
Example #15
from facenet_pytorch import InceptionResnetV1

InceptionResnetV1(pretrained='vggface2')  # valid pretrained values: 'vggface2' or 'casia-webface'
Example #16
        return i
    else:
        raise NameError("No model saved file with this epoch")


# In[147]:

LAST_EPOCH = 0

RESUME_MODEL = False

LEARNING_RATE = 0.0002

# In[158]:

resnet = InceptionResnetV1(pretrained='vggface2').eval().to(
    device)  # pretrained network
for param in resnet.parameters():
    param.requires_grad = False

G = nn.DataParallel(Generator()).to(device)
D1 = nn.DataParallel(Discriminator_faces()).to(
    device
)  # This discriminator distinguishes the image (img) from the generator's rotated version of it
D2 = nn.DataParallel(Discriminator_marks()).to(
    device
)  # This discriminator distinguishes the image rotated by G from landmarks_wanted

if RESUME_MODEL:
    resume_model(G, "G", "models_save", LAST_EPOCH)
    resume_model(D1, "D1", "models_save", LAST_EPOCH)
    resume_model(D2, "D2", "models_save", LAST_EPOCH)
Example #17
import random
import csv
from boundingboxutils import expandBox, inflate

from attrgenerator import makeGoodAttr, convertToAttGanAttr
import torch
from facenet_pytorch import MTCNN, InceptionResnetV1

device = torch.device('cpu')

mtcnn = MTCNN(image_size=160,
              margin=0,
              min_face_size=20,
              thresholds=[0.6, 0.7, 0.7],
              factor=0.709,
              post_process=True,
              device=device)

resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)


def collate_fn(x):
    return x[0]


imgfolder = '5941'
imgfolder = 'CelebA_female'
iteration = 1
isMale = False

#print(int(178/12),int(218/12))


#Attributes:
Example #18
# importing libraries
from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
from torchvision import datasets
from torch.utils.data import DataLoader
from PIL import Image

mtcnn = MTCNN(image_size=240, margin=0,
              min_face_size=20)  # initializing mtcnn for face detection
#resnet = InceptionResnetV1(pretrained='vggface2').eval()  # initializing resnet for face-to-embedding conversion

resnet = InceptionResnetV1(classify=True, num_classes=1001).eval()

dataset = datasets.ImageFolder('./images')  # photos folder path
idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}  # map class indices back to people's names (folder names)

print(idx_to_class)


def collate_fn(x):
    return x[0]


loader = DataLoader(dataset, collate_fn=collate_fn)

face_list = []  # cropped faces from the photos folder
name_list = []  # names corresponding to the cropped photos
embedding_list = []  # outputs produced by resnet from the cropped faces
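
# The loop that fills these lists is cut off here. Following the usual
# facenet_pytorch pattern, it would look roughly like this (a sketch; note
# that with classify=True the model outputs class logits, not embeddings):
for img, idx in loader:
    face, prob = mtcnn(img, return_prob=True)  # cropped face plus detection probability
    if face is not None and prob > 0.90:
        emb = resnet(face.unsqueeze(0)).detach()  # no gradients needed at inference
        face_list.append(face)
        name_list.append(idx_to_class[idx])
        embedding_list.append(emb)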
Example #19
client = storage.Client()

bucket = client.get_bucket("face-app-c3aee.appspot.com")
if not isdir(model_dir('')):
    pathlib.Path(model_dir('')).mkdir(parents=True, exist_ok=True)

if not isfile(model_dir("vggface2_DG3kwML46X.pt")):
    bucket.blob('face_model/vggface2_DG3kwML46X.pt') \
        .download_to_filename(model_dir("vggface2_DG3kwML46X.pt"))

if not isfile(model_dir("vggface2_G5aNV2VSMn.pt")):
    bucket.blob('face_model/vggface2_G5aNV2VSMn.pt') \
        .download_to_filename(model_dir("vggface2_G5aNV2VSMn.pt"))

model: InceptionResnetV1 = InceptionResnetV1(pretrained="vggface2")
model.eval()
json_encoder = JSONEncoder()


def prepare_image(image_string):
    image_data = b64decode(image_string)
    image: Image = Image.open(BytesIO(image_data)).resize((160, 160), 2)  # 2 = bilinear resampling
    return prewhiten(to_tensor(float32(image)))


def prewhiten(x):
    mean = x.mean()
    std = x.std()
    std_adj = std.clamp(min=1.0 / (float(x.numel())**0.5))
    y = (x - mean) / std_adj
    return y
Example #20
from Configs import Global_Config

IMAGE_SIZE = 220
mtcnn = MTCNN(image_size=IMAGE_SIZE,
              margin=0,
              min_face_size=20,
              thresholds=[0.6, 0.7, 0.7],
              factor=0.709,
              post_process=True,
              device=Global_Config.device)
to_pil = transforms.ToPILImage(mode='RGB')
crop_transform = transforms.Compose(
    [transforms.Resize(IMAGE_SIZE),
     transforms.CenterCrop(IMAGE_SIZE)])

resnet = InceptionResnetV1(pretrained='vggface2',
                           classify=False).eval().to(Global_Config.device)


class ID_Encoder(torch.nn.Module):
    def __init__(self):
        super(ID_Encoder, self).__init__()

    def crop_tensor_according_to_bboxes(self, images, bboxes):
        cropped_batch = []
        for idx, image in enumerate(images):
            try:
                cropped_image = crop_transform(
                    image[:,
                          int(bboxes[idx][0][1]):int(bboxes[idx][0][3]),
                          int(bboxes[idx][0][0]):int(bboxes[idx][0][2])
                          ].unsqueeze(0))
Example #21
                    num_workers=workers,
                    batch_size=batch_size,
                    collate_fn=training.collate_pil)

for i, (x, y) in enumerate(loader):
    mtcnn(x, save_path=y)
    print('\rBatch {} of {}'.format(i + 1, len(loader)), end='')

# Remove mtcnn to reduce GPU memory usage
del mtcnn

# Define Inception Resnet V1 module
# noinspection PyUnresolvedReferences
resnet = InceptionResnetV1(
    classify=True,
    # pretrained='vggface2',
    pretrained=None,
    num_classes=len(dataset.class_to_idx)).to(device)

# Define optimizer, scheduler, dataset, and data_loader
optimizer = optim.Adam(resnet.parameters(), lr=0.001)
scheduler = MultiStepLR(optimizer, [5, 10])

trans = transforms.Compose(
    [np.float32,
     transforms.ToTensor(), fixed_image_standardization])
dataset = datasets.ImageFolder(data_dir + '_cropped', transform=trans)
img_inds = np.arange(len(dataset))
np.random.shuffle(img_inds)
train_inds = img_inds[:int(0.8 * len(img_inds))]
val_inds = img_inds[int(0.8 * len(img_inds)):]
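
# The fragment stops after splitting the indices; in the facenet_pytorch
# training example they feed SubsetRandomSampler-based loaders, roughly:
from torch.utils.data import SubsetRandomSampler

train_loader = DataLoader(
    dataset,
    num_workers=workers,
    batch_size=batch_size,
    sampler=SubsetRandomSampler(train_inds)
)
val_loader = DataLoader(
    dataset,
    num_workers=workers,
    batch_size=batch_size,
    sampler=SubsetRandomSampler(val_inds)
)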
Example #22
File: encoder.py  Project: ceynri/FIC
def __init__(self):
    super(Encoder, self).__init__()
    self.facenet = InceptionResnetV1(pretrained='vggface2')
    # Freeze facenet so its weights are not updated during training
    for param in self.facenet.parameters():
        param.requires_grad = False
Example #23
def check_list(request, format=None):
    if request.method == 'GET':
        images = Check.objects.all()
        serializer = CheckSerializer(images, many=True)
        return Response(serializer.data)

    elif request.method == 'POST':
        serializer = CheckSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            # Create a folder
            sid = request.data.get('title')

            # Check
            # initializing mtcnn for face detection
            mtcnn = MTCNN(image_size=240, margin=0, min_face_size=20)
            # initializing resnet for face-to-embedding conversion
            resnet = InceptionResnetV1(pretrained='vggface2').eval()

            start = time.time()

            print("checkpoint 0")

            path = "media/check/" + sid
            file_list = os.listdir(path)
            img = Image.open(path + "/" + file_list[0])

            print("checkpoint 1:", time.time() - start)
            img_cropped = mtcnn(img,
                                save_path="media/croppedCheck/" + sid +
                                "/cropped_" + sid + ".jpg")
            print("checkpoint 2:", time.time() - start)

            # img_path = location of photo, data_path = location of data.pt
            def face_match(img_path, data_path):
                # getting the embedding matrix of the given img
                img = Image.open(img_path)
                # returns cropped face and probability
                face, prob = mtcnn(img, return_prob=True)
                # detach() so no gradients are tracked
                emb = resnet(face.unsqueeze(0)).detach()

                saved_data = torch.load(data_path)  # loading data.pt file
                # print(saved_data)
                embedding_list = saved_data[0]  # getting embedding data
                name_list = saved_data[1]  # getting list of names
                dist_list = []  # list of matched distances, minimum distance is used to identify the person

                for idx, emb_db in enumerate(embedding_list):
                    dist = torch.dist(emb, emb_db).item()
                    dist_list.append(dist)

                # print(len(embedding_list))
                # print(embedding_list[0].shape)
                # print(type(embedding_list[0]))

                idx_min = dist_list.index(min(dist_list))
                return (name_list[idx_min], min(dist_list))

            print("checkpoint 3:", time.time() - start)
            result = face_match(
                "media/croppedCheck/" + sid + "/cropped_" + sid + ".jpg",
                'golo.pt')
            print("checkpoint 4:", time.time() - start)

            print(result)

            checked = False
            if sid == result[0]:
                checked = True

            dummy_data = {
                "title": "student check",
                "description": "dd",
                "check_list": [
                    {
                        "id": sid,
                        "check": checked
                    },
                ],
                "status": "OK"
            }
            # Delete
            print("checkpoint 5:", time.time() - start)

            return JsonResponse(dummy_data, status=status.HTTP_201_CREATED)
            # return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    elif request.method == 'DELETE':
        serializer = CheckSerializer(data=request.data)
        if serializer.is_valid():

            # Create a folder
            sid = request.data.get('title')

            path1 = "media/check/" + sid + '/' + sid + '.jpg'
            path2 = "media/croppedCheck/" + sid + '/cropped_' + sid + '.jpg'

            os.remove(path1)
            os.remove(path2)
            deleted_data = {
                "title": sid,
                "description": "deleted " + sid,
                "status": "OK"
            }

            return JsonResponse(deleted_data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
Example #24
def __init__(self, device, image_size=109):
    self.device = device
    self.mtcnn = MTCNN(image_size=image_size, device=self.device)
    self.resnet = InceptionResnetV1(pretrained='vggface2').eval().to(self.device)
Example #25
from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
import numpy as np
import mmcv, cv2
from PIL import Image, ImageDraw
from IPython import display

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))

mtcnn = MTCNN(device=device)
model = InceptionResnetV1(pretrained='vggface2').eval()

img = Image.open('sample.jpg')

img_cropped = mtcnn(img)

model.classify = True
img_probs = model(img_cropped.unsqueeze(0))
print(img_probs)

emb = []


def get_embedding(filename):
    img = Image.open(filename)
    img_cropped = mtcnn(img)

    model.classify = True
    img_probs = model(img_cropped.unsqueeze(0))
    emb.append(img_probs)
Example #26
              thresholds=[0.6, 0.7, 0.7],
              factor=0.709,
              device=device)

aligned = []
indexes = []
names = []

for x, y in loader:
    x_aligned, prob = mtcnn(x, return_prob=True)
    if x_aligned is not None:
        aligned.append(x_aligned)
        indexes.append(y)
        names.append(dataset.idx_to_class[y])

resnet = InceptionResnetV1(pretrained='vggface2',
                           classify=True).eval().to(device)  # classify=True yields class logits, not 512-d embeddings

aligned = torch.stack(aligned).to(device)
embeddings = resnet(aligned).detach().cpu()

from sklearn import svm

clf = svm.SVC(kernel='linear', probability=True)
clf.fit(embeddings.tolist(), indexes)

if "classifier.pkl" in os.listdir():
    os.remove("classifier.pkl")

if "classifier.pkl" in os.listdir():
    os.remove("classifier.pkl")
Example #27
        img = img[..., :3]
    return Image.fromarray(img, 'RGB')


# Faces with distance > threshold will be considered different.
def similarity_threshold(mtcnn, model, PATH):
    minimum = sys.maxsize
    for img_base in os.listdir(PATH):
        for img_target in os.listdir(PATH):
            if img_target == img_base:
                continue
            base = mtcnn(normalize_image(Image.open(PATH + '/' + img_base)))
            target = mtcnn(normalize_image(Image.open(PATH + '/' +
                                                      img_target)))
            base_emb = model(base.unsqueeze(0))
            target_emb = model(target.unsqueeze(0))
            dist = euclidean_distance(base_emb, target_emb)
            minimum = min(minimum, dist)
    return minimum


#cur = os.getcwd()  # Get current working directory

mtcnn = MTCNN(image_size=160, margin=32, device='cuda')
model = InceptionResnetV1(
    pretrained='casia-webface').eval().to('cuda')  # pre-trained on CASIA; same device as the MTCNN output

threshold = similarity_threshold(mtcnn, model, PATH)

print('Threshold: ' + str(threshold))
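
# euclidean_distance() is not defined in this fragment; a minimal version over
# the two embedding tensors might be:
import torch

def euclidean_distance(emb1, emb2):
    # L2 distance between two (1, 512) embedding tensors
    return torch.dist(emb1, emb2).item()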
Example #28
def train_model(db_id):
    start_epoch = 0
    batch_size = 32
    epochs = 5
    workers = 2
    train_transform = transforms.Compose([
             transforms.ToPILImage(),
             transforms.RandomHorizontalFlip(p=0.5),
             np.float32,
             transforms.ToTensor(),
             fixed_image_standardization
    ])
    images, num_classes = get_dataset(db_id)
    dataset = MyCustomDataset(images, train_transform)
    train_loader = DataLoader(
        dataset,
        num_workers=workers,
        batch_size=batch_size
    )
    model = InceptionResnetV1(
        classify=True,
        num_classes=num_classes
    ).to(device)
    checkpoint_path, checkpoint_file, label_dict = get_saved_model(db_id)
    if checkpoint_path is not None and os.path.exists(checkpoint_path):
        checkpoint = torch.load(checkpoint_file)
        model.load_state_dict(checkpoint['net'])
        start_epoch = checkpoint['epoch']
    else:
        checkpoint_path = "./checkpoint"

    optimizer = optim.SGD(model.parameters(), lr=0.1)
    scheduler = MultiStepLR(optimizer, [60, 120, 180])
    loss_fn = torch.nn.CrossEntropyLoss()
    metrics = {
      'fps': training.BatchTimer(),
      'acc': training.accuracy
    }

    writer = SummaryWriter(log_dir=None, comment='', purge_step=None, max_queue=10, flush_secs=600, filename_suffix='face_rec_log_')
    writer.iteration, writer.interval = 1, 10

    checkpoint_save_name = 'face_rec_test'
    ckp_dir = checkpoint_path
    ckp_name = ''
    for epoch in range(epochs):
        training.pass_epoch(
              model, loss_fn, train_loader, optimizer, scheduler,
              batch_metrics=metrics, show_running=False, device=device,
              writer=writer
        )

        if (epoch + 1) % 50 == 0:
            print('Saving..')
            state = {
                'net': model.state_dict(),
                'epoch': epoch,
                'is_final': 0
            }
            ckp_name = checkpoint_save_name + '_' + str(epoch + 1)
            os.makedirs(ckp_dir, exist_ok=True)
            torch.save(state, ckp_dir + '/' + ckp_name + '.pth')

    # Close the writer once all epochs are done; closing it inside the loop
    # would end logging after the first epoch.
    writer.close()

    state = {
        'net': model.state_dict(),
        'epoch': epochs,
        'is_final' : 1
    }
    ckp_name = checkpoint_save_name+'_final'
    os.makedirs(ckp_dir, exist_ok=True)
    save_path = ckp_dir+'/'+ckp_name+'.pth'
    torch.save(state, save_path)
    update_model(db_id, save_path)
Example #29
args = parser.parse_args()

for p in vars(args).items():
    print('  ', p[0] + ': ', p[1])
print('\n')

root = args.data_dir

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Model on ' + str(device))

# Define the model

if args.model == 'facenet':
    # ================ code for facenet ===========================
    model = InceptionResnetV1(pretrained='vggface2').to(device).eval()
    modelName = 'facenet'
    model_input_size = (160, 160)

elif args.model == 'facenet-webface':
    # ================ code for facenet ===========================
    model = InceptionResnetV1(pretrained='casia-webface').to(device).eval()
    modelName = 'facenet-webface'
    model_input_size = (160, 160)

elif args.model == 'sphereface':

    # ================ code for sphereface ===========================
    import models.net_sphere
    model = getattr(models.net_sphere, 'sphere20a')()
    model.load_state_dict(torch.load('sphereface.pth'))
Example #30
            gender = int(s[2])
            self._db.append((filename, age, gender))

    def __getitem__(self, k):
        filename, age, gender = self._db[k]

        I = cv2.imread(filename)
        I = I[:, :, ::-1]  # BGR -> RGB
        I = cv2.resize(I, (160, 160))
        I = I[None]  # add batch dimension
        I = np.transpose(I, (0, 3, 1, 2))  # NHWC -> NCHW
        X = prewhiten(I)
        return X, torch.tensor(age), torch.tensor(gender)

    def __len__(self):
        return len(self._db)


# readDataLabel(
#     filepath='data/Adience/fold_0_data.txt',
#     imgprefix='/home/zhangxk/AIProject/数据集与模型/faces',
#     target='train.txt')

if __name__ == "__main__":
    model = InceptionResnetV1(pretrained='casia-webface').eval()

    trainset = Gender_Dataset(model, 'train.txt')
    batch_size = 64
    trainloader = DataLoader(trainset, batch_size, shuffle=True, num_workers=4)
    for x in trainloader:
        print(x[0].shape)