Code Example #1
    def get_frame(self):
        # NOTE: constructing MTCNN on every call is expensive; in practice
        # the detector should be created once (e.g. in __init__) and reused.
        detector = mtcnn.MTCNN()

        # Grab the next frame from the capture device.
        ret, frame = self.video.read()
        if not ret:
            return None  # camera returned no frame

        # detect_faces() returns a list of dicts; 'box' is (x, y, w, h).
        faces = [face['box'] for face in detector.detect_faces(frame)]

        # Draw a green rectangle around every detected face.
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.putText(frame, "faces : " + str(len(faces)), (2, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

        # Encode the raw OpenCV frame as JPEG and return the bytes.
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
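
The get_frame method above is the frame producer of a camera wrapper class. A minimal sketch of how such a class is typically hooked up to a Flask MJPEG streaming route; Flask, the /video_feed route, and the VideoCamera class name are illustrative assumptions, not part of the original project:

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # Yield each JPEG frame in multipart format (MJPEG over HTTP).
    while True:
        frame = camera.get_frame()
        if frame is None:
            break
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    # VideoCamera (hypothetical) wraps cv2.VideoCapture and exposes get_frame().
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')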
Code Example #2
File: test1.py  Project: ParthPratim/fre_layer
def generate_augmented_images():
    # train_dir, usr_map and labelencoder_y_1 are module-level globals in test1.py.
    o_mtcnn = MTCNN.MTCNN()
    tr_labels, tr_imgs = [], []
    celebs = 0
    print("LOADING....TRAINING SET")
    for celeb in os.listdir(train_dir):
        celeb_train = os.path.join(train_dir, celeb)
        celebs = celebs + 1
        usr_map[celeb] = celebs
        for train_img in os.listdir(celeb_train):
            img_dir = os.path.join(celeb_train, train_img)
            img = cv2.imread(img_dir)
            d = o_mtcnn.detect_faces(img)
            if not d:
                continue  # skip images in which no face was detected
            bbox = d[0]['box']
            keypoints = d[0]['keypoints']
            # Angle between the eyes, used to rotate the face upright.
            dY = keypoints['right_eye'][1] - keypoints['left_eye'][1]
            dX = keypoints['right_eye'][0] - keypoints['left_eye'][0]
            angle = np.degrees(np.arctan2(dY, dX)) - 180
            img = rotate_by_angle(img, angle)
            # Crop the detected box and normalize it to 150x150.
            width = bbox[2]
            height = bbox[3]
            f_img = resize(
                crop(img, [width, height], cords=(bbox[0], bbox[1])),
                (150, 150))
            cv2.imwrite(img_dir, f_img)
            tr_imgs.append(f_img)
            tr_labels.append(celebs)
            # Expand the training set with augmented variants of the face.
            augmentation = ImageAugmentation(f_img)
            augmented_imgs = augmentation.process()
            for imgs in augmented_imgs:
                for aug in imgs:
                    tr_imgs.append(aug[0])  # aug is an (image, name) pair
                    tr_labels.append(celebs)
    print("LOADED....TRAINING SET")
    tr_labels = labelencoder_y_1.fit_transform(tr_labels)
    return (tr_imgs, tr_labels, celebs)
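
rotate_by_angle, crop, and resize are helpers the snippet relies on but does not define. A minimal sketch of plausible OpenCV equivalents; these are illustrative assumptions, not the project's actual implementations:

import cv2

def rotate_by_angle(img, angle):
    # Rotate about the image center, keeping the original frame size.
    h, w = img.shape[:2]
    m = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
    return cv2.warpAffine(img, m, (w, h))

def crop(img, size, cords):
    # size is [width, height]; cords is the (x, y) top-left corner of the box.
    x, y = cords
    return img[y:y + size[1], x:x + size[0]]

def resize(img, shape):
    return cv2.resize(img, shape)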
Code Example #3
File: test.py  Project: Chilydream/FaceExtraction
import os
# TF_CPP_MIN_LOG_LEVEL must be set before TensorFlow is imported,
# otherwise the log-level suppression has no effect.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf

import numpy as np
import cv2
from Config.TrainConfig import TRAIN_PARAMETER
from Config.DatasetConfig import DATASET_PARAMETER
from mtcnn import mtcnn

# Detect the first face in the image and save the cropped region.
image = cv2.imread("1.png")
detector = mtcnn.MTCNN()
info_list = detector.detect_faces(image)  # assumes at least one face is found
bbox = info_list[0]['box']  # (x, y, width, height)
face = image[bbox[1]:bbox[1]+bbox[3], bbox[0]:bbox[0]+bbox[2]]
cv2.imwrite("face.png", face)
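
One caveat: mtcnn can return boxes whose x or y is negative for faces touching the image border, and a negative start index makes the NumPy slice above wrap around. A small hedged guard, an addition rather than part of the original file:

x, y, w, h = info_list[0]['box']
x, y = max(0, x), max(0, y)  # clamp: MTCNN may report negative coordinates
face = image[y:y + h, x:x + w]
cv2.imwrite("face.png", face)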



# people_list = os.listdir(TRAIN_PARAMETER['data_dir'])
# print(people_list)
# video_list = []
# video_id_list = []
# for ipeople, people in enumerate(people_list):
# 	video_dir = os.path.join(TRAIN_PARAMETER['data_dir'], people, 'videos')
# 	if not os.path.exists(video_dir):
# 		continue
# 	video_filelist = os.listdir(video_dir)
Code Example #4
import argparse
import torch.optim as optim
import pandas as pd
import os
import time
import logging
from mtcnn import mtcnn

# Change 1: Multi GPU + VGG -> cuda:0, global_model -> cuda:1
# Change 2: added a sigmoid after the last layer of ResNet and the global model
# Change 3: target is [1.0] when the answer is REAL, [0.0] when FAKE
# Change 4: optimizer Adam -> SGD
# Change 5: global model => CONV1D, CONV2D
# Change 6: loss MSE -> BCE
# Change 7: print -> saved to a log file

detector = mtcnn.MTCNN()  # Face detection model

# Argument parser
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--batchSize',
                    type=int,
                    default=4,
                    help='training batch size')
parser.add_argument('--nEpochs',
                    type=int,
                    default=2,
                    help='number of epochs to train for')
parser.add_argument('--lr',
                    type=float,
                    default=0.00001,
                    help='learning rate. Default=0.00001')
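
The change notes above describe a sigmoid output trained with BCE loss against [1.0]/[0.0] REAL/FAKE targets using SGD. A minimal hedged sketch of that loss/label scheme; the stand-in linear classifier and dummy features are assumptions, not the project's ResNet or global model:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(2048, 1), nn.Sigmoid())   # stand-in classifier
criterion = nn.BCELoss()                                   # Change 6: MSE -> BCE
optimizer = torch.optim.SGD(model.parameters(), lr=1e-5)   # Change 4: Adam -> SGD

features = torch.randn(4, 2048)                      # dummy batch of features
target = torch.tensor([[1.0], [0.0], [1.0], [0.0]])  # REAL = 1.0, FAKE = 0.0

optimizer.zero_grad()
loss = criterion(model(features), target)
loss.backward()
optimizer.step()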
Code Example #5
        return x  # (snippet starts mid-class: the tail of the custom Linear module's forward)


my_lin_net = Linear(2048)  # custom head defined above
my_net = models.resnet50()
my_net.fc = my_lin_net     # replace ResNet-50's final fc layer

my_net.load_state_dict(
    torch.load(Path('./net/resnet50.pth'), map_location=torch.device("cpu")))
my_net.eval()

transform = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

cap = cv2.VideoCapture(0)

mt_nn = mtcnn.MTCNN(Path("mtcnn"))

count = 0

while True:
    ret, img = cap.read()
    if not ret:
        break  # camera returned no frame
    cv2.imshow("camera", img)
    cv2.waitKey(1)  # required for imshow to actually render the window

    # Run detection on four out of every five frames.
    count += 1
    count %= 5
    if count == 0:
        continue

    # OpenCV frames are BGR; convert to RGB before handing them to PIL.
    my_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    bounds = mt_nn.detect_faces(my_img)
    if len(bounds[0]) == 0:
        continue  # no face in this frame
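
The listing is truncated after the empty-detection check. A hedged sketch of how the loop body might continue, classifying the first detected face with my_net; the (x1, y1, x2, y2) box layout and the scalar output are assumptions:

    # Hypothetical continuation (assumption, not the original code):
    x1, y1, x2, y2 = [int(v) for v in bounds[0][0][:4]]
    face = cv2.cvtColor(img[y1:y2, x1:x2], cv2.COLOR_BGR2RGB)
    tensor = transform(Image.fromarray(face)).unsqueeze(0)
    with torch.no_grad():
        score = my_net(tensor).item()
    print("face score:", score)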
Code Example #6
    def __init__(self):
        # get_network is a project helper; the unpacking shows it returns a pair.
        self.face_encoder, _ = get_network('FaceEncoder', 'test')
        self.detector = mtcnn.MTCNN()  # MTCNN face detector
        self.image_cnt = 0  # number of images processed so far
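
A hedged sketch of a companion method combining the fields initialized above; the method name, the direct call on face_encoder, and the absence of any tensor preprocessing are all assumptions:

    def encode_first_face(self, image):
        # Detect faces, crop the first one, and run it through the encoder.
        faces = self.detector.detect_faces(image)
        if not faces:
            return None
        x, y, w, h = faces[0]['box']
        self.image_cnt += 1
        return self.face_encoder(image[y:y + h, x:x + w])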