Esempio n. 1
0
def main():
    """Detect faces in a video, cluster them, and write an annotated video.

    Pipeline:
      1. Load frames from a video file.
      2. Detect faces and landmarks per frame with MTCNN.
      3. Extract a 512-d embedding per face with MobileFaceNet.
      4. Cluster all embeddings with DBSCAN to assign a person id per face.
      5. Write an output video (and per-frame JPEGs under ./ll/) with
         landmarks, bounding boxes and cluster ids drawn on each frame.

    Relies on module-level names defined elsewhere in this project:
    Video2list, MtcnnDetector, mx, MobileFaceNet, Facefeature, dbscan,
    and the DBSCAN parameters `distance` and `minPt`.
    """
    # --- video load ---
    video_path = "../videos/3.mp4"
    imagelist = Video2list(video_path)

    # --- face detection ---
    mtcnn_path = os.path.join(os.path.dirname(__file__), 'mtcnn-model')
    detector = MtcnnDetector(model_folder=mtcnn_path,
                             ctx=mx.cpu(0),
                             num_worker=1,
                             minsize=80,
                             accurate_landmark=True,
                             threshold=[0.6, 0.7, 0.9])
    # One entry per frame: the raw MTCNN result, or None when no face found.
    frame_detections = [detector.detect_face(img) for img in imagelist]

    # --- face feature extraction ---
    model = MobileFaceNet(512)
    state = torch.load("model_mobilefacenet.pth",
                       map_location=lambda storage, loc: storage)
    # Drop the classification head (fc2.*): only the embedding layers are
    # needed here, and the checkpoint's head does not match this model.
    embedding_state = {k: v for k, v in state.items()
                       if k not in ("fc2.weight", "fc2.bias")}
    model.load_state_dict(embedding_state)
    model.eval()

    faces_per_frame = []    # number of faces detected in each frame
    all_face_features = []  # embeddings of every face, flattened across frames
    for idx, image in enumerate(imagelist):
        detection = frame_detections[idx]
        if detection is not None:
            # Facefeature returns (embeddings, bboxes, landmark points).
            feat = Facefeature(detection, image, model)
            faces_per_frame.append(len(feat[0]))
            all_face_features += feat[0]
            # Keep only what the rendering pass needs: boxes + landmarks.
            frame_detections[idx] = [feat[1], feat[2]]
        else:
            faces_per_frame.append(0)

    # --- cluster embeddings: one DBSCAN label per face, in frame order ---
    face_labels, class_count = dbscan(np.array(all_face_features),
                                      distance, minPt)
    print(face_labels, class_count)

    # --- render annotated video ---
    # Frame dumps go under ./ll/; create it up front so imwrite cannot
    # silently fail when the directory is missing.
    os.makedirs("./ll", exist_ok=True)
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    writer = cv2.VideoWriter(
        'output.avi', fourcc, 25,
        (imagelist[0].shape[1], imagelist[0].shape[0]))
    font = cv2.FONT_HERSHEY_SIMPLEX
    label_idx = 0  # running index into face_labels across all frames
    for frame_no, img in enumerate(imagelist):
        if faces_per_frame[frame_no]:
            # A frame may contain several faces; annotate each one.
            bbox, points = frame_detections[frame_no]
            for q in range(len(points)):
                # Five landmarks per face: x coords in [0:5], y in [5:10].
                for i in range(5):
                    cv2.circle(img,
                               (int(points[q][i]), int(points[q][i + 5])),
                               3, (0, 255, 0), -1)
                cv2.rectangle(img, (int(bbox[q][0]), int(bbox[q][1])),
                              (int(bbox[q][2]), int(bbox[q][3])),
                              (0, 255, 255), 2)
                # Cluster id = which person this face belongs to.
                cv2.putText(img, "%d" % face_labels[label_idx],
                            (int(bbox[q][0]), int(bbox[q][1])), font, 1.2,
                            (255, 255, 255), 2)
                label_idx += 1
        cv2.imwrite("./ll/%d.jpg" % frame_no, img)
        writer.write(img)
    writer.release()
Esempio n. 2
0
    # Read the LFW pairs list; `lfw_pairs` / `lfw_dir` are defined by the
    # enclosing function (not visible in this chunk).
    pairs = lfw.read_pairs(os.path.expanduser(lfw_pairs))

    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(os.path.expanduser(lfw_dir), pairs)

    # print(paths)
    print('paths len:', len(paths))
    print('pairs len: ', len(actual_issame))

    # One 512-dimensional embedding row per image path.
    embeddings = np.zeros([len(paths), 512])
    # arcface = ArcFace(classnum=10).to('cuda')
    # backbone = arcface.backbone
    # backbone.load_state_dict(torch.load('resnet50.pth'))

    # Restore the pretrained MobileFaceNet on GPU and switch to eval mode
    # (disables dropout/batch-norm updates for inference).
    mobileFacenet = MobileFaceNet(512).to('cuda')
    mobileFacenet.load_state_dict(torch.load('mobilefacenet.pth'))
    mobileFacenet.eval()

    # No gradients needed: embedding extraction only.
    with torch.no_grad():
        for idx, path in enumerate(paths):
            print('process image no:', idx)
            img = Image.open(path)
            # NOTE(review): get_embedding presumably preprocesses the PIL
            # image itself; tta=False looks like it disables test-time
            # augmentation — confirm against its definition.
            embedding = get_embedding(mobileFacenet,
                                      img,
                                      tta=False,
                                      device='cuda')
            embeddings[idx] = embedding.cpu().numpy()

    # np.save('temp2.npy', embeddings)
    # embeddings = np.load('temp.npy')
Esempio n. 3
0

from model import MobileFaceNet
import torch
from mtcnn import MTCNN


# Standalone sanity check: build the network, restore the pretrained
# weights from disk, then dump the restored parameters for inspection.
model = MobileFaceNet(512)

checkpoint = torch.load('/home/ai/Desktop/project/pytorch-insightface/model_mobilefacenet.pth')
model.load_state_dict(checkpoint)

print(model.state_dict())