Example #1
    def do_POST(self):
        # Reject uploads larger than ~10 MB; otherwise parse the multipart form,
        # run face detection on the uploaded image, and return the rendered result.
        length = int(self.headers['content-length'])
        print(length)
        if length > 10000000:
            print("file too big")
            # Drain the request body so the connection can still be answered.
            read = 0
            while read < length:
                read += len(self.rfile.read(min(66556, length - read)))
            self.respond("file too big")
            return
        else:
            form = cgi.FieldStorage(
                fp=self.rfile,
                headers=self.headers,
                environ={'REQUEST_METHOD': 'POST',
                         'CONTENT_TYPE': self.headers['Content-Type'],
                         })
            data = form['file'].file.read()
            with open("test.jpg", "wb") as f:
                f.write(data)
            # call facedetect
            facedetect.facedetect()
            # show result
            with open("result.png", "rb") as img:
                self.imgrespond(img.read())
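
# A minimal client-side sketch for exercising the handler above, assuming it is
# served at http://localhost:8000/ (the URL and port are assumptions; the 'file'
# field name comes from form['file'] in do_POST). Uses the `requests` library.
import requests

with open("input.jpg", "rb") as fp:                      # hypothetical local image
    resp = requests.post("http://localhost:8000/", files={"file": fp})
with open("result_client.png", "wb") as out:
    out.write(resp.content)                              # handler replies with the result image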
Example #2
    def __init__(self, modelfile):
        self.fd_detector = facedetect.facedetect().get_instance()
        cudnn.enabled = True
        self.gpu = "cuda:0"
        self.model = importlib.import_module("resnet").inference()

        saved_state_dict = torch.load(modelfile)
        self.model.load_state_dict(saved_state_dict)
        self.transformations = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4, 0.4, 0.4], std=[0.2, 0.2, 0.2])
        ])
        self.model.cuda(self.gpu)
        self.model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
Example #3
def facedetectapi():
    if request.method == 'POST':
        f = request.files['image']
        basepath = os.path.dirname(__file__)
        upload_path = os.path.join(basepath, 'postimg.jpg')
        f.save(upload_path)

        img = cv2.imread(upload_path)  # read the image uploaded by the client
        result_dict = facedetect(img)  # run face detection on the loaded image
        # print(result_Dict)
        # result_trans = [ str(x) for x in result_list ]
        result_json = json.dumps(result_dict)
        return result_json + '\n'
    return 'Error Format'
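
# A minimal client sketch for the Flask view above, assuming it is registered at
# http://localhost:5000/facedetectapi (route and port are assumptions; the 'image'
# field name comes from request.files['image']). Uses the `requests` library.
import requests

with open("face.jpg", "rb") as fp:                       # hypothetical test image
    resp = requests.post("http://localhost:5000/facedetectapi", files={"image": fp})
print(resp.text)                                         # JSON-encoded detection result
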
def demo(args):
    face_fd = facedetect().get_instance("tf-ssh")

    cam = cv2.VideoCapture(0)
    cam.set(3, 1280)  # CAP_PROP_FRAME_WIDTH
    cam.set(4, 720)   # CAP_PROP_FRAME_HEIGHT

    while True:
        _, img = cam.read()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        rects, imgs = face_fd.findfaces(img)
        if len(rects) == 0:
            print("no face found")
        else:
            for index, (rect, faceimg) in enumerate(zip(rects, imgs)):
                cv2.imshow("1", faceimg)
                cv2.waitKey(1)
    return args


def img_preprocess(img):
    processimg = cv2.resize(img, (96, 96))
    processimg = processimg.astype(np.float32)
    processimg = np.transpose(processimg, (2, 0, 1))
    processimg = np.expand_dims(processimg, 0)
    processimg = processimg / 255.0
    processimg = (processimg - 0.5) / 0.5
    return processimg
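
# A minimal sketch of feeding img_preprocess output to the PyTorch model built in
# the __main__ block below. The run_on_image helper and the image path are
# illustrative, not part of the original script; torch is assumed to be imported
# at the top of the file (torch.load below already requires it).
def run_on_image(model, path="frame.jpg"):
    frame = cv2.imread(path)
    blob = img_preprocess(frame)             # (1, 3, 96, 96) float32 in [-1, 1]
    inp = torch.from_numpy(blob).cuda()      # move the blob onto the GPU with the model
    with torch.no_grad():
        return model(inp)                    # forward pass through the resnet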


if __name__ == '__main__':
    args = parse_args()
    fd_detector = facedetect.facedetect("tf-ssh")
    cudnn.enabled = True

    gpu = "cuda:0"
    snapshot_path = args.snapshot

    model = resnet.inference(10).cuda()

    saved_state_dict = torch.load(snapshot_path)
    model.load_state_dict(saved_state_dict)

    model.eval()

    camera = cv2.VideoCapture(0)

    while True:
Example #6
import os
import sys
sys.path.append("/home/hanson/pytools/lib")
import faceutils as fu
import importlib
import cv2
from facedetect import facedetect

module = importlib.import_module("WFLW106" + "generateBBox")
face_fd = facedetect().get_instance("tf-ssh")

for imgpath, label_rect, label_landmark5p, label_landmark106p in module.generateBBox(
):
    image = cv2.imread(imgpath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    facerects, _ = face_fd.findfaces(image)

    target_rect_tmp = max(facerects, key=lambda x: fu.IOU(label_rect, x))

    if fu.IOU(target_rect_tmp, label_rect) > 0.65:
        target_rect = target_rect_tmp
    else:
        target_rect = label_rect

    roi_image = target_rect.get_roi(image)
    scale_landmark = target_rect.projectlandmark(label_landmark5p, scale=0)

    #fu.showimg(roi_image, faceboxs = [target_rect], landmarks = [label_landmark])
    #fu.showimg(roi_image, landmarks = [scale_landmark] ,landmarkscale=0)

    for i in scale_landmark:
import numpy as np
import cv2
# from facebox import Face2point
import tensorflow as tf
import tensorflow.contrib.slim as slim
import utils


def softmax(x):
    x_exp = np.exp(x)
    x_sum = np.sum(x_exp, axis=0, keepdims=True)
    s = x_exp / x_sum
    return s
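
# A small worked example of the softmax above: with axis=0, each column of a
# (bins, batch) score array is normalized independently and sums to 1.
logits = np.array([[2.0, 0.5],
                   [1.0, 0.5],
                   [0.1, 0.5]])
print(softmax(logits).sum(axis=0))           # -> [1. 1.]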


face_fd = facedetect("tf-ssh")

ckpt = tf.train.latest_checkpoint(
    "/home/hanson/work/FaceHeadpose_TF/saved_models/20190801-145743/models")
meta = ckpt + ".meta"
print(ckpt)

saver = tf.train.import_meta_graph(meta)
input_tensor = tf.get_default_graph().get_tensor_by_name("input:0")

yaw_output_tensor = tf.get_default_graph().get_tensor_by_name(
    "vgg_16/yaw_fc8/BiasAdd:0")
pitch_output_tensor = tf.get_default_graph().get_tensor_by_name(
    "vgg_16/pitch_fc8/BiasAdd:0")
roll_output_tensor = tf.get_default_graph().get_tensor_by_name(
    "vgg_16/roll_fc8/BiasAdd:0")


def img_preprocess(img):
    processimg = cv2.resize(img, (112, 112))
    processimg = processimg.astype(np.float32)
    processimg = np.transpose(processimg, (2, 0, 1))
    processimg = np.expand_dims(processimg, 0)
    processimg = processimg / 255.0
    processimg = (processimg - 0.4) / 0.2
    return processimg
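
# A minimal sketch of running the restored TF1 graph on one image, using the saver,
# checkpoint and output tensors fetched above together with img_preprocess. The
# run_headpose_once helper and the image path are illustrative, and the input
# placeholder is assumed to accept the (1, 3, 112, 112) layout img_preprocess produces.
def run_headpose_once(path="face_crop.jpg"):
    with tf.Session() as sess:
        saver.restore(sess, ckpt)                         # load the trained weights
        feed = img_preprocess(cv2.imread(path))
        return sess.run(
            [yaw_output_tensor, pitch_output_tensor, roll_output_tensor],
            feed_dict={input_tensor: feed})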


if __name__ == '__main__':
    args = parse_args()
    fd_detector = facedetect.facedetect().get_instance()
    cudnn.enabled = True

    gpu = "cuda:0"
    snapshot_path = args.snapshot

    model = importlib.import_module("resnet").inference()

    saved_state_dict = torch.load(snapshot_path)
    model.load_state_dict(saved_state_dict)

    transformations = transforms.Compose([
        transforms.Resize(112),
        transforms.CenterCrop(112),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.4, 0.4, 0.4], std=[0.2, 0.2, 0.2])
    ])
Example #9
def generatePic(target_name):

    target_image_dir = target_name + "/images"
    target_106p_label_file = target_name + "/landmark106p_label.txt"
    target_5p_label_file = target_name + "/landmark5p_label.txt"
    target_6p_label_file = target_name + "/landmark6p_label.txt"

    if not os.path.exists(target_image_dir):
        os.makedirs(target_image_dir)

    module = importlib.import_module(target_name + "generateBBox")
    face_fd = facedetect("tf-ssh")
    label5p_f = open(target_5p_label_file, "w")
    label6p_f = open(target_6p_label_file, "w")
    label106p_f = open(target_106p_label_file, "w")

    img_cnt = 0
    for imgpath, label_rect, label_landmark5p, label_landmark6p, label_landmark106p in module.generateBBox(
    ):

        if label_rect.width < 40 or label_rect.height < 40:
            continue

        print(imgpath)
        image = cv2.imread(imgpath)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        facerects, _ = face_fd.findfaces(image)

        target_rect = label_rect
        if len(facerects) > 0:
            target_rect_tmp = max(facerects,
                                  key=lambda x: fu.IOU(label_rect, x))
            if fu.IOU(target_rect_tmp, label_rect) > 0.65:
                target_rect = target_rect_tmp

        roi_image = target_rect.get_roi(image)
        roi_image = cv2.cvtColor(roi_image, cv2.COLOR_RGB2BGR)
        scale_landmark5p = target_rect.projectlandmark(label_landmark5p,
                                                       scale=0)
        scale_landmark6p = target_rect.projectlandmark(label_landmark6p,
                                                       scale=0)
        scale_landmark106p = target_rect.projectlandmark(label_landmark106p,
                                                         scale=0)

        cv2.imwrite("%s/%d.jpg" % (target_image_dir, img_cnt), roi_image)

        label5p_f.write("%d.jpg" % img_cnt)
        for point in scale_landmark5p:
            label5p_f.write(" %d %d" % (point[0], point[1]))
        label5p_f.write("\n")

        label6p_f.write("%d.jpg" % img_cnt)
        for point in scale_landmark6p:
            label6p_f.write(" %d %d" % (point[0], point[1]))
        label6p_f.write("\n")

        label106p_f.write("%d.jpg" % img_cnt)
        for point in scale_landmark106p:
            label106p_f.write(" %d %d" % (point[0], point[1]))
        label106p_f.write("\n")

        img_cnt += 1
    label5p_f.close()
    label6p_f.close()
    label106p_f.close()
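
# A hypothetical invocation of generatePic, assuming a dataset folder named "WFLW106"
# laid out the way the function and its <name>generateBBox module expect (the folder
# name follows the import pattern used in Example #6 and is an assumption here).
if __name__ == '__main__':
    generatePic("WFLW106")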