Example No. 1
def detect(image_path, trackerid, ts, cameraId, face_filter):
    result = m.detect(image_path)

    # FIXME: m.detect() can return a JSON string with a spurious leading comma
    # ("[,"), which json.loads() rejects, so strip it before parsing.
    result = result.replace('[,', '[')
    result = json.loads(result)
    #print('detect result-----',result)
    people_cnt = 0
    cropped = []
    detected = False

    nrof_faces, img_data, imgs_style, blury_arr, face_width, face_height = load_align_image_v2(result, image_path, trackerid, ts, cameraId, face_filter)
    if img_data is not None and len(img_data) > 0:
        people_cnt = len(img_data)
        detected = True
        for align_image_path, prewhitened in img_data.items():
            style = imgs_style[align_image_path]
            blury = blury_arr[align_image_path]
            width = face_width[align_image_path]
            height = face_height[align_image_path]
            cropped.append({"path": align_image_path, "style": style, "blury": blury, "ts": ts,
                "trackerid": trackerid, "totalPeople": people_cnt, "cameraId": cameraId,
                "width": width, "height": height})

    return json.dumps({'detected': detected, "ts": ts, "totalPeople": people_cnt, "cropped": cropped, 'totalmtcnn': nrof_faces})
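Since detect() returns a JSON string rather than a dict, a caller would normally parse it back before reading the fields. A minimal usage sketch, in which the image path, tracker/camera ids, timestamp, and face_filter value are all assumptions made for illustration:

import json

# Hypothetical caller: parse the JSON string returned by detect()
# (all argument values below are placeholders, not from the original project).
response = json.loads(detect('example.jpg', trackerid=1, ts=1700000000,
                             cameraId='cam-0', face_filter=None))
if response['detected']:
    for face in response['cropped']:
        print(face['path'], face['width'], face['height'])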
Example No. 2
def onPhoto():
    # test1 = base64-encoded image of alejandro
    # test2 = base64-encoded image of obama
    img = b64ToImg(open("test_2", "rb").read())
    id = detect(img)
    if id is None:
        print("Nothing")
    else:
        print(id)
Example No. 3
def demo(in_fn, out_fn):
    print ">>> Loading image..."
    img_color = cv2.imread(in_fn)
    img_gray = cv2.cvtColor(img_color, cv.CV_RGB2GRAY)
    img_gray = cv2.equalizeHist(img_gray)
    print in_fn, img_gray.shape
 
    print ">>> Detecting faces..."
    start = time.time()
    rects = detect(img_gray)
    end = time.time()
    print 'time:', end - start
    img_out = img_color.copy()
    draw_rects(img_out, rects, (0, 255, 0))
    cv2.imwrite(out_fn, img_out)
Example No. 4
    def __init__(self,
                 img,
                 image_src='/tmp/temp.jpg',
                 segment_dump="/tmp/",
                 a_score=1,
                 prediction_stage=False,
                 min_saliency=0.02,
                 max_iter_slic=100):
        self.timer = time.time()
        self.image_src = image_src
        self.a_score = a_score
        self.image = img
        # self.image = io.imread(image_src)
        print "Image source : ", image_src

        self.__set_timer("segmentation...")
        segment_object = seg.SuperPixelSegmentation(self.image,
                                                    max_iter=max_iter_slic)
        self.segment_map = segment_object.getSegmentationMap()
        self.slic_map = segment_object.getSlicSegmentationMap()
        self.__print_timer("segmentation")

        self.__set_timer("saliency...")
        saliency_object = saliency.Saliency(self.image, 3)
        self.saliency_map = saliency_object.getSaliencyMap()
        self.__print_timer("saliency")

        # perform face detection
        self.__set_timer("face detection...")
        self.faces = face_detection.detect(np.array(self.image))
        self.__print_timer("face detection")

        self.__set_timer("saliency detection of objects...")
        self.saliency_list, self.salient_objects, self.pixel_count, self.segment_map2 = cutils.detect_saliency_of_segments(
            self.segment_map.astype(np.intp), self.saliency_map, min_saliency)
        self.__print_timer("saliency detection of objects")
Example No. 5
    def clicked2(self):
        self.label_3.setText("Status: Id/Roll No-" + self.lineEdit.text() +
                             ",  Name- " + self.lineEdit_2.text())
        self.update()
        face_detection.detect(self.lineEdit.text(), self.lineEdit_2.text())
Example No. 6
    'cpus': 2,
    'result': 0
}, {
    'res': 1080,
    'file': './1_1920x1080.jpg',
    'minsize': 200,
    'cpus': 2,
    'result': 0
}]

m.init('./model/')
print('warming up')
m.set_minsize(40)
m.set_num_threads(1)
m.set_threshold(0.6, 0.7, 0.8)
result = m.detect('./1_854x480.jpg')
print(result)
m.detect('./1_854x480.jpg')
m.detect('./1_854x480.jpg')
m.detect('./1_854x480.jpg')
m.detect('./1_854x480.jpg')
print('starting up')

rounds = 20

for item in benchmark:
    print(item)
    m.set_minsize(item['minsize'])
    m.set_num_threads(item['cpus'])
    start = time.time()
    for i in range(rounds):
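The listing is cut off inside the timing loop. By analogy with Examples No. 8 and No. 10, the body presumably runs m.detect() on each item's file and reports a per-item average; a minimal sketch of that pattern, where the use of item['file'] and the report format are assumptions:

    for i in range(rounds):
        m.detect(item['file'])  # assumed loop body: run detection on this item's benchmark image
    end = time.time()
    print('{}p average duration is {}'.format(item['res'], (end - start) / rounds))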
Example No. 7
import face_detection
import face_recognition
import numpy as np
import pickle
import os

# image = 'images/thang.jpg'
# faces = face_detection.detect(image)
# face_recognition.learn(faces, 'thang')

folder = 'test'
for image in os.listdir(folder):
    path = '/'.join([folder, image])
    faces = face_detection.detect(path)
    faces_with_name = face_recognition.recognition(faces, path)
Example No. 8
import face_detection as m
import time

m.init('./models/ncnn/')
print('warming up')

m.set_minsize(40)
m.set_threshold(0.6, 0.7, 0.8)
m.set_num_threads(1)

m.detect('./images_480p/1_854x480.jpg')
m.detect('./images_480p/1_854x480.jpg')
m.detect('./images_480p/1_854x480.jpg')
m.detect('./images_480p/1_854x480.jpg')
m.detect('./images_480p/1_854x480.jpg')

start = time.time()
for i in range(100):
    step_start = time.time()
    result = m.detect('./images_480p/1_854x480.jpg')
    step_end = time.time()
    print('step {} duration is {}'.format(i, step_end - step_start))
end = time.time()
print(result)

print('average duration is {}'.format((end - start) / 100))
Example No. 9
model_name = 'blind_with_regularization.model'
COM = 'COM9'
camera = 1
baudrate = 9600
width = 64
height = 64
prob = 0
label = ''

print("loading model .....")
model = load_model(model_name)
print("model loaded")
ard = Arduino(baudrate, COM)  ##movleft(),movright()
vce = Voice()  #left(),right()
st = Stop()
fac = detect()
current = datetime.datetime.now()
flag = None
cap = cv2.VideoCapture(camera)
ret = True
prev = None
while ret:
    ret, frame = cap.read()
    frame = cv2.resize(frame, (640, 480))

    faces = fac.faceDetect(frame)

    ## stop on left
    current = datetime.datetime.now()
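The listing stops partway through the capture loop. For reference, a cv2.VideoCapture loop like this is typically closed out by showing the frame, breaking on a key press, and releasing the camera; the lines below are a generic sketch of that pattern, not this project's actual code:

    # show the annotated frame and exit on 'q' (generic pattern, assumed here)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()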
Example No. 10
import face_detection as m
import time

m.init('./model/')
m.set_minsize(100)
m.set_threshold(0.6, 0.7, 0.8)
m.set_num_threads(2)
print('warming up')
result = m.detect('./1_1920x1080.jpg')
print(result)
m.detect('./1_1920x1080.jpg')
m.detect('./1_1920x1080.jpg')
m.detect('./1_1920x1080.jpg')
m.detect('./1_1920x1080.jpg')

start = time.time()
for i in range(10):
    step_start = time.time()
    result = m.detect('./1_1920x1080.jpg')
    step_end = time.time()
    print('step {} duration is {}'.format(i, step_end - step_start))
end = time.time()
print(result)

print('1080p average duration is {}'.format((end - start) / 10))