Example #1
0
    def __init__(self, video_src):
        """Open the capture source, grab one frame, and seed the CamShift
        selection with the first face found by the Haar cascade.

        Parameters
        ----------
        video_src : int or str
            Source passed to ``video.create_capture``. NOTE(review): it is
            immediately overwritten below by the first positional CLI
            argument (or 0 when none is given) -- confirm this shadowing
            is intentional.
        """
        self.cam = video.create_capture(video_src, presets['cube'])
        _ret, self.frame = self.cam.read()

        # Re-parse the command line for the cascade path; this clobbers the
        # video_src parameter with the first positional argument.
        args, video_src = getopt.getopt(sys.argv[1:], '',
                                        ['cascade=', 'nested-cascade='])
        try:
            video_src = video_src[0]
        except IndexError:
            # No positional argument given: fall back to the default camera.
            video_src = 0
        args = dict(args)
        cascade_fn = args.get(
            '--cascade', "data/haarcascades/haarcascade_frontalface_alt.xml")
        cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))

        # Detect a face with the Haar cascade; if it succeeds, use the first
        # rectangle's corner coordinates as the initial selection.
        xp1, yp1, xp2, yp2 = 0, 0, 0, 0
        gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
        gray = cv.equalizeHist(gray)
        self.rects = detect(gray, cascade)
        rect_p = np.array([[x, y, w, h] for (x, y, w, h) in self.rects])
        if len(rect_p):
            xp1, yp1, xp2, yp2 = rect_p[0]

        cv.namedWindow('camshift')

        self.selection = (xp1, yp1, xp2, yp2)
        self.drag_start = None
        self.show_backproj = False
        # (x, y, w, h) window for CamShift; detect() appears to yield corner
        # pairs here, so width/height are xp2-xp1 / yp2-yp1 -- TODO confirm
        # against detect()'s actual return format.
        self.track_window = (xp1, yp1, xp2 - xp1, yp2 - yp1)
Example #2
0
def extract(image, directory):
    """Detect faces in *image* and write each cropped face into *directory*.

    Parameters
    ----------
    image : str
        Path of the source image file.
    directory : str
        Existing directory that receives the cropped images.

    Returns
    -------
    list of str
        Paths of the cropped images that were written (one per detection).

    Raises
    ------
    IOError
        If *directory* is not a directory, or *image* is not a file.
    """
    def crop(box):
        # box is ((x1, y1), (x2, y2)) for a detection, or falsy for "none".
        if box:
            (x1, y1), (x2, y2) = box
            name = OUTPUT_NAME_FORMAT % \
                        ('.'.join(split(image)[-1].split('.')[:-1]),
                         x2 - x1, y2 - y1)
            image_name = join(directory, name)
            # Images are binary data: open in 'wb' (text mode corrupts the
            # output where newline translation applies), and close the
            # handle even if saving raises.
            with open(image_name, 'wb') as result:
                Image.open(image).crop((x1, y1, x2, y2)).save(result)
            return image_name

    if not exists(directory) or not isdir(directory):
        raise IOError('"%s" is not a directory' % directory)

    if not exists(image) or not isfile(image):
        raise IOError('"%s" does not exist or is not an image' % image)

    # Keep only the crops that were actually written (crop returns None
    # for falsy boxes); returns a list, as the Python 2 filter() did.
    return [written for written in map(crop, detect(image)) if written]
Example #3
0
def calculat_facedetection_model(part_of_data, image_temp_dir,
                                 cascade_filename):
    """Score each input record with a face-detection level.

    For each line, the first field is treated as an image URL; the image is
    downloaded to a temp file, scored with the Haar cascade, and the score
    is appended as a new trailing field.

    Parameters
    ----------
    part_of_data : iterable of str
        Input records, comma-separated; tab is used as a fallback delimiter
        when a line contains no comma.
    image_temp_dir : str
        Directory used for the temporary downloaded image.
    cascade_filename : str
        Path of the Haar cascade file passed to ``facedetect.load_cascade``.

    Returns
    -------
    list of str
        The input records re-joined with commas, each with the detection
        level appended as the last field.
    """
    cascade = facedetect.load_cascade(cascade_filename)

    # Loop-invariant: every iteration reuses the same temp file path.
    temp_image_path = image_temp_dir + '/temp.jpg'

    content = []
    for line in part_of_data:
        line_fields = line.rsplit(',')
        if len(line_fields) == 1:
            # No comma found: assume the record is tab-separated.
            line_fields = line.rsplit("\t")

        image_url = line_fields[0]
        utils.crawl_image_from_url(image_url, temp_image_path)

        level = facedetect.detect(temp_image_path, cascade)
        print(level)

        line_fields.append(str(level))

        content.append(",".join(line_fields))

    return content
def calculat_facedetection_model(part_of_data, image_temp_dir, cascade_filename):
    """Append a face-detection level field to every input record.

    NOTE(review): this is a duplicate of the earlier definition of the same
    name in this file and shadows it at import time -- confirm which copy
    is intended to survive.
    """
    cascade = facedetect.load_cascade(cascade_filename)

    processed = []
    for record in part_of_data:
        # Comma-separated by default; fall back to tabs when no comma.
        fields = record.rsplit(',')
        if len(fields) == 1:
            fields = record.rsplit("\t")

        # First field is the image URL: download, then score it.
        utils.crawl_image_from_url(fields[0], image_temp_dir + '/temp.jpg')
        score = facedetect.detect(image_temp_dir + '/temp.jpg', cascade)
        print(score)

        fields.append(str(score))
        processed.append(",".join(fields))

    return processed
Example #5
0
    def run(self):
        """Capture loop: detect a face each frame, track Lucas-Kanade
        feature points seeded inside the detected face bounds, and display
        the annotated frame until ESC is pressed.
        """
        # Read the optional --cascade path from the command line; the
        # positional args (video_src) are parsed but unused in this method.
        args, video_src = getopt.getopt(sys.argv[1:], '',
                                        ['cascade=', 'nested-cascade='])
        args = dict(args)
        cascade_fn = args.get('--cascade', "haarcascade_frontalface_alt2.xml")
        cascade = cv2.CascadeClassifier(cascade_fn)
        while True:
            ret, frame = self.cam.read()
            vis = frame.copy()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #frame_gray = cv2.equalizeHist(frame_gray)
            #t = clock()
            rects = detect(frame_gray, cascade)
            # draw_rects returns coordinate bounds used below to gate new
            # track points. Indices 0/1 look like x bounds and 2..5 like y
            # bounds, but its exact contract is defined elsewhere in the
            # project -- TODO confirm against draw_rects.
            coords = draw_rects(vis, rects, (0, 255, 0))

            if len(self.tracks) > 0:
                # Forward-backward optical-flow consistency check: a point
                # survives only if tracking prev->cur->prev lands within
                # 1 px of where it started.
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1]
                                 for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None,
                                                       **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(
                    img1, img0, p1, None, **lk_params)
                d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks,
                                                 p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    # Cap each track's history to the last track_len points.
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False,
                              (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))
                #dt = clock() - t

            # Every detect_interval frames, look for fresh corners to track,
            # masking a 5 px disc around each existing track endpoint so new
            # points don't duplicate old ones.
            if self.frame_idx % self.detect_interval == 0:
                # NOTE(review): roi is computed but only referenced by the
                # commented-out debug lines below.
                roi = frame_gray[coords[4]:coords[5], coords[0]:coords[1]]
                #print(roi.shape)
                #roi = frame_gray[[coords[0],coords[1]],:][:,[coords[4],coords[5]]]
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)

                p = cv2.goodFeaturesToTrack(frame_gray,
                                            mask=mask,
                                            **feature_params)
                if p is not None:
                    # Seed a new single-point track only for corners that
                    # fall inside the face bounds reported by draw_rects.
                    for x, y in np.float32(p).reshape(-1, 2):
                        if x > coords[0]:
                            if x < coords[1]:
                                if y > coords[4]:
                                    if y < coords[5]:
                                        self.tracks.append([(x, y)])
                                elif y > coords[2]:
                                    if y < coords[3]:
                                        self.tracks.append([(x, y)])

            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            # ESC (27) quits; waitKey is masked to its low byte.
            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #6
0
    # NOTE(review): this fragment starts inside an enclosing function whose
    # signature is outside the visible range (cascade_fn, nested_fn,
    # ModelName, LabelName and video_src are defined there), and the while
    # loop continues past the visible range.
    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)
    
    # LBPH face recognizer restored from a pre-trained model file.
    reconizer = cv2.createLBPHFaceRecognizer()
    reconizer.load(ModelName)

    cam = create_capture(video_src, fallback='synth:bg=../cpp/lena.jpg:noise=0.05')
    Name, Labels = tc.Read_List_Label(LabelName)

    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        # Detect faces, then run the nested cascade inside each face ROI.
        rects = fd.detect(gray, cascade)
        vis = img.copy()
        fd.draw_rects(vis, rects, (0, 255, 0))
        for x1, y1, x2, y2 in rects:
            roi = gray[y1:y2, x1:x2]
            vis_roi = vis[y1:y2, x1:x2]
            subrects = fd.detect(roi.copy(), nested)
            fd.draw_rects(vis_roi, subrects, (255, 0, 0))
            
            # Predict identity for the face ROI; a smaller distance means a
            # closer match to a trained label.
            label, distance = reconizer.predict(roi)
            
            draw_str(vis, (x1, y2+15), '%.1f' % (distance))
            if(distance < 300):
                draw_str(vis, (x1, y2), '%s' % (Name[label]))
            else:
                # Above-threshold distance: shown as "Known H" as written --
                # NOTE(review): possibly meant "Unknown"; left unchanged.
                draw_str(vis, (x1, y2), '%s' % ("Known H"))