Ejemplo n.º 1
0
def detect_shots(filename):
    """Detect shots in a video and store per-shot attributes in its HDF file.

    Opens ``<basename>.hdf`` for the given video, runs shot extraction over
    the stored frame data, and writes one row per shot (start, end, length,
    number of faces in the median frame, and average absolute frame
    difference) into the shots table.

    Args:
        filename: path to the video file; the HDF file name is derived from
            its basename.
    """
    hdf_filename = '%s.hdf' % os.path.basename(filename)
    f = tables.openFile(hdf_filename, 'r+')
    try:
        parser = VideoParser(filename)

        shots_table = create_table_shots(f)

        print('Start shot detection')
        shots = extract_shots(f.root.frames[:])

        num_shots = len(shots)

        # stores shot data (start, end, length) in the database
        for i, s in enumerate(shots):
            r = shots_table.row

            r['start'] = s.start
            r['end'] = s.end
            r['length'] = s.length

            # number of faces detected in the shot's median frame
            r['num_of_faces'] = len(detect(parser._get_frame(s.median)))
            # mean absolute inter-frame difference across the shot
            r['dynamics'] = int(sum(f.root.frames.cols.abs_diff[s.start:s.end]) / float(s.length))

            r.append()

            # Bug fix: progress was reported 0-based ("0 of N").
            print('%d of %d' % (i + 1, num_shots))

        shots_table.flush()

        create_table_clusterings(f)
    finally:
        # Bug fix: the HDF file handle was never closed (resource leak).
        f.close()
Ejemplo n.º 2
0
    def process_images(self, clip, start, end):
        """Sample frames of *clip* between *start* and *end* (inclusive),
        save a vertical filmstrip of them, and gather face/colour info.

        Returns:
            A ``(start, has_face, colours)`` tuple, where ``colours`` is a
            set of colour names (empty when colour analysis is disabled).
        """
        found_face = False
        colour_names = set()
        sampled_frames = []
        spread = takespread(range(start, end + 1), self.samplesize)
        for idx, frame_no in enumerate(spread):
            arr = get_numpy(clip, frame_no)
            sampled_frames.append(arr)

            # Skip facial recognition when disabled or off the cadence.
            if self.noface or idx % self.faceprec:
                continue

            # Only keep probing until the first face is found.
            if not found_face:
                # Work on a copy so detection cannot mutate the frame.
                scratch = numpy.empty_like(arr)
                scratch[:] = arr
                if face.detect(scratch):
                    found_face = True

        # Stack the sampled frames vertically and save the filmstrip.
        stacked = numpy.concatenate(sampled_frames, axis=0)
        strip = Image.fromarray(stacked)
        strip.save(self.get_scene_img_path(start, end))

        if not self.nocol:
            # Quantize the image and record its most common colour names.
            for rgb in most_frequent_colours(strip, top=self.num_colours):
                colour_names.add(get_colour_name(rgb[:3]))
        return (start, found_face, colour_names)
Ejemplo n.º 3
0
def predict(test_img):
    """Recognize the face in *test_img* and return an annotated copy
    (bounding rectangle plus the predicted person's name)."""
    annotated = test_img.copy()
    detected_face, rect = detect(annotated)
    prediction = face_recog.predict(detected_face)
    name = people[prediction[0]]
    draw_rectangle(annotated, rect)
    # Place the label just above the rectangle's top-left corner.
    text(annotated, name, rect[0], rect[1] - 5)
    return annotated
Ejemplo n.º 4
0
def main(argv):
    """Track the largest screen region containing a face via the webcam and
    move Tux left/right to follow it.

    Args:
        argv: command-line arguments after the program name; supports
            ``-f N`` / ``--framerate=N`` to set the cvWaitKey delay.
    """
    # NOTE(review): despite the name, this value is passed to cvWaitKey as a
    # delay in milliseconds, not a true frames-per-second rate.
    fps = 20
    tux_pos = 5
    # Apparently intended clamping bounds for tux_pos; currently unused.
    tux_pos_min = 0.0
    tux_pos_max = 9.0

    try:
        # Bug fix: the spec was "fps" (three separate no-argument flags f, p
        # and s), so getopt could never produce "-fps" and the framerate
        # argument was silently ignored. "f:" declares one -f taking a value.
        opts, args = getopt.getopt(argv, "f:", ["framerate="])
    except getopt.GetoptError:
        sys.exit(2)

    for opt, arg in opts:
        if opt in ("-f", "--framerate"):
            # Bug fix: cvWaitKey needs an int delay; getopt yields strings.
            fps = int(arg)

    camera = highgui.cvCreateCameraCapture(0)

    while True:
        highgui.cvNamedWindow('Camera', 1)
        im = highgui.cvQueryFrame(camera)
        if im is None:
            break
        # Mirror the frame horizontally so movement matches the viewer.
        opencv.cv.cvFlip(im, None, 1)

        positions = face.detect(im, 'haarcascade_data/haarcascade_frontalface_alt2.xml')

        # display webcam image
        highgui.cvShowImage('Camera', im)

        # Division of the screen into 9 columns; each column counts as one
        # "walking" step for Tux.
        image_size = opencv.cvGetSize(im)
        motion_block = image_size.width / 9

        if positions:
            # NOTE(review): this keeps the *narrowest* detected face — if
            # the intent was the most prominent face, the comparison may be
            # inverted; behavior left unchanged.
            mp = None
            for position in positions:
                if not mp or mp['width'] > position['width']:
                    mp = position
            pos = (mp['x'] + (mp['width'] / 2)) / motion_block
            print('tux pos: %f' % tux_pos)
            print('pos: %f' % pos)

            if pos != tux_pos:
                if tux_pos > pos:
                    move_tux_right(tux_pos - pos)
                elif tux_pos < pos:
                    move_tux_left(pos - tux_pos)
                tux_pos = pos

        # Any keypress closes the window and exits.
        if highgui.cvWaitKey(fps) >= 0:
            highgui.cvDestroyWindow('Camera')
            sys.exit(0)
Ejemplo n.º 5
0
    def detect_face_srv(self, image, identify=False):
        """Detect faces in *image* via the MS Face API.

        Args:
            image: raw image; converted to JPEG with ``self._convert_jpg``.
            identify: when True, also try to match the detected faces
                against the configured person group.

        Returns:
            A ``{'faces': [...]}`` dict of naoqi-style face records, or
            ``None`` when detection failed.
        """
        faces = []
        json_faces = None
        try:
            data = CF.detect(
                StringIO(self._convert_jpg(image)),
                landmarks=False,
                attributes=
                'age,gender,headPose,smile,glasses,hair,facialhair,accessories'
            )
            identities = {}
            if identify:
                print('trying to identify persons')
                ids = [f['faceId'] for f in data]

                try:
                    # Only the first 10 face ids are submitted — presumably
                    # an API batch limit; confirm against CF.identify docs.
                    identified = CF.identify(ids[0:10], self._person_group_id)
                    for i in identified:
                        if len(i['candidates']) > 0:
                            pid = i['candidates'][0]['personId']
                            person = PERSON.get(self._person_group_id, pid)
                            # Consistency fix: was a Python 2 print
                            # statement amid print() calls elsewhere.
                            print('person= %s' % str(person))
                            identities[i['faceId']] = person['name']
                    print('identified %d persons in this image: %s' %
                          (len(identities), str(identities)))
                except CognitiveFaceException as e:
                    print('identification did not work: %s' % str(e))
            for f in data:
                # Attach the identified name; empty string when unknown.
                f['name'] = identities.get(f['faceId'], '')
                ret_face = self._parse_to_naoqi_json(f)
                faces.append(ret_face)

            json_faces = {'faces': faces}

        except Exception as e:
            # Best-effort service call: report and return None, not raise.
            print('failed to detect via the MS Face API: %s' % str(e))
        return json_faces
Ejemplo n.º 6
0
    def add_face_srv(self, image, nameperson):
        """Register the biggest face in *image* as person *nameperson*.

        Detects faces via the MS Face API, picks the one with the largest
        bounding rectangle, (re)creates the person, adds the face to the
        person group, and kicks off retraining.

        Returns:
            The naoqi-style face record for the added face, or ``None``
            when no face was detected.
        """
        img_msg = self._convert_jpg(image)

        data = CF.detect(
            StringIO(img_msg),
            landmarks=False,
            attributes=
            'age,gender,headPose,smile,glasses,hair,facialhair,accessories')

        # Pick the face with the largest rectangle area.
        max_area = 0.0
        biggest_face = None
        for f in data:
            area = float(f['faceRectangle']['width'] *
                         f['faceRectangle']['height'])
            if area > max_area:
                max_area = area
                biggest_face = f

        # Robustness fix: previously crashed with a TypeError
        # (None['name']) when the API found no face at all.
        if biggest_face is None:
            print('no face detected; nothing to add')
            return None

        biggest_face['name'] = nameperson
        target_face = biggest_face

        person = self._init_person(biggest_face['name'], delete_first=True)

        pfid = PERSON.add_face(StringIO(img_msg),
                               self._person_group_id,
                               person['personId'],
                               target_face="%d,%d,%d,%d" %
                               (target_face['faceRectangle']['left'],
                                target_face['faceRectangle']['top'],
                                target_face['faceRectangle']['width'],
                                target_face['faceRectangle']['height']))

        # Record the persisted id the service assigned to this face.
        target_face['faceId'] = pfid['persistedFaceId']
        print('restarting training with new '
              'face for group "%s".' % self._person_group_id)
        PG.train(self._person_group_id)

        ret_face = self._parse_to_naoqi_json(target_face)

        return ret_face
Ejemplo n.º 7
0
def get_frame():
    """Load known faces from the database and run detection on a POSTed image.

    Expects a base64-encoded image in the ``image_str`` form field; decodes
    it to ``result.png`` and returns the detection result, or ``'fail'``
    when the field is missing.
    """
    unknown_str = face.imgToStr("faces/Unknown.jpg")

    known_face_encodings = []
    known_face_names = []
    known_face_images = []

    conn = connect.init()
    cursor = conn.cursor()
    try:
        cursor.execute("SELECT * FROM person")
        rows = cursor.fetchall()
    finally:
        # Bug fix: the cursor and connection were never released.
        cursor.close()
        conn.close()

    # Columns used: 1 = name, 2 = image, 3..18 = encoding components.
    for row in rows:
        known_face_names.append(row[1])
        known_face_images.append(row[2])
        known_face_encodings.append(face.encoding0_15ToNp_encoding(row[3:19]))

    if request.method == 'POST':
        print("get photo")
        image_str = request.form.get("image_str")
        if image_str:
            image_data = base64.b64decode(image_str)
            # Bug fix: context manager guarantees the file is closed even
            # if the write raises.
            with open('result.png', 'wb') as out:
                out.write(image_data)
            return face.detect("result.png", known_face_encodings,
                               known_face_names, known_face_images,
                               unknown_str)
        else:
            return 'fail'
    # NOTE(review): non-POST requests fall through returning None, which
    # Flask treats as an error — confirm this route is POST-only.
Ejemplo n.º 8
0
def detect_feed():
    """Stream the face-detection generator as an MJPEG multipart response."""
    # Removed a stray ``global video`` declaration: ``video`` was never
    # referenced in this function, so the statement was a no-op.
    return Response(detect(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Ejemplo n.º 9
0
# Minimum interval between detection runs (compared against clock()).
detect_rate = 1

def draw_faces(vis, faces):
    """Outline each ``(x, y, w, h)`` face on *vis* with a 2px rectangle."""
    for rect in faces:
        x, y, w, h = rect
        cv.Rectangle(vis, (x, y), (x + w, y + h), (0, 0, 255), 2)

# Most recently detected faces and the counter used to number snapshots.
faces = []
shot_idx = 0

# Main capture loop: grab a frame, re-run detection at most once per
# detect_rate interval, otherwise redraw the previous (possibly stale)
# face boxes.
while True:
    frame = cv.QueryFrame(cap)
    vis = cv.CloneImage(frame)
    
    if clock() - last_time > detect_rate:
        last_time = clock()
        faces = face.detect(frame)
        draw_faces(vis, faces)
        if len(faces) > 0:
            # Save an annotated snapshot and ask the camera to center on
            # the first detected face.
            cv.SaveImage('out/face_%03d.bmp' % shot_idx, vis)
            shot_idx += 1
            # NOTE(review): unpacked as x, y, h, w — h and w appear swapped
            # relative to draw_faces' (x, y, w, h) order; confirm the
            # element order that face.detect returns.
            x, y, h, w = faces[0]
            async_cmd(cam_name, 'center', x + w/2, y + h/2)
    else:
        draw_faces(vis, faces)


    cv.ShowImage(cam_name, vis)
    ch = cv.WaitKey(10)
    # ESC quits the loop.
    if ch == 27:
        break
    # (truncated here: the 'a' key handler continues past this excerpt)
    if ch == ord('a'):
Ejemplo n.º 10
0
        else:
            os.remove(c_path)
    for file in glob('imgs/*.*'):
        os.remove(file)
    for file in glob('faces/*.*'):
        os.remove(file)


if __name__ == '__main__':
    # Source listing: http://konachan.net/post?page=1&tags=
    sp = konachan_spyder('li_syaoran')

    print('准备中...')
    clean('result')
    print('(1/3) 爬取k站图片中,需要较长时间,请稍后...')
    # Crawl the tagged images from konachan.
    sp.konachan_spyder()
    print('(2/3) 人脸识别中,请稍后...')
    # Idiom fix: replaced the "... is False" comparison with "not".
    if not os.path.exists('faces'):
        os.makedirs('faces')
    # Detect and extract a face from every downloaded image.
    file_list = glob('imgs/*.jpg')
    for filename in file_list:
        face.detect(filename)
    # Circle-crop each extracted face.
    faces = glob('faces/*.jpg')
    for face_img in faces:
        face.img_circle(face_img)
    print('(3/3) 生成动态图标中,请稍后...')
    xml = makeXML()
    xml.deal(face_sf=0.7, base_sf=0.7, mask_sf=1)
    # Final cleanup is intentionally disabled; uncomment to wipe results.
    #print('清理工程目录....')
    #clean('result')
    print('全部完成,结果文件在result目录')
Ejemplo n.º 11
0
def write_image(file_path, image, sub_dir="/face", suffix=""):
    """Write *image* into a sub-directory next to *file_path*.

    For ``a/b.jpg`` with sub_dir ``/face`` and suffix ``_0`` the output is
    ``a/face/b_0.jpg``; the sub-directory is created on demand.

    Args:
        file_path: path of the source image the output name derives from.
        image: image data accepted by ``cv2.imwrite``.
        sub_dir: fragment appended to the source directory. It is expected
            to start with a slash — note it is string-concatenated, not
            joined (os.path.join would discard the prefix before "/face").
        suffix: text inserted between the file stem and its extension.
    """
    # Renamed the local from ``dir`` to avoid shadowing the builtin.
    directory, file_name = os.path.split(file_path)
    report_dir = directory + sub_dir

    root, ext = os.path.splitext(report_dir + "/" + file_name)
    export_file_path = root + suffix + ext

    os.makedirs(report_dir, exist_ok=True)
    cv2.imwrite(export_file_path, image)

# Crop every detected face (rotated upright) out of each input image and
# save the crops via write_image.
for image_path in paths.list_images(args["images"]):
    print(image_path)
    img = cv2.imread(image_path)
    detected, _extra = face.detect(img)

    for idx, info in enumerate(detected):
        gray = face.gray_in_frame(img)
        upright = face.rotate(gray, img, info['deg'])

        top = int(info['y'])
        height = int(info['r_h'])
        left = int(info['x'])
        width = int(info['w'])
        # Trim 10% of the box height off the top before cropping.
        top_margin = int(height * 0.1)

        crop = upright[top + top_margin: top + height, left: left + width]
        write_image(image_path, crop, "/faces", "_" + str(idx))
Ejemplo n.º 12
0
    '10': 'Receive a book'

}
# Start in state '1'; the loop walks the hard-coded state machine using the
# message strings stored in ``statemachine``.
# NOTE(review): transitions to state '7' are produced here but handled past
# this excerpt; only states '1', '3', '4' and '6' are visible.
state = '1'
while True:
    if state == '1':
        # Print the greeting messages, then wait for a face in state '6'.
        print(statemachine[state])
        print(statemachine['2'])
        print(statemachine['4'])
        print(statemachine['10'])
        state='6'

    if state == '3':
        print(statemachine[state])
        # Run detection for 10 frames; only the last result is kept.
        for i in range(10):
            faces = fac.detect(videoCapture, faceCascade, statemachine[state])
        state='6'
    if state == '4':
        # Terminal state: exit the loop.
        print(statemachine['4'])
        break
    if state == '6':
        print(statemachine[state])
        flag = False
        # Look for a face across 10 frames; any hit advances to state '7',
        # otherwise fall back to state '3'.
        for i in range(10):
            faces = fac.detect(videoCapture,faceCascade,statemachine[state])
            if len(faces) > 0:
                flag = True
        if flag:
            state = '7'
        else:
            state='3'