Code Example #1
import cv2
from time import time


def get_test_face():
    # video = "http://*****:*****@192.168.43.1:8081/"
    video = 0
    capture = cv2.VideoCapture(video)
    begin = False
    input_dir = mkpath('tmp/input/')  # directory where captured frames are saved
    count = 0
    while True:
        ret, frame = capture.read()
        if not ret:
            # No frame returned (camera unavailable or stream ended)
            break

        cv2.imshow('video', frame)

        k = cv2.waitKey(1)
        # 'q' quits; capturing also stops automatically after 100 saved frames
        if k == ord('q') or count > 100:
            break
        # 's' starts saving frames to disk
        if k == ord('s'):
            begin = True
        if begin:
            # Use part of the current timestamp as a unique file name
            str_time = str(time()).replace('.', '')[4:13]
            path = input_dir + str_time + '.jpg'
            cv2.imwrite(path, frame)
            print(count, path)
            count += 1
    capture.release()
    cv2.destroyAllWindows()
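The snippet calls a mkpath helper that is not included in the excerpt. A minimal sketch of what it presumably does, assuming it only needs to create the directory if it is missing and return the path unchanged:

import os

def mkpath(path):
    # Hypothetical stand-in for the project's mkpath helper (assumption):
    # ensure the directory exists and return its path to the caller.
    os.makedirs(path, exist_ok=True)
    return path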
Code Example #2
def merge(source, target):
    # Compose the images found by find_last_path into a video under target
    print("Preparing to compose the video")
    last_source = find_last_path(source)[0]
    target_path = mkpath(target)
    images_to_video(last_source, target_path)
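merge delegates the real work to find_last_path and images_to_video, neither of which appears in the excerpt. The sketch below shows one way images_to_video could stitch the JPEG frames saved by get_test_face into a single file with cv2.VideoWriter; the frame rate, codec, and sorting by file name are assumptions, not the project's confirmed choices.

import os
import cv2

def images_to_video(image_dir, target_path, fps=25):
    # Hedged sketch (assumption): read the saved JPEG frames in name order
    # and write them into one video file via cv2.VideoWriter.
    frames = sorted(f for f in os.listdir(image_dir) if f.endswith('.jpg'))
    if not frames:
        return
    height, width = cv2.imread(os.path.join(image_dir, frames[0])).shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(target_path, fourcc, fps, (width, height))
    for name in frames:
        frame = cv2.imread(os.path.join(image_dir, name))
        if frame is not None:
            writer.write(frame)
    writer.release()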
Code Example #3
File: detect.py  Project: hyqlmy/FaceDetection
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with a CJK font
fontpath = r"C:/Windows/Fonts/simfang.ttf"  # FangSong Chinese font file
# Image compression step; basically unused any more, deprecated
# compress()
# Load the trained model: arrays of known face encodings and their names
known_face_encodings, known_face_names = load_face()

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
does, select = selection()  # selection() (not shown) picks the video source and run mode
if select == '3':
    output = mkpath('tmp/output/')
begin = time()
video_capture = cv2.VideoCapture(does)
msc_start = time()
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Resize frame of video to 1/4 size for faster face recognition processing
    # When the stream ends, frame is None and resize raises, which exits the loop
    try:
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    except Exception as result:
        print("Failed to read or resize frame:", result)
        break
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
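The excerpt stops right after the BGR-to-RGB conversion. In the face_recognition library's standard webcam example, the loop goes on to locate faces in the downscaled frame and match them against the known encodings; the helper below sketches that step under the assumption that detect.py follows the same pattern (it is not the project's confirmed code).

import face_recognition
import numpy as np

def recognize_faces(rgb_small_frame, known_face_encodings, known_face_names):
    # Locate faces in the downscaled RGB frame and match each one against
    # the encodings returned by load_face(); unmatched faces become "Unknown".
    face_locations = face_recognition.face_locations(rgb_small_frame)
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    face_names = []
    for face_encoding in face_encodings:
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
        name = "Unknown"
        distances = face_recognition.face_distance(known_face_encodings, face_encoding)
        if len(distances):
            best = int(np.argmin(distances))
            if matches[best]:
                name = known_face_names[best]
        face_names.append(name)
    return face_locations, face_names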