"""Pedestrian detection on a video file using OpenCV's pre-trained HOG + linear-SVM
people detector, displayed frame by frame via the project's playVideo helper."""
import cv2

from imutils.video_capture import playVideo
from imutils.object_detection import non_max_suppression

# HOG descriptor initialized with the default pedestrian SVM shipped with OpenCV.
hog = cv2.HOGDescriptor()
detector = cv2.HOGDescriptor_getDefaultPeopleDetector()
hog.setSVMDetector(detector)


def detect(image):
    """Detect pedestrians in *image*.

    Returns a copy of *image* with a red 2-px rectangle drawn around each
    surviving detection. The input image is not modified.
    """
    rects, weights = hog.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)
    # Non-maximum suppression: collapse heavily-overlapping detections.
    # NOTE(review): detectMultiScale yields (x, y, w, h) boxes, while imutils'
    # non_max_suppression computes overlap from (x1, y1, x2, y2) corners — the
    # raw boxes are passed through unconverted; verify this is intentional.
    rects = non_max_suppression(rects, overlapThresh=.65)
    output = image.copy()
    for x, y, w, h in rects:
        cv2.rectangle(output, (x, y), (x + w - 1, y + h - 1), (0, 0, 255), 2)
    return output


def captureFunc(frame, frameIndex):
    """Per-frame callback for playVideo: run detection and show the result."""
    result = detect(frame)
    cv2.imshow("Frame", result)
    return result


playVideo("../1.入门/OpenCV高级项目/vtest.avi", captureFunc=captureFunc)
# NOTE(review): this chunk begins mid-function — the statements below appear to be
# the tail of a per-frame capture callback; its `def` line, and the `saliency`
# object they use, are defined before this chunk (not visible here — confirm).
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, saliencyMap = saliency.computeSaliency(frame)
# Scale the float saliency map to an 8-bit image for display.
saliencyMap = (saliencyMap * 255).round().astype(np.uint8)
cv2.imshow("Output", saliencyMap)


def endFunc():
    # Cleanup callback for playVideo: close all OpenCV windows.
    cv2.destroyAllWindows()


# Static saliency detection (these algorithms rely on image features and
# statistics to locate the salient regions of a single image).
# Static spectral-residual saliency.
playVideo("vtest.avi", fps=10.0, winname="Input", startFunc=StaticSaliencySpectralResidual_startFunc, captureFunc=StaticSaliencySpectralResidual_captureFunc, endFunc=endFunc)
# Fine-grained saliency.
playVideo("vtest.avi", fps=10.0, winname="Input", startFunc=StaticSaliencyFineGrained_startFunc, captureFunc=StaticSaliencyFineGrained_captureFunc, endFunc=endFunc)
# Motion saliency detection (these algorithms take a video or a sequence of
# consecutive frames as input, process the frames, and track the "moving"
# objects; those moving objects are treated as the salient regions).
# NOTE(review): the call below is truncated at the chunk boundary — its
# remaining keyword arguments lie outside the visible text.
playVideo("vtest.avi", fps=10.0, winname="Input",
# NOTE(review): this chunk begins mid-function — the statements below appear to
# be the tail of a `replayCondition(frameIndex)` callback; its `def` line, plus
# the `fgbg` subtractor, `startFunc`, `modelInited` and `initializationFrames`
# globals, are defined before this chunk (not visible here — confirm).
global modelInited
global initializationFrames
# Once the background model has just finished initializing, request a replay of
# the video so the (now-trained) model can process it from the start.
if not modelInited and frameIndex == initializationFrames:
    print("[Info] model initialization complete, replay video")
    modelInited = True
    return True
return False


def captureFunc(frame, frameIndex):
    # Per-frame callback: feed the background subtractor; once the model is
    # initialized, show the foreground extracted from the current frame.
    global modelInited
    # Feed the frame to the background subtractor (trains/updates the model).
    mask = fgbg.apply(frame)
    # Model initialization has finished — the mask is now meaningful.
    if modelInited:
        # Morphological opening removes small speckle noise from the mask.
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, None, iterations=2)
        output = cv2.bitwise_and(frame, frame, mask=mask)
        cv2.imshow("Output", output)
    return frame


playVideo("vtest.avi", fps=10, replayCondition=replayCondition, captureFunc=captureFunc, startFunc=startFunc)
# 输出视频路径,编解码器,码率,分辨率 writer = cv2.VideoWriter(output_filename, fourcc, fps, (w * 2, h * 2), True) zeros = np.zeros((h, w), dtype=np.uint8) blue = cv2.merge([frame[..., 0], zeros, zeros]) green = cv2.merge([zeros, frame[..., 1], zeros]) red = cv2.merge([zeros, zeros, frame[..., 2]]) output = np.zeros((h * 2, w * 2, 3), dtype=np.uint8) output[0:h, 0:w] = frame output[0:h, w:w * 2] = red output[h:h * 2, w:w * 2] = green output[h:h * 2, 0:w] = blue # 写入文件 writer.write(output) cv2.imshow("Output", output) def endFunc(): global writer if writer: writer.release() playVideo("vtest.avi", fps=fps, captureFunc=captureFunc, showOriginalFrame=True, endFunc=endFunc)