Example #1
def show_camera(self):
    # Grab a frame, resize it, and convert BGR -> RGB so Qt displays the colours correctly.
    flag, self.image = self.cap.read()
    show = cv2.resize(self.image, (640, 480))
    show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
    showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)
    self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage))
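
A minimal sketch of how a method like show_camera is usually driven, assuming a PyQt-style widget with QtCore imported alongside QtGui; the timer attribute name and interval are illustrative, while self.cap and self.label_show_camera come from the example above:

def init_camera(self):
    # Open the default camera and poll a new frame roughly every 30 ms.
    self.cap = cv2.VideoCapture(0)
    self.timer_camera = QtCore.QTimer(self)
    self.timer_camera.timeout.connect(self.show_camera)
    self.timer_camera.start(30)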
Example #2
def blend_transparent(face_img, overlay_t_img):
    # Split the 4-channel overlay into its colour and alpha planes.
    overlay_img = overlay_t_img[:, :, :3]
    overlay_mask = overlay_t_img[:, :, 3:]
    # The background weight is the inverse of the overlay alpha.
    background_mask = 255 - overlay_mask
    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)
    # Weight each image by its mask in the 0..1 range, then scale back to 0..255.
    face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))
    return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0,
                                    0.0))
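
A small usage sketch for blend_transparent, assuming a transparent PNG overlay on disk (the file names are hypothetical) that is resized to match the face image; cv2 and numpy are imported as in the function above:

face = cv2.imread("face.jpg")
overlay = cv2.imread("glasses.png", cv2.IMREAD_UNCHANGED)  # keep the alpha channel
overlay = cv2.resize(overlay, (face.shape[1], face.shape[0]))
blended = blend_transparent(face, overlay)
cv2.imwrite("blended.jpg", blended)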
Example #3
def process_video(content_path, style_path, outfile):
    j = 0
    video, videoWriter = load_video(content_path, style_path, outfile)
    while video.isOpened():
        j = j + 1
        ret, frame = video.read()
        if not ret:
            break

        if j % 1 == 0:  # stylise every frame; raise the modulus to skip frames
            # Stylise the current frame.
            style = Image.open(style_path)
            content = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            output = image_process(content, style)
            # Post-process the stylised result so it can be written to the output video.
            save_frame(output, videoWriter)
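
load_video, image_process and save_frame are helpers from the surrounding project and are not shown; a plausible minimal version of the two video I/O helpers, written here as an assumption rather than the project's actual code:

import cv2
import numpy as np

def load_video(content_path, style_path, outfile):
    # Open the input video and create a writer that mirrors its size and frame rate.
    video = cv2.VideoCapture(content_path)
    fps = video.get(cv2.CAP_PROP_FPS)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    videoWriter = cv2.VideoWriter(outfile, fourcc, fps, (w, h))
    return video, videoWriter

def save_frame(output, videoWriter):
    # Convert the stylised PIL image back to a BGR array before writing.
    frame = cv2.cvtColor(np.array(output), cv2.COLOR_RGB2BGR)
    videoWriter.write(frame)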
Example #4
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0
print('[INFO] poly_watch started !!')

# capture frames from the camera
for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    #grab the raw NumPy array of the image and init timestamp and text
    frame = f.array
    timestamp = datetime.datetime.now()
    text = "Unoccupied"

    # Computer vision!
    # convert the frame to grayscale and blur it
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, tuple(conf['blur_size']), 0)

    #if the avg frame is None, init
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        rawCapture.truncate(0)
        continue

    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and the running average
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    cv2.accumulateWeighted(gray, avg, 0.5)
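
The excerpt stops at the frame delta; a typical continuation inside the same capture loop, sketched under the assumption that the usual threshold-and-contour motion check follows (the conf keys and the OpenCV 4 findContours return shape are assumptions):

    # threshold the delta image, dilate it to fill in holes, then find contours
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for c in cnts:
        # ignore contours that are too small to be real motion
        if cv2.contourArea(c) < conf["min_area"]:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    # clear the PiCamera stream so the next frame can be captured
    rawCapture.truncate(0)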
Example #5
# intent      :
# Author      : Michael Jack hu
# start date  : 2018/10/8
# File        : 人脸识别2.py
# Software    : PyCharm
# finish date :

# Add face detection to an image: note that a detection model must be loaded.
# 1. Import the library
import cv2
# 2. Load the image
img = cv2.imread(r'D:\04.jpg')
# 3. Load the face detection model (the path must point at a Haar cascade XML file)
face = cv2.CascadeClassifier(r'D:\haarcascade_frontalface_default.xml')
# 4. Convert the image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# 5. Detect faces
faces = face.detectMultiScale(gray)
# 6. Mark the faces
for (x, y, w, h) in faces:
    # Arguments: image, top-left corner, bottom-right corner, colour (BGR), line width
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 10)
# 7. Create a window
cv2.namedWindow('jiaqi')
# 8. Show the image
cv2.imshow('jiaqi', img)
# 9. Wait for a key press
cv2.waitKey(0)
# 10. Close all windows
cv2.destroyAllWindows()
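
If the opencv-python package is installed, the bundled cascades can be loaded without hard-coding a path; a short alternative for step 3, assuming a build recent enough to ship cv2.data:

face = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')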
Example #6
    def set_scale(val):
        # Trackbar callback: remember the selected histogram scale.
        global hist_scale
        hist_scale = val

    cv2.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)

    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cam = video.create_capture(fn, fallback=' ')

    while True:
        flag, frame = cam.read()
        cv2.imshow('camera', frame)

        small = cv2.pyrDown(frame)
        hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV)

        # Suppress very dark pixels so they do not dominate the histogram.
        dark = hsv[:, :, 2] < 32
        hsv[dark] = 0

        # 2D hue/saturation histogram of the frame.
        h = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])

        h = np.clip(h * 0.005 * hist_scale, 0, 1)

        vis = hsv_map * h[:, :, np.newaxis] / 255.0
        cv2.imshow('hist', vis)
        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break
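
This excerpt resembles OpenCV's color_histogram.py sample and assumes that hsv_map and hist_scale were set up before the lines shown; a likely setup, reconstructed here as an assumption:

import sys
import numpy as np
import cv2
import video  # helper module shipped with the OpenCV samples

# Build a hue/saturation lookup image used to colour the 2D histogram view.
hsv_map = np.zeros((180, 256, 3), np.uint8)
h, s = np.indices(hsv_map.shape[:2])
hsv_map[:, :, 0] = h
hsv_map[:, :, 1] = s
hsv_map[:, :, 2] = 255
hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
cv2.imshow('hsv_map', hsv_map)

cv2.namedWindow('hist', 0)
hist_scale = 10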
Example #7
import numpy as np
from PIL import ImageGrab
import cv2

# mp4v pairs with the .mp4 container (XVID is normally written to .avi)
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
size = (ImageGrab.grab()).size
output = cv2.VideoWriter("output.mp4", fourcc, 5.0, size)
while True:
    # ImageGrab returns RGB; convert to BGR for OpenCV display and writing.
    img = np.array(ImageGrab.grab())
    frame = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.imshow("Screen", frame)
    output.write(frame)
    if cv2.waitKey(1) == 27:  # stop on Esc
        break
output.release()
cv2.destroyAllWindows()