Example #1
from facial_emotion_recognition import EmotionRecognition
import cv2
er = EmotionRecognition(device='gpu')
cam = cv2.VideoCapture(0)
while True:
    success, frame = cam.read()
    frame = er.recognise_emotion(frame, return_type='BGR')
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
cam.release()
cv2.destroyAllWindows()
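If no CUDA device is available, the same loop runs with device='cpu'. A minimal sketch that also guards against failed camera reads (the guard is an addition, not part of the original example):

from facial_emotion_recognition import EmotionRecognition
import cv2

er = EmotionRecognition(device='cpu')  # assumption: CPU mode when no GPU is present
cam = cv2.VideoCapture(0)
while True:
    success, frame = cam.read()
    if not success:  # camera returned no frame; stop instead of crashing
        break
    frame = er.recognise_emotion(frame, return_type='BGR')
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:  # Esc exits
        break
cam.release()
cv2.destroyAllWindows()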
Example #2
import numpy as np
import cv2

start = True
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if ret:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if start:
            start = False
            current = gray.copy()

        a = current[:, 0].size  # number of rows (the gray frame is 2-D)
        b = current[0, :].size  # number of columns

        x = np.arange(0, a, 1)
        y = np.arange(0, b, 1)

        delay = np.zeros((a, b))
        for i1 in x:
            for i2 in y:
                if current[i1, i2] < gray[i1, i2]:
                    current[i1, i2] += 1
                # reset pixels that are strongly red in the BGR frame
                if (frame[i1, i2, 2] > 180 and frame[i1, i2, 1] < 50
                        and frame[i1, i2, 0] < 50):
                    current[i1, i2] = 0
        cv2.imshow('frame', current)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

cap.release()
cv2.destroyAllWindows()
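The per-pixel Python loop above is very slow on full-resolution frames; the same update can be written with vectorized NumPy masks. A minimal equivalent sketch (same logic, not the original author's code):

import numpy as np
import cv2

cap = cv2.VideoCapture(0)
current = None

while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if current is None:
        current = gray.copy()

    # brighten 'current' one step wherever the live frame is brighter
    current[current < gray] += 1
    # reset pixels that are strongly red in the BGR frame
    red = (frame[:, :, 2] > 180) & (frame[:, :, 1] < 50) & (frame[:, :, 0] < 50)
    current[red] = 0

    cv2.imshow('frame', current)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()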
Example #3
    def monitor(self):
        success, image = self.cap.read()
        if not self.cap.isOpened():
            self.cap = cv2.VideoCapture(self.streamAddr)
            success, image = self.cap.read()
        if image is not None:  # comparing a NumPy array with != None is a bug
            image_copy = image.copy()
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if success:
            self._cnt += 1
            if self._cnt == 10:
                self._handler.baseImg.append(image_copy)
            self._handler.add_diff_value(image_copy)
            # self._handler.add_candidate_img(image_copy)
            # cv2.rectangle(image, (480, 360), (1440, 900), (255, 0, 0))
            show_img = QImage(image.data, image.shape[1], image.shape[0],
                              QImage.Format_RGB888)
            photo = QPixmap.fromImage(show_img)
            self._images.append(photo)
            n = len(self._images)
            if n >= 9:
                tmp = self._images[n - 9:n]  # keep only the last nine QPixmaps to bound the cache
                del self._images
                self._images = tmp

            if len(self._images) >= 9:
                flag, self._abnormal_rois, self._draw_index = self._handler.hasAbnormal()
                if flag or self._is_real_normal:
                    if self._cnt_flag < 8:
                        self._handler.check_bg(image_copy)
                        self._is_real_normal = True
                        self._cnt_flag += 1
                    else:
                        self._is_real_normal = False
                        print "现在可能有异常了,前面的处理完了没--"
                        ab_pixmap = self._images[4]
                        updated_candidate = self._handler.img_to_candidate()
                        self._cnt_flag = 0

                        if self._step2_processed:  # second process finish
                            print "可以处理新的数据了--------"
                            if self._handler.candidate_valid():
                                self._step2_processed = False
                                if updated_candidate:
                                    self._handler.saveImg(self._cnt)
                                self._buffers[0] = POJO(
                                    ab_pixmap, self._handler.get_candidate())

                                self._notify_event.set()
                            else:
                                print "没有被判定可用的图像--"
                        else:
                            print "还没处理完,你再等会儿----"

            utils.set_label_pic(self._window.video, photo)
            if self._updated_abnormal_pic:
                self._updated_abnormal_pic = False
                self.update_show()
            else:
                self._warning_timer.stop()
        else:
            self.cap.release()
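The BGR-to-RGB conversion followed by the QImage/QPixmap round trip is the core display step of this monitor loop. A self-contained sketch of just that conversion (PyQt5 assumed; the helper name is hypothetical):

import cv2
from PyQt5.QtGui import QImage, QPixmap

def frame_to_pixmap(frame_bgr):
    """Convert an OpenCV BGR frame into a QPixmap for display in a Qt widget."""
    rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    h, w, _ = rgb.shape
    # passing bytesPerLine (3 * w) avoids skewed output when rows are not padded
    img = QImage(rgb.data, w, h, 3 * w, QImage.Format_RGB888)
    return QPixmap.fromImage(img)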
Example #4
import cv2

cap = cv2.VideoCapture(r'C:\Users\ravillalavanya\Downloads\vtest.avi')
# Videos are just sequences of images,
# so we add a while loop to capture frames continuously.

facecascade = cv2.CascadeClassifier(r"C:\Users\ravillalavanya\Downloads\haarcascade_fullbody.xml")

while True:
    success, frame = cap.read() # frame holds the captured image; success tells us whether the capture succeeded or not
    if not success: # stop when the video ends
        break

    imgGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = facecascade.detectMultiScale(imgGray, 1.1, 4)

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 0, 255), 2)

    cv2.imshow("video", frame)

    if cv2.waitKey(1) == ord('q'): # This adds a delay and looks for a key press in order to break the loop
        break

cap.release() # Release the resources when done
cv2.destroyAllWindows()
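The hard-coded Windows paths make Example #4 non-portable. OpenCV ships the standard Haar cascade XML files with the package, so the classifier can be loaded like this instead (a sketch; the video path still has to be supplied by the user):

import cv2

# haarcascade_fullbody.xml ships with the opencv-python package
cascade_path = cv2.data.haarcascades + 'haarcascade_fullbody.xml'
body_cascade = cv2.CascadeClassifier(cascade_path)
if body_cascade.empty():  # empty() is True when the XML failed to load
    raise IOError('could not load cascade: ' + cascade_path)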
Example #5
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f" the time is {strTime}")
            print("the time is " + strTime)

        elif 'whats the temperature' in query:
            res = app.query(query)
            speak(next(res.results).text)

        elif 'calculator' in query:
            speak("what should I calculate?")
            gh = takecommand().lower()
            res = app.query(gh)
            speak(next(res.results).text)

        elif "open camera" in query:
            nar = cv2.VideoCapture(0)
            while True:
                success, img = nar.read()
                cv2.imshow('camera', img)
                k = cv2.waitKey(58)
                if k == 27:
                    break
            nar.release()
            cv2.destroyAllWindows()

        elif "ip address" in query:
            ip = get("https://api.ipify.org").text
            speak(f"your IP address is {ip}")
            print(f"your IP address is {ip} ")

        elif "take screenshot" in query or "take a screenshot" in query:
            speak("sir, please tell me the name for this screenshot file")
Example #6
import argparse, datetime
import imutils
import time
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area")
args = vars(ap.parse_args())
if args.get("video", None) is None:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)

else:
    camera = cv2.VideoCapture(args["video"])

firstFrame = None

while True:
    (grabbed, frame) = camera.read()
    text = "Unoccupied"
    b = 0
    if not grabbed:
        break

    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if firstFrame is None:
        firstFrame = gray
        continue
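The snippet breaks off right after the reference frame is stored. The usual continuation of this motion-detection pattern (a sketch, not the original code) diffs each new frame against firstFrame, thresholds the difference, and boxes regions larger than --min-area:

    # inside the same while loop, after firstFrame has been set
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    for c in cnts:
        if cv2.contourArea(c) < args["min_area"]:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    cv2.imshow("Security Feed", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()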