Example #1
def cv2_demo(net, transform):
    def predict(frame):
        height, width = frame.shape[:2]
        x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))
        y = net(x)  # forward pass
        detections = y.data
        # scale each detection back up to the image
        scale = torch.Tensor([width, height, width, height])
        for i in range(detections.size(1)):
            j = 0
            # guard j so it cannot run past the detections kept for this class
            while j < detections.size(2) and detections[0, i, j, 0] >= 0.6:
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                cv2.rectangle(frame,
                              (int(pt[0]), int(pt[1])),
                              (int(pt[2]), int(pt[3])),
                              COLORS[i % 3], 2)
                cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),
                            FONT, 2, (255, 255, 255), 2, cv2.LINE_AA)
                j += 1
        return frame

    # start video stream thread, allow buffer to fill
    print("[INFO] starting threaded video stream...")
    stream = WebcamVideoStream(src=0).start()  # default camera
    time.sleep(1.0)
    # start the FPS timer, then loop over frames from the video stream
    fps = FPS().start()
    while True:
        # grab next frame
        frame = stream.read()
        key = cv2.waitKey(1) & 0xFF

        # update FPS counter
        fps.update()
        frame = predict(frame)

        # keybindings for display
        if key == ord('p'):  # pause
            while True:
                key2 = cv2.waitKey(1) & 0xFF
                cv2.imshow('frame', frame)
                if key2 == ord('p'):  # resume
                    break
        cv2.imshow('frame', frame)
        if key == 27:  # exit
            break
    fps.stop()
    stream.stop()
    cv2.destroyAllWindows()
Example #2
def stream():
    vs = WebcamVideoStream(src="rtmp://192.168.100.240:1935/b").start()
    fps = FPS().start()

    while True:
        frame = vs.read()

        if frame is None:
            print("Error: failed to capture image")
            break

        # encode in memory instead of writing demo.jpg and re-reading it
        # (the original also never closed the file handle it opened)
        ret, jpeg = cv2.imencode('.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')
        fps.update()
    fps.stop()
    vs.stop()
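A generator like stream() above is usually wired to a web framework's multipart response. A minimal Flask sketch, assuming Flask is installed (the app and route names here are illustrative, not from the original):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # multipart/x-mixed-replace makes the browser swap each JPEG in place
    return Response(stream(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')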
Example #3
    def __init__(self):
        super(Afis_1, self).__init__()
        loadUi('gui.ui', self)

        self.image = None
        self.im_thresh = None
        self.start_counter = 0

        self.data = [[], [], [], []]
        self.kata = []
        self.data_compress = [[], []]
        self.data_morse = []

        # [ record, mode(0=single,1=multi)]
        self.rd_snl = [0, 0]

        self.t1 = 0
        self.t2 = 0
        self.sinyal = [[], []]

        self.massage = ""

        self.position_cursor = []

        self.capture = WebcamVideoStream(src=0).start()
        #        self.capture = cv2.VideoCapture(0)

        #        ret, self.image =self.capture.read()
        self.image = self.capture.read()

        height, width, can = np.shape(self.image)
        self.fy = 480 / height
        self.fx = 640 / width

        # print(self.fx)
        # print(self.fy)
        self.main_frame.mousePressEvent = self.getPos
        self.Button_Start.toggled.connect(self.start_cam)
        self.Button_Start.setCheckable(True)

        self.Button_Save.toggled.connect(self.start_record)
        self.Button_Save.setCheckable(True)

        self.Button_Clear.clicked.connect(self.clear_data)

        self.Button_Send.clicked.connect(self.send_data)
Example #4
    def recognize(self):

        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read('trainer/trainer.yml')
        cascadePath = "haarcascade/haarcascade_frontalface_default.xml"
        faceCascade = cv2.CascadeClassifier(cascadePath)
        if sys.platform == 'win32':
            from imutils.video import WebcamVideoStream
            cap = WebcamVideoStream(src=0).start()
        else:
            from imutils.video.pivideostream import PiVideoStream
            cap = PiVideoStream().start()
        font = cv2.FONT_HERSHEY_COMPLEX
        d = database.Database().getAll()
        ls = {}
        for doc in d:
            ls[doc['id']] = doc['name']
        name = 'unknown'

        while True:
            im = cap.read()
            gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(gray, 1.2, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)
                Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
                if sys.platform == 'win32':
                    if (conf <= 70):
                        name = ls.get(Id)
                        print(name, Id, conf)
                    else:
                        name = "who?"
                else:
                    if (conf >= 50):
                        name = ls.get(Id)
                        print(name, Id, conf)
                    else:
                        name = "who?"

                cv2.putText(im, str(name), (x, y + h), font, 1, (255, 255, 255))
            cv2.imshow('im', im)
            if cv2.waitKey(10) == ord('q'):
                break
        cap.stop()
        cv2.destroyAllWindows()
        Gui.Gui()
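For context, the trainer/trainer.yml file read above comes from an LBPH training step. A minimal sketch with dummy data standing in for real grayscale face crops (the project's actual training script is not shown here; cv2.face requires the opencv-contrib-python build):

import cv2
import numpy as np

# Dummy grayscale "face crops" and integer IDs stand in for real training data.
images = [np.random.randint(0, 255, (100, 100), dtype=np.uint8) for _ in range(4)]
ids = np.array([1, 1, 2, 2], dtype=np.int32)

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(images, ids)
recognizer.write('trainer/trainer.yml')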
Example #5
    def __init__(self, cam_id):
        self.id = cam_id
        self.stream = WebcamVideoStream(self.id).start()
        self.size = get_image_size(self)
        self.cam_mat = None
        self.dist_coeff = None

        # targets
        # names for targets - short for index, middle, thumb(fingers)
        targ_names = cf['targets']
        self.targets = create_tracking_targets(targ_names, cam_id)

        self._cal_count = None
        self._cal_funcs = [self._get_cal_frame, self._select_target]

        self._cal_image = None
        self._circle_draw = None
Example #6
def web_camera(src):
    stream = WebcamVideoStream(src=src).start()
    while True:
        # grab next frame
        frame = stream.read()
        key = cv2.waitKey(1) & 0xFF

        # keybindings for display
        if key == ord('p'):  # pause
            while True:
                key2 = cv2.waitKey(1) & 0xFF
                cv2.imshow('frame', frame)
                if key2 == ord('p'):  # resume
                    break
        cv2.imshow('frame', frame)
        if key == 27:  # exit
            break
    stream.stop()
    cv2.destroyAllWindows()
Example #7
def main():

    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

    paused = False
    while (True):

        if not paused:
            # 800x600 windowed mode
            #screen = grab_screen(region=(0,40,800,640))
            last_time = time.time()
            vs = WebcamVideoStream(src=0).start()
            fps = FPS().start()

            # loop over some frames...this time using the threaded stream
            while fps._numFrames < args["num_frames"]:
                # grab the frame from the threaded video stream and resize it
                # to have a maximum width of 400 pixels
                frame = vs.read()
                screen = imutils.resize(frame, width=400)
                screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
                final = cv2.resize(screen, (80, 60))
                fps.update()  # advance the counter so this inner loop can end
                #cv2.imshow("Frame", screen)
                #cv2.imshow("Frame2", final)
            fps.stop()
            vs.stop()  # release the stream before the outer loop recreates it
            # `final` above is resized to something a bit more acceptable for a CNN
            keys = key_check()
            output = keys_to_output(keys)
            training_data.append([screen, output])

            if len(training_data) % 5000 == 0:
                print(len(training_data))
                np.save(file_name, training_data)

        keys = key_check()
        if 'T' in keys:
            if paused:
                paused = False
                print('unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(1)
Example #8
 def recognize_Face(self):
     '''
         Recognition of the new input image
         Input is from web camera 
     '''
     #frame = cv2.imread('bhog.jpg')
     #frame = cv2.resize(frame,(640,480))
     cam = WebcamVideoStream(src=1).start()
     prevTime = 0
     while True:
         curTime = time.time()
         frame = cam.read()
         rects, landmarks = self.face_detect.detect_face(frame,20)
         aligns = []
         positions = []
         if len(rects) > 0:
             for (i, rect) in enumerate(rects):
                 aligned_face, face_pos = self.aligner.align(160,frame,landmarks[i])
                 aligns.append(aligned_face)
                 positions.append(face_pos)
             features_arr = self.extract_feature.get_features(aligns)
             recog_data = findPeople(features_arr,positions)
             for (i, rect) in enumerate(rects):
                 # draw the bounding box and the recognised name + confidence
                 cv2.rectangle(frame, (rect[0], rect[1]),
                               (rect[0] + rect[2], rect[1] + rect[3]),
                               (255, 0, 0), 3)
                 cv2.putText(frame, recog_data[i][0] + "-" + str(recog_data[i][1]) + "%",
                             (rect[0], rect[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
                 communicate(recog_data[i][0], dictionary_list)

         sec = curTime - prevTime
         prevTime = curTime
         fps = 1 / sec if sec > 0 else 0  # guard against a zero interval
         string = 'FPS: %2.3f' % fps
         text_fps_x = len(frame[0]) - 150
         text_fps_y = 20
         #frame = cv2.resize(frame,(640,480))
         cv2.putText(frame, string,(text_fps_x, text_fps_y),cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), thickness=1, lineType=2)
         #cv2.imwrite('group.jpg',frame)
         cv2.imshow("Frame",frame)
         key = cv2.waitKey(1) & 0xFF
         if key == ord("q"):
             break
             #cv2.destroyAllWindows()
         #database.close_connection()
     cv2.destroyAllWindows()
     cam.stop()
     data_file.close()
Example #9
def webcamAgeAndGenderDetection():

    face_net, age_net, gender_net = load_caffe_models()
    video_capture = WebcamVideoStream(src=0).start()
    # image = loadImage(img_path)
    while True:
        padding = 20
        frame = video_capture.read()
        resultImg, faceBoxes = highlightFace(face_net, frame)

        for faceBox in faceBoxes:
            face = frame[max(0, faceBox[1] -
                             padding):min(faceBox[3] +
                                          padding, frame.shape[0] - 1),
                         max(0, faceBox[0] -
                             padding):min(faceBox[2] +
                                          padding, frame.shape[1] - 1)]

            blob = cv2.dnn.blobFromImage(face,
                                         1.0, (227, 227),
                                         MODEL_MEAN_VALUES,
                                         swapRB=False)

            #Predict Gender
            gender_net.setInput(blob)
            gender_preds = gender_net.forward()
            gender = gender_list[gender_preds[0].argmax()]

            #Predict Age
            age_net.setInput(blob)
            age_preds = age_net.forward()
            age = age_list[age_preds[0].argmax()]
            overlay_text = "%s %s" % (gender, age)
            cv2.putText(resultImg, overlay_text, (faceBox[0], faceBox[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, .7, (0, 255, 255), 2,
                        cv2.LINE_AA)

        cv2.imshow("Video", resultImg)
        # Quits when 'Q' or 'q' or 'esc' is pressed
        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break

    video_capture.stop()
    cv2.destroyAllWindows()
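The helpers load_caffe_models, highlightFace, gender_list and age_list are defined elsewhere in that project. A plausible sketch of the model-loading step via cv2.dnn; the file names are assumptions based on the commonly used age/gender Caffe models:

import cv2

MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
gender_list = ['Male', 'Female']
age_list = ['(0-2)', '(4-6)', '(8-12)', '(15-20)',
            '(25-32)', '(38-43)', '(48-53)', '(60-100)']

def load_caffe_models():
    # cv2.dnn.readNet accepts (model, config) pairs; the paths are assumptions
    face_net = cv2.dnn.readNet('opencv_face_detector_uint8.pb',
                               'opencv_face_detector.pbtxt')
    age_net = cv2.dnn.readNet('age_net.caffemodel', 'age_deploy.prototxt')
    gender_net = cv2.dnn.readNet('gender_net.caffemodel', 'gender_deploy.prototxt')
    return face_net, age_net, gender_net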
Example #10
def main():
    print("startup")
    vs = WebcamVideoStream(src=0).start()
    fps = FPS().start()

    while fps._numFrames < 100:
        print(fps._numFrames)
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        cv2.imshow("Frame", frame)

        fps.update()

    fps.stop()

    cv2.destroyAllWindows()
    vs.stop()
Example #11
  def __init__(self):
    # These variables define the average width and height of human face (in cm)
    self.face_width  = 13.9
    self.face_height = 22.5

    # Get ROS parameters
    self._published_topic = rospy.get_param('published_topic', '/face_distance')
    self._video_device = rospy.get_param('video_device', '/dev/front_camera')

    self._graph_name  = rospy.get_param('graph_name', '/frozen_inference_graph_face.pb')
    self._label_name  = rospy.get_param('label_name', '/face_label_map.pbtxt')
    self._num_classes = rospy.get_param('num_classes', 2)

    self._min_score = rospy.get_param('min_score', 0.5)
    self._display_image = rospy.get_param('display_image', True)

    self._camera_info_path = rospy.get_param('camera_info_path', '/home/yago/catkin_ws/src/eyecu/eyecu/config/logitech_webcam_calibration.yaml')

    # Tensorflow initialization
    self._path_to_ckpt = sys.path[0] + '/exported_graphs' + self._graph_name
    self._path_to_labels = sys.path[0] + '/labels' + self._label_name

    self._detection_graph = tf.Graph()
    self.load_graph()

    self._session = tf.Session(graph=self._detection_graph)

    self._label_map = label_map_util.load_labelmap(self._path_to_labels)
    self._categories = label_map_util.convert_label_map_to_categories(self._label_map,
               max_num_classes=self._num_classes, use_display_name=True)
    self._category_index = label_map_util.create_category_index(self._categories)

    # OpenCV video capture
    self._cap = WebcamVideoStream(src=self._video_device).start()

    # Load values for calibration
    self.load_camera_info()

    # Publisher variable
    self.face_distance = DistanceCamera()

    # Subscribers and publishers
    self._pub = rospy.Publisher(self._published_topic, DistanceCamera, queue_size=1)
Example #12
 def starter(self):
     self.clientVideoSocket.connect((self.HOST, self.PORT_VIDEO))
     try:
         MediaChat.wvs = WebcamVideoStream(0)
         self.wvs.start()
     except Exception:
         print("Camera cannot start...")
     self.clientAudioSocket.connect((self.HOST, self.PORT_AUDIO))
     self.audio = pyaudio.PyAudio()
     self.stream = self.audio.open(format=FORMAT,
                                   channels=CHANNELS,
                                   rate=RATE,
                                   input=True,
                                   output=True,
                                   frames_per_buffer=CHUNK)
     # Thread.start() returns None, so there is no point keeping the results
     Thread(target=self.SendFrame).start()
     Thread(target=self.SendAudio).start()
     Thread(target=self.RecieveFrame).start()
     Thread(target=self.RecieveAudio).start()
Example #13
def capture_with_threading_without_workload(camera):
    vs = WebcamVideoStream(camera).start()

    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        start = time.time()
        frame = vs.read()
        read_cost = (time.time() - start) * 1000
        print("read: {:.2f}ms".format(read_cost))

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
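For comparison, a minimal unthreaded counterpart using plain cv2.VideoCapture; read() blocks on the camera here, so the printed per-frame times are typically much higher than with the threaded stream above:

import time
import cv2

def capture_without_threading(camera):
    cap = cv2.VideoCapture(camera)
    while True:
        start = time.time()
        grabbed, frame = cap.read()  # blocks until the camera delivers a frame
        if not grabbed:
            break
        print("read: {:.2f}ms".format((time.time() - start) * 1000))
        cv2.imshow("Frame", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()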
Example #14
 def __init__(self,
              src=0,
              usePiCamera=False,
              resolution=(320, 240),
              framerate=32):
     # check to see if the picamera module should be used
     if usePiCamera:
         # only import the picamera packages if we are explicitly
         # told to do so -- this helps remove the requirement of
         # `picamera[array]` from desktops or laptops that still
         # want to use the `imutils` package
         from pivideostream import PiVideoStream
         # initialize the picamera stream and allow the camera
         # sensor to warmup
         self.stream = PiVideoStream(resolution=resolution,
                                     framerate=framerate)
     # otherwise, we are using OpenCV so initialize the webcam
     # stream
     else:
         self.stream = WebcamVideoStream(src=src)
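This wrapper mirrors imutils' own VideoStream class. A short usage sketch, assuming the class forwards start()/read()/stop() to the underlying stream the way imutils.video.VideoStream does:

import time
import cv2
from imutils.video import VideoStream  # the library version of this wrapper

vs = VideoStream(src=0).start()
time.sleep(1.0)  # give the camera sensor a moment to warm up
frame = vs.read()
cv2.imwrite('snapshot.jpg', frame)
vs.stop()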
Example #15
def main():
    client = get_mqtt_client()
    client.connect(MQTT_BROKER, port=MQTT_PORT)
    time.sleep(4)  # Wait for connection setup to complete
    client.loop_start()

    # Open camera
    camera = WebcamVideoStream(src=VIDEO_SOURCE).start()
    time.sleep(2)  # Webcam light should come on if using one

    while True:
        frame = camera.read()
        np_array_RGB = opencv2matplotlib(frame)  # Convert to RGB

        image = Image.fromarray(np_array_RGB)  #  PIL image
        byte_array = pil_image_to_byte_array(image)
        client.publish(MQTT_TOPIC_CAMERA, byte_array, qos=MQTT_QOS)
        now = get_now_string()
        print(f"published frame on topic: {MQTT_TOPIC_CAMERA} at {now}")
        time.sleep(1 / FPS)
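A matching subscriber sketch in paho-mqtt 1.x style; the broker host, port and topic below are assumptions standing in for the MQTT_BROKER, MQTT_PORT and MQTT_TOPIC_CAMERA constants used above:

import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # each payload is one encoded frame as raw bytes
    with open('last_frame.jpg', 'wb') as f:
        f.write(msg.payload)

client = mqtt.Client()
client.on_message = on_message
client.connect('localhost', 1883)   # assumption: broker host and port
client.subscribe('camera/frame')    # assumption: same topic the publisher uses
client.loop_forever()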
Example #16
def start_video():
    # deeplab_model = Deeplabv3(backbone='xception', OS=8)

    deeplab_model = Deeplabv3(OS=8)
    vid = WebcamVideoStream(src=0).start()
    cv2.namedWindow("result", cv2.WINDOW_NORMAL)
    blurValue = (3, 3)
    blur_bg_value = 81

    while True:
        frame = vid.read()
        if frame is None:
            break
        w, h, _ = frame.shape
        ratio = 512. / np.max([w, h])

        resized = cv2.resize(frame, (int(ratio * h), int(ratio * w)))
        resized = resized / 127.5 - 1.
        pad_x = int(512 - resized.shape[0])
        resized2 = np.pad(resized, ((0, pad_x), (0, 0), (0, 0)), mode='constant')
        res = deeplab_model.predict(np.expand_dims(resized2, 0))
        labels = np.argmax(res.squeeze(), -1)

        if pad_x > 0:
            labels = labels[:-pad_x]  # a -0 slice here would empty the array
        mask = labels == 0
        mask_person = labels != 0

        resizedFrame = cv2.resize(frame, (labels.shape[1], labels.shape[0]))
        blur = cv2.GaussianBlur(resizedFrame, (blur_bg_value,blur_bg_value), 0)

        blur_person = cv2.GaussianBlur(resizedFrame, blurValue, 0)


        resizedFrame[mask] = blur[mask]
        resizedFrame[mask_person] = blur_person[mask_person]

        cv2.imshow("result", resizedFrame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vid.stop()
    cv2.destroyAllWindows()
Example #17
    def get_frame():
        vs = WebcamVideoStream(src=src).start()
        with model.as_default():
            with tf.Session(graph=model) as sess:
                while True:
                    # grab the frame from the threaded video stream and process it
                    src_frame = vs.read()

                    # Process image
                    out_frame = cp.process_frame(src_frame,
                                                 model,
                                                 sess,
                                                 category_index,
                                                 display=True)

                    imgencode = cv2.imencode('.jpg', out_frame)[1]
                    stringData = imgencode.tobytes()  # tostring() is deprecated
                    # frames are JPEG, so advertise image/jpeg
                    # (the original said text/plain)
                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n' + stringData +
                           b'\r\n')
        vs.stop()
Example #18
def main(pipe_object):
    cam = WebcamVideoStream(src=0).start()
    global pipe
    pipe = pipe_object

    background_set = False
    bg_model = None

    # Drawing variables
    thickness = 4
    drawing = False
    start = time.time()
    count = -1
    check = True
    # Main loop
    while check:
        drawing, bg_model, background_set, check, count, restart_time, thickness = loop(
            height, cam, bg_model, background_set, rect, thickness, drawing,
            start, count)
        if restart_time:
            start = time.time()
Example #19
    def __init__(self, id):
        # self.cap = cv2.VideoCapture(id)
        self.cap = WebcamVideoStream(src = id).start()
        self.cfgfile = "cfg/yolov4-tiny.cfg"
        self.weightsfile = "yolov4-tiny.weights"
        self.confidence = 0.6
        self.nms_thesh = 0.8
        self.num_classes = 1
        self.classes = load_classes('data/butts.names')
        self.colors = pkl.load(open("pallete", "rb"))
        self.model = Darknet(self.cfgfile)
        self.CUDA = torch.cuda.is_available()
        self.model.load_weights(self.weightsfile)
        self.width = 1280   # alternative: 640
        self.height = 720   # alternative: 360
        print("Loading network.....")
        if self.CUDA:
            self.model.cuda()
        print("Network successfully loaded")

        self.model.eval()
Example #20
def obj_center(objX, objY, centerX, centerY):
    signal.signal(signal.SIGINT, signal_handler)

    vs = WebcamVideoStream(args.camera).start()
    # time.sleep(2.0)

    obj = ObjCenter()

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=300)
        (H, W, _) = frame.shape
        centerX.value = W // 2
        centerY.value = H // 2

        objectLoc = obj.update(frame, (centerX.value, centerY.value))
        (objX.value, objY.value) = objectLoc
        cv.imshow("frame", frame)
        print(objectLoc)
        cv.waitKey(1)
    vs.stop()
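Since obj_center loops forever, it is presumably run in its own process with shared values, as in the common pan/tilt tracking pattern. A hypothetical driver sketch; the 'i' typecodes and the process wiring are assumptions:

from multiprocessing import Process, Value

# shared coordinates the tracking loop writes and other processes read
objX = Value('i', 0)
objY = Value('i', 0)
centerX = Value('i', 0)
centerY = Value('i', 0)

p = Process(target=obj_center, args=(objX, objY, centerX, centerY))
p.start()
p.join()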
Example #21
def main():
  global vs, videoDeviceNumber

  # created a threaded video stream
  print("starting webcam thread...")
  vs = WebcamVideoStream(src=videoDeviceNumber).start()
  # allow the camera sensor to warm up before grabbing frames
  time.sleep(1)

  try:
    server = ThreadedHTTPServer(('', 8080), httpHandler)
    print('started httpserver...')
    server.serve_forever()
  except KeyboardInterrupt:
    print('^C received, shutting down server')
    server.shutdown()
    server.server_close()
    try:
      os.sys.exit(0)
    except SystemExit:
      os._exit(0)
Example #22
def detect_process():
    # start video stream thread, allow buffer to fill
    print("[INFO] starting threaded video stream...")

    # use camera
    stream = WebcamVideoStream(src=0).start()  # default camera
    time.sleep(1.0)

    # read video
    video = FileVideoStream("test_data/test3.mp4").start()

    # start fps timer
    # loop over frames from the video file stream
    frame_count = 0
    detect_model = detect.Detect()

    while True:
        # grab next frame
        # frame = stream.read()
        frame = video.read()

        key = cv2.waitKey(1) & 0xFF

        if frame_count % 10 != 0:
            frame_count += 1
            continue
        frame_count += 1

        frame = detect_model.predict(frame)

        # keybindings for display
        if key == ord('p'):  # pause
            while True:
                key2 = cv2.waitKey(1) & 0xFF
                cv2.imshow('frame', frame)
                if key2 == ord('p'):  # resume
                    break
        cv2.imshow('frame', frame)
        if key == 27:  # exit
            break
    stream.stop()
    video.stop()
    cv2.destroyAllWindows()
Example #23
 def __init__(self,
              src=0,
              flip=True,
              crop=False,
              live=True,
              calibrate=False,
              resize=False,
              multi=False):
     """
     Constructor for camera object.
     :param src: The source for the camera. 0 for live and video name for saved video.
     :param flip: True if image needs to be flipped.
     :param crop: True if image needs to be cropped.
     :param live: True if live camera is on.
     :param calibrate: Feature to calibrate the tablet. True if we want to use the feature.
     """
     self.src = src
     self.FLIP = flip
     self.CROP = crop
     self.LIVE = live
     self.CALIBRATE = calibrate
     self.RESIZE = resize
     self.MULTI = multi
     # Opens a stream for the camera.
     if self.LIVE:
         self.stream = WebcamVideoStream(src=src, name="Live Video").start()
     else:
         self.stream = SavedVideoWrapper.SavedVideoWrapper(src)
     # Crop dimensions for automatic calibration.
     self.bl_crop_dimensions, self.tr_crop_dimensions = calibrate_from_file()
     # Current frame taken.
     self.current = None
     # Buffer which saves the original frames to display for debug purposes.
     self.buffer = []
     self.last_big_frame = []
     # Maximal size for buffer to avoid using too much memory.
     self.MAX_SIZE_BUFFER = 500
     fourcc = cv2.VideoWriter_fourcc(*'XVID')
     self.out = cv2.VideoWriter('detection.avi', fourcc, 30.0, (480, 160))
Example #24
def theThreaded():
    print ("[INFO] sampling frames from webcam...")

    vs = WebcamVideoStream(src=0).start()
    fps = FPS().start()

    while fps._numFrames < args["num_frames"]:
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        if args["display"] > 0:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        fps.update()

    fps.stop()
    print("[INFO] elapsed Time: {:.2f}".format(fps.elapsed()))
    print ("[INFO] approx. fps: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
Example #25
def detect_faces():
    print("[INFO] starting video stream...")
    vs = WebcamVideoStream(
        src="rtsp://*****:*****@192.168.1.90/axis-media/media.amp")
    if vs.grabbed:
        vs = vs.start()
        time.sleep(2.0)
        print("------started survillancing------")
        id = 0
        x = 1
        while (True):
            x += 1
            frame = vs.read()
            id += 1
            if x % 2 == 0:
                gray = face_detector(frame, id)
            cv2.imshow('frame', gray)
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break

        vs.stop()
        cv2.destroyAllWindows()
Example #26
    def __init__(self):

        self.cap = WebcamVideoStream(src=0).start()
        self.root = tk.Tk()
        w, h = self.root.winfo_screenwidth(), self.root.winfo_screenheight()
        self.root.overrideredirect(0)
        self.root.geometry("%dx%d+0+0" % (w, h))
        self.root.wm_title('Face Recognizer')

        #Top and Bottom Frame
        tFrame = tk.Frame(self.root, width=360, height=240)
        tFrame.grid(row=0, column=0, padx=10, pady=50)
        bFrame = tk.Frame(self.root, width=360, height=240)
        bFrame.grid(row=1, column=0, padx=10, pady=2)

        #Video Frame
        self.lmain = tk.Label(tFrame)
        self.lmain.grid(row=0, column=0)
        self.details = Label(tFrame, text='Details: ')
        self.details.grid(row=0, column=1)
        self.det = Label(tFrame)
        self.det.grid(
            row=0,
            column=2,
        )
        self.vc = Label(bFrame, text='Say Something')
        self.vc.grid(
            row=1,
            column=0,
        )

        #Buttons
        trainButton = Button(bFrame, text='Train System', command=self.ftrain)
        trainButton.grid(row=0, column=0)
        quitButton = Button(bFrame, text='Quit', command=self.Quit)
        quitButton.grid(row=0, column=1)

        self.video_Loop()
Example #27
	def __init__(self):
		camera = 'rtsp://*****:*****@10.0.17.13:80/live'
		#######################################################################
		# Camera Options                                                      #
		# My phone IP Cam - 'http://10.10.10.149:8080/video'                  #
		# Webcam - 0                                                          #
		# 'rtsp://*****:*****@10.0.17.13:80/live'                             #
		# 'http://*****:*****@10.0.17.13:80/axis-cgi/mjpg/video.cgi'          #
		# 'http://*****:*****@10.0.17.13:80/axis-cgi/mjpg/video.cgi?camera=1' #
		#######################################################################
		# Using OpenCV to capture from device 0. If you have trouble capturing
		# from a webcam, comment the line below out and use a video file
		# instead.
		# If you decide to use video.mp4, you must have this file in the folder
		# as the main.py.

		# Initialize camera threading and start getting image object to self.vs variable
		self.vs = WebcamVideoStream(src=camera).start()
		# Getting image width and height
		self.W = self.vs.getW()
		self.H = self.vs.getH()
		# Initialize detection program and specify height and width
		self.detect = ConDetect(self.H, self.W)
Example #28
    def __init__(self, logger, src, ROOT_DIR):
        self.vs = WebcamVideoStream(src)
        self.fps = FPS()
        self.logger = logger
        self.ROOT_DIR = ROOT_DIR

        cv2.namedWindow("Webcam")
        cv2.namedWindow("roi")
        cv2.namedWindow("stacked")
        cv2.createTrackbar('dilate kernel', 'roi', 3, 5, self.none)
        cv2.createTrackbar('erode kernel', 'roi', 2, 5, self.none)
        cv2.createTrackbar('blackhat kernel', 'roi', 21, 30, self.none)

        self.mouse = Mouse(window="Webcam")
        self.gt = Graphics()
        # self.hist = Hist()
        self.msg = "draw a rectangle to continue ..."
        self.font_20 = ImageFont.truetype(f'{self.ROOT_DIR}/fonts/raleway/Raleway-Light.ttf', 20)
        self.font_10 = ImageFont.truetype(f'{self.ROOT_DIR}/fonts/raleway/Raleway-Light.ttf', 15)
        self.font_30 = ImageFont.truetype(f'{self.ROOT_DIR}/fonts/raleway/Raleway-Light.ttf', 30)
        self.font_40 = ImageFont.truetype(f'{self.ROOT_DIR}/fonts/raleway/Raleway-Medium.ttf', 50)
        # self.stabilizer = VidStab()
        self.predictor = parser_v3.Predictor(model_path=f'{self.ROOT_DIR}/models/model_0.1v7.h5', root_dir=self.ROOT_DIR)
Example #29
def faceBlurWebcam(conf_threshold, blocks):

    video_capture = WebcamVideoStream(src=0).start()
    net = loadCaffeModels()

    while True:
        orig = video_capture.read()
        (h, w) = orig.shape[:2]
        # construct a blob from the image
        blob = cv2.dnn.blobFromImage(orig, 1.0, (300, 300),
                                     (104.0, 177.0, 123.0))

        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > conf_threshold:
                startX = int(detections[0, 0, i, 3] * w)
                startY = int(detections[0, 0, i, 4] * h)
                endX = int(detections[0, 0, i, 5] * w)
                endY = int(detections[0, 0, i, 6] * h)
                face = orig[startY:endY, startX:endX]
                face = anonymize_face_pixelate(face, blocks=blocks)

                # store the blurred face in the output image
                orig[startY:endY, startX:endX] = face

        cv2.imshow("Video", orig)
        # Quits when 'Q' or 'q' or 'esc' is pressed
        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break

    video_capture.stop()
    cv2.destroyAllWindows()
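anonymize_face_pixelate is defined elsewhere in that project. A sketch of what such a helper typically does: divide the face ROI into a blocks x blocks grid and fill each cell with its mean colour:

import cv2
import numpy as np

def anonymize_face_pixelate(image, blocks=3):
    (h, w) = image.shape[:2]
    xs = np.linspace(0, w, blocks + 1, dtype=int)
    ys = np.linspace(0, h, blocks + 1, dtype=int)
    for i in range(blocks):
        for j in range(blocks):
            (x0, y0, x1, y1) = (int(xs[j]), int(ys[i]),
                                int(xs[j + 1]), int(ys[i + 1]))
            roi = image[y0:y1, x0:x1]
            # paint the cell with its mean colour
            (b, g, r) = [int(c) for c in cv2.mean(roi)[:3]]
            cv2.rectangle(image, (x0, y0), (x1, y1), (b, g, r), -1)
    return image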
Example #30
def capture_with_threading(camera, display=True):
    vs = WebcamVideoStream(camera).start()
    if display:
        threading.Thread(target=display_video, args=(camera, vs)).start()
    frame_count = 0
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        start = time.time()
        frame = vs.read()
        read_cost = (time.time() - start) * 1000
        frame_count += 1
        if frame_count >= 100 and frame_count < 200:
            save_image(IMAGE_PATH, 'cv2', frame)

        start = time.time()
        for i in range(100):
            imutils.resize(frame, width=400)
        handle_cost = (time.time() - start) * 1000
        print("read: {:.2f}ms, handle: {:.2f}ms".format(
            read_cost, handle_cost))

    # do a bit of cleanup
    vs.stop()