Example 1
import gzip
import pickle
import time
from cv2 import VideoCapture

def camera(camflag, s4):
    global vloop
    # global camflag
    try:
        caps = VideoCapture(camflag)
        ok, fram = caps.read()
        if not ok:  # VideoCapture does not raise on a busy device; read() just fails
            raise RuntimeError('no frame')
    except Exception:
        s4.sendall(b'cambusy')
        print('cam busy')
        vloop = False
        return 0
    caps.release()
    height, width, _ = fram.shape  # ndarray shape is (rows, cols, channels)
    caps = VideoCapture(camflag)
    s4.sendall(f'{width}:{height}'.encode())
    while vloop:
        ret, frame = caps.read()
        a = gzip.compress(pickle.dumps(frame), 9)
        s4.sendall(str(len(a)).encode())
        while a:
            chk = a[:3072]
            s4.sendall(chk)
            a = a[3072:]
        time.sleep(0.4)
    caps.release()
    s4.close()
    del s4
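The receiving side of this protocol is not shown. Below is a minimal counterpart sketch, not from the original source: it assumes the width:height message and each size header arrive as a single recv (TCP does not guarantee this, so a real implementation would frame its messages explicitly), and recv_frames is a hypothetical name.

import gzip
import pickle

def recv_frames(sock):
    # Hypothetical receiver for camera() above.
    dims = sock.recv(64).decode()              # "width:height"
    width, height = map(int, dims.split(':'))
    while True:
        header = sock.recv(16)                 # compressed-frame length, as a decimal string
        if not header:
            return
        size = int(header.decode())
        buf = b''
        while len(buf) < size:
            chunk = sock.recv(3072)            # matches the sender's 3072-byte chunks
            if not chunk:
                return
            buf += chunk
        # pickle.loads on network data is unsafe in general; tolerable here
        # only because both ends are the same application.
        yield pickle.loads(gzip.decompress(buf))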
Example 2
    def run(self):
        print("starting", flush=True)
        print("bueno")
        model = TFModel(resource_path("output_graph.pb"), resource_path("output_labels.txt"), "Placeholder",
                        "final_result")
        print("model loaded")
        # self.label_load.setText("Loading: Model")
        count = 0
        available = []
        while True:
            test = VideoCapture(count)
            opened = test is not None and test.isOpened()
            if test is not None:
                test.release()  # release each probe capture so device handles aren't leaked
            if not opened:
                break
            available.append(count)
            count += 1
        print(count)
        if count > 0:
            vid = VideoCapture(0)
            _, frame = vid.read()

            load = rt.Recoginize(model)
            load.daemon = True
            load.start()
            load.predict(frame)
            from time import sleep
            sleep(1)
            load.predict("kill")
            load.join()
            vid.release()
            print("model loaded")
            # self.label_load.setText("Loaded Model")
        print("donezo")
        # self.label_load.setText("Finalizing")
        self.progress.emit(model, len(available))
Example 3
    def __init__(self, camera=0, video_file=None, video=False):
        from cv2 import VideoCapture

        if video:
            # Use a video file as input.
            self.stream = VideoCapture(video_file)
        else:
            # Use a camera as input.
            self.stream = VideoCapture(camera)

        # Check if we were successful in opening stream.
        if not self.stream.isOpened():
            name = video_file if video else camera
            raise IOError("Error opening video stream or file '{}'".format(name))
Example 4
    def getOriginalVideo(self):
        # Should the new thread be handled here, to clear the effect of the Close button?
        # self.stopEvent.clear()
        # print(self.checkbutton.checkState())
        if self.checkbutton.checkState() == 2 and not self.running:  # checkbox checked: use the camera
            self.running = True
            self.cap = VideoCapture(0)
        elif self.checkbutton.checkState() == 0:
            fname, _ = QFileDialog.getOpenFileName(self, 'Open file', './',
                                                   'Video files (*.* )')
            self.cap = VideoCapture(fname)

        NewThread = threading.Thread(target=self.playvideo)
        NewThread.start()
Example 5
def main():
    # Image size
    img_shape = (224, 224, 3)
    # Load the Japanese label dictionary
    ja_labels = load_japanese_labels('src/imagenet_class_index_ja.json')
    # Initialize the camera
    cam = VideoCapture(0)
    # Load the model
    model = load_mobilenet(img_shape)

    width = cam.get(CAP_PROP_FRAME_WIDTH)
    height = cam.get(CAP_PROP_FRAME_HEIGHT)
    fps = cam.get(CAP_PROP_FPS)
    print("Image Capture FPS: %d" % (fps))
    print('Image Capture Size: width=%d height=%d' % (width, height))

    # Connect to Julius
    sock = connect_julius()

    # Hello, Raspberry Pi!
    call_jtalk("おはようございます。")  # "Good morning."
    try:
        while True:
            data = receive_voice(sock, cam)
            if data != "":
                execute_command(data, cam, model, ja_labels)
    except KeyboardInterrupt:
        print("Bye.")

    cam.release()
Example 6
def get_scan_camera(camera_id=None):
    from cv2 import VideoCapture

    if camera_id is not None:
        return VideoCapture(int(camera_id))
    else:
        raise RuntimeError("NOT_SUPPORT", "can not find scan camera")
Example 7
def detect_face_cv2_webcam():
    cap = VideoCapture(0)
    classifier = CascadeClassifier('haarcascade_frontalface_default.xml')

    # Check if the webcam is opened correctly
    if not cap.isOpened():
        raise IOError("Cannot open webcam")

    while True:
        ret, frame = cap.read()
        if not ret:  # camera disconnected or stream ended
            break
        frame = flip(frame, 1)
        gray = cvtColor(frame, COLOR_BGR2GRAY)
        faces = classifier.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            # flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )

        for (x, y, w, h) in faces:
            rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

        imshow("Video", frame)
        if waitKey(1) == ord('q'):
            break

    cap.release()
    destroyAllWindows()
Example 8
    def get_screenshot(self, carNo, show_view=True):
        """
        Capture a single frame.
        :param carNo:
        :param show_view:
        :return:
        """
        cap = None
        try:
            # Open the RTSP stream
            cap = VideoCapture(self.URL)
            ret, frame = cap.read()
            if not ret:
                consoleLog(self.logPre, "no frame captured")
                return

            imencode('.jpg', frame)[1].tofile(self.img_path.format(carNo=carNo, now=fmt_date(fmt=FMT_DATETIME)))
            if show_view:
                # Preview window
                namedWindow('view', WINDOW_NORMAL | WINDOW_KEEPRATIO)
                imshow("view", frame)
                waitKey(5 * 1000)
        except Exception as e:
            consoleLog(self.logPre, "error saving screenshot:", repr(e))
        finally:
            if cap:
                cap.release()
            destroyAllWindows()
Example 9
def main(args):
  apg1.add_argument("--subtitle", type=FileType("r"), help="subtitle file for -text")
  apg1.add_argument("--subtitle-placeholder", type=str, default="#", help="placeholder for subtitle")
  apg1.add_argument("--mon-background", type=str, default=None, help="replacement back-color for mon (default -key-color)")
  readSrt = lambda it: srt.parse(it.read())

  cfg = app.parse_args(args)
  cfg.font = ImageFont.truetype(cfg.font, cfg.font_size) if cfg.font is not None else ImageFont.load_default()
  cfg.key_color = colorFromHtml(cfg.key_color)

  print(f"{cfg.font_size}px, {cfg.key_color} ±{cfg.key_thres} {cfg.spacing}")
  cfg.calc_draw_color = lambda c: None if isColorNearTo(cfg.key_color, cfg.key_thres, c) else c
  for path in cfg.images:
    (name, ext) = fileExtNameSplit(path)
    if ext in "mp4 webm mkv flv".split(" "):
      cap = VideoCapture(path)
      (fps, count, width, height) = cv2VideoInfo(cap)
      print(f"{fps}fps*{count} {width}x{height}")

      mon = Montage(cfg, (width, height) )
      playCvMontage(cap, mon, filename=f"{name}_mon.avi", subtitle=let(readSrt, cfg.subtitle), placeholder=cfg.subtitle_placeholder)
      cap.release()
    else:
      image = Image.open(path)
      mon = Montage(cfg, image.size)
      mon.runOn(image).save(f"{name}_mon.png")
Example 10
def video_to_json(filename):
    file_full_path = input_vid_dir + filename
    start = clock()  # note: time.clock() was removed in Python 3.8; perf_counter() is the modern replacement
    size = round(path.getsize(file_full_path) / 1024 / 1024, 2)
    video_pointer = VideoCapture(file_full_path)
    frame_count = int(video_pointer.get(CAP_PROP_FRAME_COUNT))
    width = int(video_pointer.get(CAP_PROP_FRAME_WIDTH))
    height = int(video_pointer.get(CAP_PROP_FRAME_HEIGHT))
    fps = int(video_pointer.get(CAP_PROP_FPS))
    success, image = video_pointer.read()
    video_hash = {}
    while success:
        frame_hash = average_hash(Image.fromarray(image))
        video_hash[str(frame_hash)] = filename
        success, image = video_pointer.read()
    video_pointer.release()
    stop = clock()
    time_taken = stop - start
    print("Time taken for ", file_full_path, " is : ", time_taken)
    data_dict = dict()
    data_dict['size'] = size
    data_dict['time_taken'] = time_taken
    data_dict['fps'] = fps
    data_dict['height'] = height
    data_dict['width'] = width
    data_dict['frame_count'] = frame_count
    data_dict['filename'] = filename
    data_dict['video_hash'] = video_hash
    write_to_json(filename, data_dict)
    return
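Assuming average_hash here comes from the imagehash package, the hex strings stored in video_hash can later be compared frame to frame; a short sketch (frames_match is a hypothetical helper):

from imagehash import hex_to_hash

def frames_match(hash_a: str, hash_b: str, threshold: int = 5) -> bool:
    # hex_to_hash() rebuilds an ImageHash from its hex string; subtracting
    # two hashes gives the Hamming distance (0 means visually identical).
    return hex_to_hash(hash_a) - hex_to_hash(hash_b) <= threshold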
Example 11
def draw_speed(video_path: str, speed_path: str, output_video: str) -> None:
    reader = VideoCapture(video_path)
    writer = VideoWriter(
        output_video,
        VideoWriter_fourcc(*"mp4v"),
        20,
        (640, 480),
    )
    data = loadtxt(speed_path, delimiter="\n", dtype="float32")

    frame_id = 0
    while reader.isOpened():
        ret, frame = reader.read()
        if not ret:
            break

        putText(
            frame,
            f"{data[frame_id]:0.3f}",
            (250, 420),
            FONT_HERSHEY_SIMPLEX,
            0.7,
            (255, 255, 255),
            2,
        )
        writer.write(frame)

        frame_id += 1
        if frame_id == data.shape[0]:
            break

    reader.release()
    writer.release()
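The writer above hardcodes 20 fps and a 640x480 frame size; if the source differs, OpenCV silently produces a broken file. A hedged variant that derives both from the reader (make_writer is a hypothetical helper):

from cv2 import (VideoCapture, VideoWriter, VideoWriter_fourcc, CAP_PROP_FPS,
                 CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT)

def make_writer(reader: VideoCapture, output_video: str) -> VideoWriter:
    fps = reader.get(CAP_PROP_FPS) or 20  # some containers report 0 fps
    size = (int(reader.get(CAP_PROP_FRAME_WIDTH)),
            int(reader.get(CAP_PROP_FRAME_HEIGHT)))
    return VideoWriter(output_video, VideoWriter_fourcc(*"mp4v"), fps, size)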
Example 12
def main():
    """run the main loop"""
    # %%
    pygame.mixer.init()
    cam = VideoCapture(CAMERA_IDX)  # get camera handle

    detector = SlouchDetector(SLOUCH_THRESHOLD, do_store_imgs=STORE_IMGS)

    try:
        while True:
            img = _capture_img(cam)
            # a color image is just a Width x Height x NumChannels 'matrix',
            # really a rank-3 tensor
            # channels are Blue, Green, Red (CV2's idiosyncratic ordering...)
            # print( type(img),  img.shape )   # =>  <ndarray>  (480, 640, 3)
            # Convert to grayscale
            # a grayscale image is just a Width x Height matrix,
            # really a rank-2 tensor
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # print(type(gray), gray.shape)   # =>  <ndarray>  (480, 640)
            detector.detect(gray)
            time.sleep(1)

    finally:
        cam.release()  # always close the webcam, even on error
Example 13
def captureImage(image_dir, number_of_images=3):
    img_files_count = len([
        name for name in os.listdir(image_dir)
        if os.path.isfile(os.path.join(image_dir, name))
    ])

    image_files = []
    for i in range(0, number_of_images):
        camera = VideoCapture(0)
        if not camera.isOpened():
            with open(LOGS_DIR + "error.log", "a") as f:
                f.write("[Error " +
                        str(datetime.now().strftime("%b %d, %Y %H:%M:%S")) +
                        "] Could not open video device\n")
            camera.release()
            continue  # skip this attempt; reading from a closed device would fail
        return_value, image = camera.read()
        camera.release()
        if not return_value:
            continue
        img_file = datetime.now().strftime(image_dir + "/image_%d%b%y-" +
                                           str(img_files_count) + ".jpg")
        imwrite(img_file, image)
        image_files.append(img_file)
        img_files_count += 1
        sleep(3)
    return image_files
Example 14
def acquire_img(cap_indx: int = 1, duration: float = 60.0, frequency: float = 0.1, img_dir: str = './') -> None:
    from time import sleep
    from cv2 import VideoCapture, imwrite
    from os.path import join

    cap = VideoCapture(cap_indx)
    n = int(duration / frequency + 0.5)  # 'frequency' is the interval between captures, in seconds
    k = 1

    print(80 * '#')
    print('img_dir:', img_dir)
    print('capturing {} images'.format(n))
    print(80 * '#')

    for i in range(n):
        _, frame = cap.read()
        img_name = join(img_dir, 'img_{}.png'.format(i))
        imwrite(img_name, frame)
        sleep(frequency)

        if i >= 0.25*k*n:
            print("*done {}".format(i/n))
            k+=1

    # When everything is done, release the capture
    cap.release()
    return None
Example 15
    def __init__(self, video_path, max_queue=50):
        self.stream = VideoCapture(video_path)
        self.queue = Queue(maxsize=max_queue)
        self.stop_queue_updating = False  # stops loading images

        # Starting right away in separate thread
        Thread(target=self._update_queue, daemon=True).start()
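The _update_queue worker started here is not shown; a minimal sketch of what it presumably does, given the fields set in this constructor:

    def _update_queue(self):
        # Read frames until the stream ends or a stop is requested;
        # queue.put() blocks once max_queue frames are buffered.
        while not self.stop_queue_updating:
            ret, frame = self.stream.read()
            if not ret:
                break
            self.queue.put(frame)
        self.stream.release()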
Example 16
    def init_camera(self):
        if USE_SOCKETS:
            try:
                self.client_socket = socket.socket(socket.AF_INET,
                                                   socket.SOCK_STREAM)
                self.client_socket.connect((self.receiver_ip,
                                            self.receiver_port))
            except Exception as e:
                self.dbg("camera_error",
                         "Failed to connect to receiver: {}",
                         [e])
        else:
            self.dbg("camera",
                     "Not setting up sockets")
        try:
            # Create a VideoCapture object and read from input file
            self.camera = VideoCapture(self.camera_num)
        except Exception as e:
            self.dbg("camera_error", "Failed to open camera: {}", [e])

        # Check if camera opened successfully
        if not self.camera.isOpened():
            self.dbg("camera_error",
                     "Error opening camera #{}",
                     [self.camera_num])
Example 17
def get_hash(image, src=False):
    '''Return perceptual hash of image'''

    if src:

        try:
            image = Image.open(
                BytesIO(requests.get(image, headers=HEADERS).content))
        except Exception:
            return None

    elif re.search('jp.*g|png|gif', image.suffix, re.IGNORECASE):

        try:
            image = Image.open(image)
        except Exception:
            return None

    elif re.search('webm|mp4', image.suffix, re.IGNORECASE):

        try:
            video_capture = VideoCapture(str(image)).read()[-1]
            image = cvtColor(video_capture, COLOR_BGR2RGB)
            image = Image.fromarray(image)
        except Exception:
            return None

    image.thumbnail([32, 32])
    image = image.convert('L')

    return f'{dhash(image)}'
Example 18
def playVideo():
    excesTime = 0
    cap = VideoCapture('./badapple.mp4')
    res, frame = cap.read()
    
    while res:
        startTime = time.time()
        frame = resize(frame,(90,45))
        height, width, channels = frame.shape
        
        output = ""

        for x in range(0, 45):
            for y in range(0, 90):
                color = frame[x, y]
                if color[0] > 240 and color[1] > 240 and color[2] > 240:
                    output += u"\u2588"
                else:
                    output += " "
            if x != 44:  # last row (the frame is 45 rows after the resize); skip the trailing newline
                output += "\n"
        system('cls')
        print(output)
        res, frame = cap.read()
        if 1/FRAMERATE - (time.time() - startTime) - excesTime > 0:
            time.sleep(1/FRAMERATE - (time.time() - startTime) - excesTime)
            excesTime -= 1/FRAMERATE - (time.time() - startTime)
        else:
            time.sleep(0)
            excesTime -= 1/FRAMERATE - (time.time() - startTime)
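The excess-time bookkeeping above compensates for sleep() overshoot one frame at a time. An alternative sketch that avoids accumulated drift entirely by pacing against absolute deadlines (paced_loop and render_frame are hypothetical names):

import time

def paced_loop(render_frame, framerate=30):
    period = 1.0 / framerate
    deadline = time.time()
    while render_frame():          # render_frame() returns False when the video ends
        deadline += period
        delay = deadline - time.time()
        if delay > 0:
            time.sleep(delay)      # a late frame simply shrinks the next delay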
Example 19
    def __init__(self, device_no=0):
        global imwrite
        from cv2 import VideoCapture, imwrite
        from time import sleep
        super(PyCamera, self).__init__()
        self._video_capture = VideoCapture(device_no)
        self._validate_module()
Example 20
    def __init__(self, window):
        super(Capture, self).__init__(window)
        self.window = window
        self.capturing = False
        self.cam = VideoCapture(0)
        self.cam.set(3, 640)  # property 3 is CAP_PROP_FRAME_WIDTH
        self.cam.set(4, 480)  # property 4 is CAP_PROP_FRAME_HEIGHT
Example 21
    def video2imgs(self, video, size):

        from cv2 import VideoCapture
        from cv2 import cvtColor, resize
        from cv2 import COLOR_BGR2GRAY
        from cv2 import INTER_AREA

        img_list = []

        # Create a VideoCapture object from the given file
        cap = VideoCapture(video)

        # cap.isOpened() returns True once the capture is initialized,
        # so this is effectively a "while True" loop
        while cap.isOpened():
            # cap.read() return values:
            #   ret: whether a frame was read
            #   frame: the image matrix, of type numpy.ndarray
            ret, frame = cap.read()
            if ret:
                # Convert to grayscale; skipping this step would produce color character video instead.
                gray = cvtColor(frame, COLOR_BGR2GRAY)

                # Resize the image so the character art fits the terminal.
                img = resize(gray, size, interpolation=INTER_AREA)

                # Save the converted result frame by frame
                img_list.append(img)
            else:
                break

        # Release the capture when finished
        cap.release()

        return img_list
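The comments mention turning these grayscale frames into character art; a hedged companion sketch of that step (the ramp string is an arbitrary choice, img_to_ascii a hypothetical name):

ASCII_RAMP = "@%#*+=-:. "  # darkest .. lightest

def img_to_ascii(img) -> str:
    # Map each 0-255 grayscale pixel onto the ramp by brightness.
    return "\n".join(
        "".join(ASCII_RAMP[int(p) * len(ASCII_RAMP) // 256] for p in row)
        for row in img)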
Example 22
def extractFrames(frames, src, dst):
    reader = VideoCapture(src)

    frame_num = 1

    while reader.isOpened():
        running, frame = reader.read()
        if not running:
            break
        if frame_num > frames:
            break

        #Extract face, with 25 pixels margin
        loc = face_recognition.face_locations(frame)

        if (len(loc) == 0):
            face = frame
        else:
            loc = sorted(
                loc,
                key=cmp_to_key(lambda x, y: (y[2] - y[0]) * (y[1] - y[3]) -
                               (x[2] - x[0]) * (x[1] - x[3])))
            face = frame[loc[0][0] - 25:loc[0][2] + 25,
                         loc[0][3] - 25:loc[0][1] + 25]

        face = cv2.resize(face, (299, 299))
        imwrite(join(dst, '%d.jpg' % frame_num), face)
        frame_num += 1

    reader.release()
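Near the frame border the 25-pixel margin above can produce negative slice bounds, which NumPy interprets as counting from the end, silently cropping the wrong region. A hedged guard (crop_with_margin is a hypothetical helper; face_recognition reports locations as (top, right, bottom, left)):

def crop_with_margin(frame, top, right, bottom, left, margin=25):
    h, w = frame.shape[:2]
    return frame[max(0, top - margin):min(h, bottom + margin),
                 max(0, left - margin):min(w, right + margin)]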
Example 23
def getMP4Length(x):
    cap = VideoCapture(x)
    fps = cap.get(CAP_PROP_FPS)  # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
    frame_count = int(cap.get(CAP_PROP_FRAME_COUNT))
    cap.release()
    duration = frame_count / fps
    return duration
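cap.get(CAP_PROP_FPS) returns 0.0 for some containers and live streams, which would make the division above raise ZeroDivisionError; a defensive variant (hypothetical name):

def getMP4LengthSafe(x):
    cap = VideoCapture(x)
    fps = cap.get(CAP_PROP_FPS)
    frame_count = int(cap.get(CAP_PROP_FRAME_COUNT))
    cap.release()
    return frame_count / fps if fps > 0 else None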
Example 24
    def __init__(self, index: int = 0, log_handlers: Optional[List[StreamHandler]] = None):
        self._logger = getLogger(__name__)
        if log_handlers:
            for h in log_handlers:
                self._logger.addHandler(h)
        self._logger.debug("Initializing")
        self.index = index
        self._tracker = FPSTracker()
        self._internal_frame_q = InternalFrameQueue()
        self._running = TEvent()
        self._running.clear()
        self._lock = Lock()
        self._closing_flag = Event()
        self._timeout_limit = 0
        self._stream = VideoCapture(index, defs.cap_backend)
        self.set_resolution(self.get_resolution())
        self._fps_test_status = 0
        self._fps_target = 30
        self._spf_target = 1 / self._fps_target
        self._buffer = 5
        self._use_limiter = False
        self._finalized = False
        self._end = TEvent()
        self._end.clear()
        self._err_event = Event()
        self._num_frms = 0
        self._start = time()
        self._loop = get_event_loop()
        self._logger.debug("Initialized")
Example 25
    def __init__(self, video_file_name):
        VideoReaderWrapper.__init__(self)

        self.video_file_name = video_file_name
        self.basename_video_file_name = os.path.basename(video_file_name)

        # Create a VideoCapture object and read from input file
        # If the input is the camera, pass 0 instead of the video file name
        self.video_capture = VideoCapture(self.video_file_name)

        if not self.video_capture.isOpened():
            raise Exception(f"Error opening video file {self.video_file_name}")

        # length in frames
        self._length = int(self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT))

        # in pixels
        self._width = int(self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        self._height = int(self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # frame per seconds
        self._fps = self.video_capture.get(cv2.CAP_PROP_FPS)

        print(
            f"OpenCvVideoReader init for {self.basename_video_file_name}: "
            f"self.width {self.width}, self.height {self.height}, n frames {self._length}"
        )
Example 26
def videoconvert(inp):
    capture = VideoCapture(inp)
    inp_ext = inp.split(".")
    fpsin = capture.get(CAP_PROP_FPS)
    count = 0
    success = True
    while success:
        success, image = capture.read()
        if success:  # only write real frames; the final read returns (False, None)
            imwrite("zzimg%d.jpg" % count, image)
            count += 1
    outfile = inp_ext[0] + '_output.mp4'
    fourcc = VideoWriter_fourcc(*'DIVX')
    fpsout = fpsin
    img = imread("zzimg0.jpg")
    height, width, layers = img.shape
    size = (width, height)
    out = VideoWriter(outfile, fourcc, fpsout, size, 0)
    for i in range(count):
        img = imread("zzimg%d.jpg" % i, 0)
        out.write(img)
    print(
        "Video Converted to Grayscale, Please check the folder for the output file: ",
        outfile)
    out.release()
    capture.release()

    return outfile
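Round-tripping every frame through zzimg*.jpg files is slow and leaves the JPEGs behind. A hedged in-memory variant of the same conversion (hypothetical name, same DIVX/grayscale choices as above):

from cv2 import (VideoCapture, VideoWriter, VideoWriter_fourcc, cvtColor,
                 COLOR_BGR2GRAY, CAP_PROP_FPS)

def videoconvert_inmemory(inp):
    capture = VideoCapture(inp)
    fps = capture.get(CAP_PROP_FPS)
    outfile = inp.split(".")[0] + '_output.mp4'
    out = None
    while True:
        success, image = capture.read()
        if not success:
            break
        gray = cvtColor(image, COLOR_BGR2GRAY)
        if out is None:  # size the writer from the first decoded frame
            height, width = gray.shape
            out = VideoWriter(outfile, VideoWriter_fourcc(*'DIVX'), fps, (width, height), 0)
        out.write(gray)
    if out is not None:
        out.release()
    capture.release()
    return outfile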
Example 27
	def set_video(self):
		try:
			self.playingvideo = True
			if self.DoingHW == True:
				self.bg.right_port.config(bg=HP.TRANS_CLR_ALT())
			else:
				self.bg.right_port.config(bg=HP.TRANS_CLR())
			if self.vs is None:
				self.vs = VideoCapture(self.video_filename) # open the video file for frame capture
				self.current_image = None	# current image from the camera
			ok, frame = self.vs.read()		# read frame from video stream
			if ok:
				cv2image = cvtColor(frame, COLOR_BGR2RGBA)		# convert colors from BGR to RGBA
				self.current_image = Image.fromarray(cv2image)  # convert image for PIL
				self.imgtk = ImageTk.PhotoImage(image=self.current_image)  # convert image for tkinter 
				self.bg.itemconfig(self.bg.fg, image=self.imgtk)
				del frame	# memory leak prevention
				del cv2image
			else:
				self.clear_screen()
				try:
					del self.current_image	# memory leak prevention
					del self.imgtk
				except AttributeError as e:
					print('Video not found. Is it spelled and formatted correctly, and in the right place? This is where I think it is')
					print(path.abspath(self.video_filename))
				self.vs.release()
				self.vs = None
				self.playingvideo = False
		except Exception as e:
			HP.HandleError(format_exc(2), e, 'set_video', subj='')
Example 28
    def add(connection_string):
        if StreamService.streams.get(connection_string):
            return

        cap = VideoCapture(connection_string)
        stream = Stream(video_capture=cap, connection_string=connection_string)
        StreamService.streams[connection_string] = stream
Example 29
    def __init__(self,
                 image_path="images/face_irad.jpg",
                 audio_path="audio/alarm.wav",
                 wait_sec=10,
                 apps=[
                     "Microsoft Word", "Microsoft Excel", "Spotify",
                     "AdobeAcrobat", "Finder"
                 ],
                 is_gui=True):
        super(Gannenet, self).__init__()
        self.is_gui = is_gui
        self.image_path, self.audio_path, self.wait_sec, self.apps = image_path, audio_path, wait_sec, apps
        if not self.is_gui:
            print("--- Welcome to Get Back To Work! ---")
            print("---  Seriously, Get Back to Work ---")
            print("---     Press Ctrl+C to quit     ---")
            print(
                f"--- Parameters: [Image Path: {self.image_path}, Audio Path: {self.audio_path}, Wait seconds: {self.wait_sec}, self.apps: {self.apps}] ---\n"
            )
        # Get a reference to webcam #0 (the default one)
        self.video_capture = VideoCapture(0)

        # Load pictures and learn how to recognize it.
        self.my_image = load_image_file(self.image_path)
        self.my_face_encoding = face_encodings(self.my_image)[0]

        # Create arrays of known face encodings and their names
        self.known_face_encodings = [self.my_face_encoding]
        self.known_face_names = ["myself"]

        if self.audio_path != "skip":
            self.playing = False
            self.player = audio.Player(self.audio_path)
Example 30
    def run_x_seconds(connection_string, fps=1, seconds=30, detect=False):
        stream = StreamService.streams[connection_string]
        stream.stop_time = StreamService.stop_time(seconds)

        if stream.active:
            return

        if not stream.video_capture.isOpened():
            stream.video_capture = VideoCapture(stream.connection_string)

        stream.active = True
        prev = 0
        while datetime.utcnow() < stream.stop_time:
            time_elapsed = time() - prev
            ret, frame = stream.video_capture.read()
            if not ret:  # stream dropped; avoid resizing a None frame below
                break

            if time_elapsed > 1.0 / fps:
                prev = time()

                frame = resize(frame, (320, 320))
                _, buffer = imencode('.jpg', frame)
                jpg_as_text = b64encode(buffer).decode('utf8')

                resp = dict(image=jpg_as_text)
                if detect:
                    resp['predicted'] = detect_stream.DetectorService.predict(
                        frame)
                    print(resp)

                stream.last_response = resp

        stream.active = False
        stream.video_capture.release()