Example #1
class OverlayBackground:
    """
    Provides the background image for the overlay
    window.
    """
    def __init__(self, config):
        """
        Initialises and configures the class to provide a background image.
        The image can be a WEISS logo, a blank image, or an image from an
        OpenCV video source.
        :param config: a configuration dictionary
        :raises: RuntimeError, KeyError
        """
        self._video_loop_buffer = []
        self._logo_maker = None
        self._blank_image = None
        if "source" in config:
            self.source = VideoCapture(config.get("source"))
            if not self.source.isOpened():
                raise RuntimeError("Failed to open Video camera:" +
                                   str(config.get("source")))

            self.source_name = config.get("source")

            if "loop" in config:
                if config.get("loop"):
                    video_buffer = []
                    ret, image = self.source.read()
                    while ret:
                        video_buffer.append(image)
                        ret, image = self.source.read()

                    self._video_loop_buffer = cycle(video_buffer)
        elif config.get("logo"):
            self._logo_maker = WeissLogo()
        elif config.get("blank"):
            self._blank_image = zeros(shape=[512, 512, 3], dtype=uint8)
        else:
            raise KeyError("Configuration must contain a " +
                           "video source, blank, or logo")

    def next_image(self):
        """
        Returns a background image.
        The behaviour is determined by the configuration
        dictionary used at init.
        """
        if self._video_loop_buffer:
            image = next(self._video_loop_buffer)
        elif self._logo_maker is not None:
            image = self._logo_maker.get_noisy_logo()
        elif self._blank_image is not None:
            image = self._blank_image
        else:
            _, image = self.source.read()
        return image
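
A minimal usage sketch for the class above (hypothetical config values; assumes WeissLogo, cv2's VideoCapture, itertools.cycle, and numpy are importable as in the original module):

    config = {"source": 0}           # or {"logo": True} / {"blank": True}
    background = OverlayBackground(config)
    frame = background.next_image()  # one BGR frame per call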
Example #2
class WebcamVideoStream:
    def __init__(self, src=0, time=0.01, name="WebcamVideoStream"):
        self.stream = VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.name = name
        self.stopped = False
        self.time = time

    def start(self):
        t = Thread(target=self.update, name=self.name, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        while True:
            if self.stopped:
                return
            sleep(self.time)
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True
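
A possible usage of the threaded reader above (Thread, sleep, and VideoCapture assumed imported as in the original module):

    stream = WebcamVideoStream(src=0).start()
    try:
        for _ in range(100):
            frame = stream.read()  # latest frame grabbed by the daemon thread
    finally:
        stream.stop()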
Example #3
    def solveFrameDifferences(self, cap: VideoCapture, crop: List[Rect],
                              fold) -> Iterator[Frame]:
        require(cap.isOpened(), "failed to open capture")
        def postprocess(mat, index):
            return self.postprocessUMat(self.cropUMat(mat, crop, index), index)
        if self.is_crop_debug:
            cv2NormalWin(ExtractSubtitles.WIN_LAST_IMAGE)
            cv2NormalWin(ExtractSubtitles.WIN_LAST_FRAME)
        reducer = fold(cap, crop)

        index = 0
        prev_frame, curr_frame = None, None
        unfinished, img = cap.read()
        prev_frame = postprocess(img, 0)  #< initial (prev == curr)
        while unfinished:
            curr_frame = postprocess(img, index)
            if self.is_crop_debug:
                cv2.imshow(ExtractSubtitles.WIN_LAST_IMAGE, img)
                cv2.imshow(ExtractSubtitles.WIN_LAST_FRAME,
                           curr_frame)  #< must have single title, to animate
                if cv2WaitKey() == 'q': break
            if curr_frame is not None:  # and prev_frame is not None
                try:
                    diff = cv2.absdiff(curr_frame,
                                       prev_frame)  #< main algorithm goes here
                    yield Frame(index, curr_frame, np.sum(diff))
                except cv2.error:
                    pass
            prev_frame = curr_frame
            unfinished, img = cap.read()
            index = index + 1
            reducer.accept(index)
        reducer.finish()
Example #4
def playVideo():
    excessTime = 0  # accumulated timing overshoot, used to pace playback
    cap = VideoCapture('./badapple.mp4')
    res, frame = cap.read()

    while res:
        startTime = time.time()
        frame = resize(frame, (90, 45))
        height, width, channels = frame.shape

        output = ""

        for x in range(0, 45):
            for y in range(0, 90):
                color = frame[x, y]
                if color[0] > 240 and color[1] > 240 and color[2] > 240:
                    output += u"\u2588"  # full block for near-white pixels
                else:
                    output += " "
            if x != 44:  # no newline after the last of the 45 rows
                output += "\n"
        system('cls')
        print(output)
        res, frame = cap.read()
        # Sleep off the rest of this frame's time budget, carrying any
        # overshoot into the next frame via excessTime.
        remaining = 1 / FRAMERATE - (time.time() - startTime) - excessTime
        if remaining > 0:
            time.sleep(remaining)
        excessTime -= 1 / FRAMERATE - (time.time() - startTime)
Example #5
def Extract_Frames(source, fps=1, dest=None):
    '''Extracts frames from a given source animation, with optional fps and destination'''
    
    from Webscraping import USER
    from pathlib import Path
    from cv2 import VideoCapture, imencode, CAP_PROP_POS_FRAMES
    
    path = Path(source)
    
    if dest is None:
        dest = USER / 'Pictures' / 'Screenshots' / path.stem
        
    if dest.exists():
        for file in dest.iterdir():
            file.unlink()
        
    dest.mkdir(exist_ok=True)
        
    vidcap = VideoCapture(source)
    success, frame = vidcap.read()

    while success:
        
        # keep a frame when CAP_PROP_POS_FRAMES modulo fps is 0 or 1
        if ((vidcap.get(CAP_PROP_POS_FRAMES) % fps) - 1) in (-1, 0):
            
            image = dest / f'{vidcap.get(CAP_PROP_POS_FRAMES)}.jpg'
            image.write_bytes(imencode('.jpg', frame)[-1])
        
        success, frame = vidcap.read()
        
    vidcap.release()
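
A hypothetical call of the function above; note that fps acts as a keep-every-Nth-frame step rather than a true frame rate, and dest must be a pathlib.Path when supplied:

    from pathlib import Path

    Extract_Frames('clip.mp4', fps=30, dest=Path('frames'))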
Example #6
class WebcamVideoStream:
    def __init__(self, config: configparser.ConfigParser):
        self.stream = VideoCapture(config.getint('frame', 'input_slot'))
        self.stream.set(CAP_PROP_FRAME_WIDTH, int(config['frame']['width']))
        self.stream.set(CAP_PROP_FRAME_HEIGHT, int(config['frame']['height']))

        (self.grabbed, self.frame) = self.stream.read()

        self.stopped = False

    def start(self):
        """
        Start the thread to read frames from the video stream
        :return:
        """
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # Loop until the thread is stopped
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True
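
Example #6 pulls its settings from a configparser.ConfigParser instance; a minimal INI sketch matching the section and option names read above (values are placeholders) might look like:

    [frame]
    input_slot = 0
    width = 640
    height = 480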
Example #7
def get_tags(driver, path, filter=False):

    tags = set()
    frames = []
    video = path.suffix in ('.gif', '.webm', '.mp4')

    if video:

        tags.add('animated')
        if path.suffix in ('.webm', '.mp4'):
            try:
                for stream in FFProbe(str(path)).streams:
                    if stream.codec_type == 'audio':
                        tags.add('audio')
                        break
            except Exception:
                pass
        temp_dir = tempfile.TemporaryDirectory()
        vidcap = VideoCapture(str(path))
        success, frame = vidcap.read()

        while success:

            temp = ROOT.parent / temp_dir.name / f'{next(tempfile._get_candidate_names())}.jpg'
            temp.write_bytes(imencode('.jpg', frame)[-1])
            frames.append(temp)
            success, frame = vidcap.read()

        else:
            step = 90 * log((len(frames) * .002) + 1) + 1
            frames = frames[::round(step)]

    else:
        frames.append(path)

    for frame in frames:

        driver.get('http://dev.kanotype.net:8003/deepdanbooru/')
        driver.find('//*[@id="exampleFormControlFile1"]', str(frame))
        driver.find('//button[@type="submit"]', click=True)

        for _ in range(4):
            html = bs4.BeautifulSoup(driver.page_source(), 'lxml')
            try:
                tags.update([
                    tag.text for tag in html.find('tbody').findAll(href=True)
                ])
                break
            except AttributeError:
                if driver.current_url().endswith('deepdanbooru/'):
                    driver.find('//*[@id="exampleFormControlFile1"]',
                                str(frame))
                    driver.find('//button[@type="submit"]', click=True)
                driver.refresh()

    else:
        if video: temp_dir.cleanup()
        if filter: tags.difference_update(REMOVE)

    return ' '.join(tags)
Example #8
class Processer:
    def __init__(self, input_file, output_file, FPS=30, max_count=-1):
        self.vidcap = VideoCapture(input_file)
        success, self.image = self.vidcap.read()
        self.width = int(self.vidcap.get(3))   # 3 == CAP_PROP_FRAME_WIDTH
        self.height = int(self.vidcap.get(4))  # 4 == CAP_PROP_FRAME_HEIGHT
        fourcc = VideoWriter_fourcc(*'XVID')
        self.videoWriter = VideoWriter(output_file, fourcc, float(FPS),
                                       (self.width, self.height))
        self.max_count = max_count

    def run(self, proc_func, end_proc=None):
        count = 0
        success = True
        while success and (count < self.max_count or self.max_count == -1):
            result = proc_func(self.image, count, self.width, self.height)
            self.videoWriter.write(result)
            success, self.image = self.vidcap.read()
            print('Count:', count)
            count += 1

        if end_proc is not None:
            end_proc(self.videoWriter)
        self.videoWriter.release()
        self.vidcap.release()
        print('Total count:', count)
Example #9
def video_to_json(filename):
    file_full_path = input_vid_dir + filename
    start = clock()  # note: time.clock() was removed in Python 3.8; time.perf_counter() is the modern equivalent
    size = round(path.getsize(file_full_path) / 1024 / 1024, 2)
    video_pointer = VideoCapture(file_full_path)
    frame_count = int(video_pointer.get(CAP_PROP_FRAME_COUNT))
    width = int(video_pointer.get(CAP_PROP_FRAME_WIDTH))
    height = int(video_pointer.get(CAP_PROP_FRAME_HEIGHT))
    fps = int(video_pointer.get(CAP_PROP_FPS))
    success, image = video_pointer.read()
    video_hash = {}
    while success:
        frame_hash = average_hash(Image.fromarray(image))
        video_hash[str(frame_hash)] = filename
        success, image = video_pointer.read()
    stop = clock()
    time_taken = stop - start
    print("Time taken for ", file_full_path, " is : ", time_taken)
    data_dict = dict()
    data_dict['size'] = size
    data_dict['time_taken'] = time_taken
    data_dict['fps'] = fps
    data_dict['height'] = height
    data_dict['width'] = width
    data_dict['frame_count'] = frame_count
    data_dict['filename'] = filename
    data_dict['video_hash'] = video_hash
    write_to_json(filename, data_dict)
    return
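
write_to_json is not shown in Example #9; a plausible minimal helper consistent with how it is called (a filename plus a plain dict) could be:

    import json

    def write_to_json(filename, data_dict):
        # Hypothetical: dump the collected metadata alongside the video.
        with open(filename + '.json', 'w') as f:
            json.dump(data_dict, f, indent=2)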
Example #10
def solveFrameDifferences(cap: cv2.VideoCapture, on_frame = lambda x: ()) -> Tuple[list, list]:
  frames, frame_diffs = [], []

  index = 0
  prev_frame, curr_frame = None, None

  unfinished, img = cap.read()
  prev_frame = img #< initial (prev == curr)
  cv2NormalWin(WIN_LAST_FRAME)
  (n_frame, _, _, _) = cv2VideoProps(cap)
  progress = ProgressBar(maxval=n_frame).start()
  while unfinished:
    curr_frame = cv2.cvtColor(img, cv2.COLOR_BGR2LUV) #luv
    if curr_frame is not None: # and prev_frame is not None
      diff = cv2.absdiff(curr_frame, prev_frame) #< main logic goes here
      count = np.sum(diff)
      frame_diffs.append(count)
      frame = Frame(index, img, count)
      #frames.append(frame)
    on_frame(curr_frame)
    prev_frame = curr_frame
    index = index + 1
    progress.update(index)
    unfinished, img = cap.read()
    if app_cfg.crop_debug:
      cv2.imshow(WIN_LAST_FRAME, prev_frame) #< must have single name, to animate
      if cv2WaitKey('q'): break
  progress.finish()
  return (frames, frame_diffs)
Example #11
def camera(camflag, s4):
    global vloop
    #global camflag
    try:
        caps = VideoCapture(camflag)
        _, fram = caps.read()
    except Exception:
        s4.sendall(b'cambusy')
        print('cam busy')
        vloop = False
        return 0
    caps.release()
    height, width, _ = fram.shape  # ndarray shape is (rows, cols, channels)
    caps = VideoCapture(camflag)
    s4.sendall(str.encode(str(width) + ':' + str(height)))
    while vloop:
        ret, frame = caps.read()
        a = gzip.compress(pickle.dumps(frame), 9)
        s4.sendall(str.encode(str(len(a))))
        while a:
            chk = a[:3072]
            s4.sendall(chk)
            a = a[3072:]
        time.sleep(0.4)
    caps.release()
    s4.close()
    del s4
Example #12
def save_manual(src_cam: cv.VideoCapture,
                dst_cam: cv.VideoCapture,
                out_dir='.'):
    c = 0
    while True:
        # read images from both cameras
        is_ok, src_image = src_cam.read()
        if not is_ok:
            continue

        is_ok, dst_image = dst_cam.read()
        if not is_ok:
            continue

        # resize
        if src_image.shape[0] != 480 or src_image.shape[1] != 640:
            src_image = cv.resize(src_image, (640, 480))
        if dst_image.shape[0] != 480 or dst_image.shape[1] != 640:
            dst_image = cv.resize(dst_image, (640, 480))

        cv.imshow("src", src_image)
        cv.imshow("dst", dst_image)

        key = cv.waitKey(10)
        if key == 27:  # Esc: quit
            break
        elif key == 32:  # Space: save the current image pair
            save_image(src_image, dst_image, os.path.join(out_dir, str(c)))
            c += 1

    cv.destroyAllWindows()
Example #13
class WebCamStream(object):
    """
    A class that handles threaded video streaming through a USB webcam.
    based on imutils library for python: https://github.com/jrosebr1/imutils

    """

    def __init__(self, src=0):
        """
        Initialize an object that controls a video
        stream from a USB Webcam on a separate thread.
        Args:
          src: the source for the USB webcam, 0 is the default camera
        """
        self.src = src
        self.stream = VideoCapture(src)  # initialize video source
        self.grabbed, self.frame = self.stream.read()  # grab initial frame
        self.stopped = False
        self.has_frame = Event()

    def start(self):
        """
        Start the video stream on a separate daemon thread once the
        capturing device is opened.
        """
        while not self.stream.isOpened():  # wait for I/O to come online
            sleep(8)  # wait 8 seconds between re-open attempts
            self.stream.open(self.src)  # attempt to open again
        thread = Thread(target=self.update, args=())
        thread.daemon = True  # set thread to daemon
        thread.start()  # start thread

    def update(self):
        """
        Continuously update the stream with the most recent frame
        until stopped.
        """
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()
            if self.grabbed:
                self.has_frame.set()  # notify

    def read(self):
        """
        Read the current frame in the video stream.
        Returns:
          The most recent frame captured
        """
        self.has_frame.wait()
        frame = self.frame
        self.has_frame.clear()
        return frame

    def stop(self):
        """
        Stop the video stream.
        """
        self.stopped = True
        self.stream.release()  # close capturing device
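
A short usage sketch for WebCamStream; the has_frame Event makes read() block until the background thread has grabbed a fresh frame:

    cam = WebCamStream(src=0)
    cam.start()          # spins up the daemon reader thread
    frame = cam.read()   # blocks until a new frame is available
    cam.stop()           # stops the thread and releases the device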
Example #14
def getFrame(vid):
    print('Please wait...')
    vidcap = VideoCapture(vid)
    success, image = vidcap.read()
    count = 0
    while success:
        imwrite("frames\\frame %d.jpg" % count, image)
        success, image = vidcap.read()
        count += 1
Example #15
def video_jump(video_cap: cv2.VideoCapture, frame_id: int):
    # IMPORTANT:
    # - frame is a range actually
    # - frame 1 's timestamp is the beginning of this frame
    #
    # video_jump(cap, 2) means: moving the pointer to the start point of frame 2 => the end point of frame 1

    # another -1 for re-read
    video_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id - 1 - 1)
    video_cap.read()
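
Under the frame-numbering convention documented above, a hypothetical companion helper that fetches one specific frame might look like:

    def read_frame(video_cap: cv2.VideoCapture, frame_id: int):
        # Position the capture so the next read() returns frame_id.
        video_jump(video_cap, frame_id)
        ok, frame = video_cap.read()
        return frame if ok else None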
Example #16
def play(window):
	vidcap = VideoCapture(argv[1])
	success, image = vidcap.read()
	while success:
		for l in I2T(BytesIO(imencode(".jpg", resize(image, (144, 108), interpolation = 3))[1])).split("\n"):
			# time.sleep(x) # Use this to change the fps
			window.addstr(l + "\n")
			window.refresh()
		window.move(0, 0)
		success, image = vidcap.read()
Example #17
def get_frames(datum_path):
    video_capture = VideoCapture(datum_path)

    frames = []
    success, frame = video_capture.read()
    while success:
        frames.append(frame)
        success, frame = video_capture.read()

    return frames
Example #18
class image:
    def __init__(self):
        self.camera = VideoCapture(0)

    def capture(self):
        for i in range(20):
            self.retval, self.im = self.camera.read()  # warm-up reads so exposure settles
        self.retval, self.im = self.camera.read()
        imwrite("E:/Project/known_people/temp.jpg", self.im)
        del self.camera
Example #19
def selectCropRects(
        cap: VideoCapture,
        title="Video (c:OK; q:finished, <:-; >:=)",
        title_preview="Preview",
        d_seek=5) -> Iterator[Tuple[int, Tuple[int, int, int, int]]]:
    ''' position&size (x, y, w, h) '''
    index = 0
    ltrd = None

    def seek(n):
        nonlocal index
        index += n
        cap.set(CAP_PROP_POS_FRAMES, index)

    def handleSeek(key):
        if key == '-': seek(-d_seek)
        elif key == '=': seek(+d_seek)

    def handleSelect():
        nonlocal ltrd
        area = guiSelectionUMat(img)
        if area is not None:
            ltrd = area
            return (index, rectLtrd2Xywh(*ltrd))

    frame_ops = [lambda: seek(-1), lambda: seek(+1)]

    unfinished, img = cap.read()
    while unfinished:
        imshow(title, img)
        if ltrd is not None: imshow(title_preview, cv2Crop(img, ltrd))
        key = cv2WaitKey()
        if key == 'c':
            select = handleSelect()
            if select is not None: yield select
        elif key == 'q': break

        elif key in '-=': handleSeek(key)
        elif key == ' ':
            while True:
                key1 = cv2WaitKey(0)
                if key1 == ' ': break
                elif key1 == 'c':
                    select = handleSelect()
                    if select is not None: yield select
                elif key1 in "-=89":
                    handleSeek(key1)
                    miniseek = ord(key1) - ord('8')  #< '8'/'9' map to single-frame ops
                    if miniseek in range(len(frame_ops)): frame_ops[miniseek]()
                    unfinished, img = cap.read()
                    imshow(title, img)
        unfinished, img = cap.read()
        index += 1
Example #20
def VideoStream(video: Video) -> Generator[VideoFrame, None, None]:
    from cv2 import VideoCapture
    from numpy import ndarray

    success: bool
    img: ndarray

    cap = VideoCapture(video.filename)
    success, img = cap.read()
    while success:
        yield VideoFrame(img)
        success, img = cap.read()
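
Because VideoStream is a generator, it composes with ordinary iteration tools; for instance (video being a Video instance as assumed above):

    for i, video_frame in enumerate(VideoStream(video)):
        if i >= 10:
            break
        # ... inspect video_frame ...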
Example #21
def get_frames(video, outpath):
    print(video)
    vidcap = VideoCapture(video)
    success, image = vidcap.read()
    count = 0
    print(success)
    while success:
        # save frame as JPEG file
        imwrite(outpath + "_frame%d.jpg" % count, image)
        success, image = vidcap.read()
        print('Read a new frame: ', success)
        count += 1
Example #22
def play(window):
	try: from sys import _MEIPASS
	except ImportError:
		from os.path import abspath
		_MEIPASS = abspath(".")
	vidcap = VideoCapture(_MEIPASS + "/video.mp4")
	success, image = vidcap.read()
	while success:
		for l in I2T(BytesIO(imencode(".jpg", resize(image, (144, 108), interpolation = 3))[1])).split("\n"):
			window.addstr(l + "\n")
			window.refresh()
		window.move(0, 0)
		success, image = vidcap.read()
Example #23
def test_type_read_times(cap: cv2.VideoCapture, cam_index: int, num_reads: int,
                         show_values: bool):
    # Read camera num_frames times and capture time taken per read.
    times = []
    prev = time()
    for i in range(num_reads):
        cap.read()
        now = time()
        times.append(now - prev)
        prev = now

    # Sort times taken low to high.
    times.sort()

    # Show basic output.
    to_print = "Test type: read time" + \
               "\n- Camera: " + str(cam_index) + \
               "\n- Mean time to read frame: " + str(sum(times) / len(times)) + \
               "\n- Median time to read frames: " + str(median(times)) + \
               "\n- Range of times to read frames: " + str(min(times)) + " to " + str(max(times))
    print(to_print)

    # Create dictionary of intervals of 0.01 seconds.
    intervals = dict()
    i = 0
    while i <= times[-1]:
        i += .01
        intervals[round(i, 3)] = list()

    # Sort times taken into dictionary.
    next_time = 0.01
    for t in times:
        while t > next_time:
            next_time += .01
            next_time = round(next_time, 3)
        intervals[next_time].append(t)

    # Output results.
    prev_key = 0
    for key in intervals:
        if len(intervals[key]) > 0:
            print("- Number of read times between {:.2f}".format(prev_key) +
                  " and {:.2f}".format(key) + " seconds:" +
                  str(len(intervals[key])) +
                  ". Percentage in this range: {:.1f}".format(
                      len(intervals[key]) / num_reads * 100) + "%")
            if show_values:
                print(intervals[key])
        prev_key = key
    print("\n")
Example #24
def iter_read_raw_video_images(video_capture: cv2.VideoCapture,
                               repeat: bool = False) -> Iterable[ImageArray]:
    while True:
        grabbed, image_array = video_capture.read()
        if not grabbed:
            LOGGER.info('video end reached')
            if not repeat:
                return
            video_capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
            grabbed, image_array = video_capture.read()
            if not grabbed:
                LOGGER.info('unable to rewind video')
                return
        yield image_array
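
Since the reader above is a plain iterable, the standard itertools apply; a sketch that caps a repeating capture at 100 frames (video_capture assumed open):

    from itertools import islice

    for image_array in islice(iter_read_raw_video_images(video_capture, repeat=True), 100):
        pass  # ... process image_array ...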
Example #25
def capture_background_image(capturer: cv2.VideoCapture,
                             dimensions=(64, 64),
                             skip=15) -> np.ndarray:
    """
    Captures and returns a single, still image from the webcam to use as the background.
    Use this image as the initial training point for a cv2.BackgroundSubtractorMOG2 object.
    `skip` is the number of frames to skip before capturing the image, to allow for pre-focusing.
    """
    for i in range(skip):
        capturer.read()
    ret, frame = capturer.read()
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # image = cv2.resize(image, dsize=dimensions)
    return image
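
As the docstring suggests, the captured still can seed a background subtractor; a minimal sketch assuming cv2 and an opened capturer:

    subtractor = cv2.createBackgroundSubtractorMOG2()
    background = capture_background_image(capturer)
    subtractor.apply(background, learningRate=1.0)  # train on the background only
    # for later frames: mask = subtractor.apply(gray_frame, learningRate=0)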
Example #26
def verify_video(vid_cap: cv2.VideoCapture) -> bool:
    success, image = vid_cap.read()
    while success:
        success, image = vid_cap.read()

        # If the video ended return false.
        if not success:
            vid_cap.release()
            return False

        # Return true if the frame contains Rick
        if verify_image(image):
            vid_cap.release()
            return True

    # The initial read failed (empty or unreadable video).
    vid_cap.release()
    return False
Example #27
def run_video(process_frame=lambda im: im, fps=30, camera_num=0):
    """
    Runs OpenCV video from a connected camera as Jupyter notebook output. Each frame from the camera
    is given to process_frame before being displayed. The default does no processing. The display is
    limited to the given number of frames per second (default 30). It can go below this, but will
    not go above it. If there is more than one camera connected, setting camera_num will select
    which camera to use.

    The video will continue being run until the code is interrupted with the stop button in Jupyter
    notebook.
    """
    from cv2 import VideoCapture, error, imencode
    from IPython.display import Image, display
    from time import time, sleep
    delay = 1 / fps  # the max number of seconds between frames
    vc = VideoCapture(camera_num)
    try:
        if not vc.isOpened():
            return  # if we did not successfully gain access to the camera
        # Try to get the first frame
        is_capturing, frame = vc.read()
        if frame is None: return  # no first frame
        # Process the frame, encode it as JPEG, and display it
        im = process_frame(bgr2rgb(frame))
        disp = display(Image(imencode('.jpg', bgr2rgb(im))[1].tobytes(),
                             width=im.shape[1],
                             height=im.shape[0]),
                       display_id=True)
        while is_capturing:
            # Keep getting new frames while they are available
            try:
                start = time()
                # Get the next frame
                is_capturing, frame = vc.read()
                if frame is None: break  # no next frame
                # Process the frame, encode it as JPEG, and display it
                im = process_frame(bgr2rgb(frame))
                disp.update(
                    Image(imencode('.jpg', bgr2rgb(im))[1].tobytes(),
                          width=im.shape[1],
                          height=im.shape[0]))
                # Wait a small amount of time to keep the frame rate from going too high
                wait = delay - (time() - start)
                if wait > 0: sleep(wait)
            except KeyboardInterrupt:
                break  # look out for a keyboard interrupt (stop button) to stop the script gracefully
        return im
    finally:
        vc.release()
Example #28
def test_type_fps(cap: cv2.VideoCapture, cam_index: int, num_reads: int):
    # Get time taken to read camera num_frames times with no other operations.
    tracker = FPSTracker()
    s = time()
    for i in range(num_reads):
        cap.read()
        tracker.update_fps(datetime.now())
    elapsed = time() - s

    # Print results.
    print("Test type: fps"
          "\n- Camera:", cam_index, "\n- Num_frames:", num_reads,
          "\n- Time_taken:", elapsed, "\n- tester says fps:",
          num_reads / elapsed, " and tracker says:", tracker.get_fps())
    print("\n")
Example #29
class CaptureSource:
    def __init__(self):
        self._video = None
        self._image = None

    def get_resolution(self):
        if self._video:
            return (int(self._video.get(CAP_PROP_FRAME_WIDTH)),
                    int(self._video.get(CAP_PROP_FRAME_HEIGHT)))

    def camera(self, num_cams):
        cam = Cameras()
        cam.check_cameras(num_cams)
        self._video = cam.show_and_select_camera()

    def video(self, filename):
        self._video = VideoCapture(filename)

    def image(self, filename):
        self._image = filename

    def get_frame(self):
        if self._video:
            retval, frame = self._video.read()
            return retval, frame
        return True, imread(self._image)
Example #30
def captureCamera(camera: cv2.VideoCapture,
                  tag: str = None,
                  resolution: list = None):
    resolution = [
        512, 512
    ] if resolution is None else resolution  # default parameter
    frame = np.zeros((512, 512, 3), np.uint8)  # empty frame
    DATA[tag] = [True, "", "", frame]  # initial camera results
    CAMERA_SETTINGS[tag] = {"HSV": False}  # default camera settings
    yield None  # end of the camera generator's initialisation

    if argv.debug and argv.show:
        cv2.namedWindow(tag)
        cv2.setMouseCallback(tag, mouseCallback, param=tag)

    while camera.isOpened():
        if DATA[tag][0]:
            flag, frame = camera.read()

            if flag:
                DATA[tag] = [
                    True,
                    getLetterFromFrame(frame),
                    getColorFromFrame(frame), frame
                ]

        if argv.show:
            frame = cv2.resize(frame, CAMERA_SIZE)
            cv2.resizeWindow(tag, *CAMERA_SIZE)
            if argv.debug and CAMERA_SETTINGS[tag]["HSV"]:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                DATA[tag][3] = frame
            cv2.imshow(tag, frame)

        yield None
Example #31
def video_player(vc: cv2.VideoCapture, frame_title: str, skip_frames: int,
                 draw: 'Callable[[int, object], None]'):  # typing.Callable
    width, height, frameCount, fps = video_properties(vc)
    duration = frameCount / fps
    minutes = int(duration / 60)
    seconds = int(duration % 60)
    spf = 1 / fps
    mspf = int(spf * 1000)
    print('Duration = ' + str(minutes) + ':' + str(seconds) + ' (' +
          str(duration) + ' seconds)')
    frame_id = 0
    while vc.isOpened():
        # Capture frame-by-frame
        ret, frame = vc.read()
        if ret:
            frame_id = frame_id + 1
            if frame_id <= skip_frames:
                continue

            video_drawer(frame_id, frame)
            draw(frame_id, frame)

            # Display the resulting frame
            cv2.imshow(frame_title, frame)
            # Press Q on the keyboard to exit
            if cv2.waitKey(mspf) & 0xFF == ord('q'):
                break
        # Break the loop
        else:
            break

    # Closes all the frames
    cv2.destroyAllWindows()
    return
Example #32
def start():
    '''
    Find connected cameras while showing a splash screen, then start the main GUI.
    '''
    #Load splash screen
    splScr = splash()
    found = []
    #find connected cameras        
    for num in range(10):
        cam = VideoCapture(num)
        #show progress bar 'movement' while the main program finds cameras
        splScr.update()
        if not cam.read()[0]:
            del(cam)
        else:
            cam.release()
            found.append(num)
        while gtk.events_pending():
            gtk.main_iteration()
    #destroy splash screen when all cameras are found
    splScr.destroy()
    print 'connected cameras:', len(found)
    #run main program
    main_gui(found)
    gtk.main()
    return
Example #33
class Camera(object):
    """
    The class responsible for communicating with the actual camera and
    getting images from it.
    
    Attributes:
        cam: An instance of an openCV VideoCapture. 
    """
    
    def __init__(self, device_num):
        """
        Uses a device num in case the system has multiple cameras attached.
        """
        
        self.cam = VideoCapture(device_num) 
        
    def get_image(self):
        """
        Grab a frame from the camera. The cameraCommunicator is the caller,
        and is responsible for lighting and location. The filename of the
        image is returned. 
        
        Raises:
            FatalCameraException: An image was not taken successfully.
        """
        
        #create the systematic filename
        timestamp = datetime.datetime.now()
        filename = utils.get_image_dir() + str(timestamp.date()) + \
                    str(timestamp.hour) + str(timestamp.minute) + \
                    str(timestamp.second) + '.jpg'
        
        #A series of reads to allow the camera to adjust to lighting
        self.cam.read()
        self.cam.read()
        self.cam.read()
        self.cam.read()
        
        #only the last is saved
        success, image = self.cam.read()
        
        if not success:
            raise FatalCameraException()
        else:
            imwrite(filename, image)
            
        return timestamp, filename
Example #34
File: main.py, Project: blagarde/robot
class Window(object):
    def __init__(self, title="Video Stream"):
        ''' Uses OpenCV 2.3.1 method of accessing camera '''
        self.title = title
        self.cap = VideoCapture(0)
        self.prev = self.get_frame()
        self.frame = self.get_frame()
        namedWindow(title, 1) 

    def get_frame(self):
        success, frame = self.cap.read()
        return self.to_grayscale(frame) if success else False

    def to_grayscale(self, frame):
        return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    def optical_flow(self): # FIXME
        flow = CreateImage(GetSize(self.frame), 32, 2)
        CalcOpticalFlowFarneback(self.prev, self.frame, flow, # takes 0.05s
            pyr_scale=0.5, levels=3, winsize=15,
            iterations=3, poly_n=5, poly_sigma=1.2, flags=0)
        return flow

    def disparity(self):
        if self.left is None or self.right is None:
            print "Capture left and right images using 'l' and 'r' keys before running disparity"
            return None

        hl, wl = self.left.shape
        hr, wr = self.right.shape
        disp_left  = cv2.cv.CreateMat(hl, wl, cv2.cv.CV_16S)
        disp_right = cv2.cv.CreateMat(hr, wr, cv2.cv.CV_16S)
        state = cv2.cv.CreateStereoGCState(16,2)
        # running the graph-cut algorithm
        from cv2.cv import fromarray
        cv2.cv.FindStereoCorrespondenceGC(fromarray(self.left), fromarray(self.right), disp_left, disp_right, state)
        cv2.cv.Save( "left.png", disp_left) # save the map
        cv2.cv.Save( "right.pgm", disp_right) # save the map

    def mainloop(self):
        while True:
            self.prev = self.frame
            self.frame = self.get_frame()
            sift(self.frame) # takes ~0.14s!
            imshow(self.title, self.frame)
            k = waitKey(10)
            if k == -1:
                pass
            elif chr(k) == 'l':
                self.left = self.frame
            elif chr(k) == 'r':
                self.right = self.frame
            elif chr(k) == 'd':
                self.disparity()
            elif k == 27:
                break
Example #35
File: __init__.py, Project: Azique/pupil
class Camera_Capture():
    """
    VideoCapture without uvc control using cv2.VideoCapture
    """
    def __init__(self,src_id,size=(640,480),fps=None,timebase=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        ###add cv videocapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

        if timebase is None:
            logger.debug("Capture will run with default system timebase")
            self.timebase = c_double(0)
        elif isinstance(timebase,c_double):
            logger.debug("Capture will run with app wide adjustable timebase")
            self.timebase = timebase
        else:
            logger.error("Invalid timebase variable type. Will use default system timebase")
            self.timebase = c_double(0)


    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp,img)

    def set_size(self,size):
        width,height = size
        self.capture.set(3, width)   # 3 == CAP_PROP_FRAME_WIDTH
        self.capture.set(4, height)  # 4 == CAP_PROP_FRAME_HEIGHT

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self,fps):
        self.capture.set(5,fps)  # 5 == CAP_PROP_FPS

    def get_fps(self):
        return self.capture.get(5)

    def get_now(self):
        return time()

    def create_atb_bar(self,pos):
        size = 0,0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
Example #36
def video_loop(aframes_queue,person_queue):
    vc = VideoCapture(0)
    rval, frame = vc.read()
    people = {}
    colors = ((0,0,255),(255,255,0))
    while True:
        rval, frame = vc.read()
        if frame is None:
            c = waitKey(10)
            continue
        aframe = NP.asarray(frame[:,:])
        im = Image.fromarray(frame)
        draw = ImageDraw.Draw(im)
        
        while not person_queue.empty():
            name,rect,name_size = person_queue.get()
            people[name] = {'rect' : rect, 'name_size' : name_size, 
                            'time_found' : time.time()}

        name_counter = 0        
        for name in list(people.keys()):  # copy the keys: entries may be popped below
            if name_counter < 2:
                draw_name(draw, people[name], name, name_counter, colors[name_counter])
            name_counter += 1
            
            if time.time()>people[name]['time_found']+2:
                # stop displaying after 2 seconds
                people.pop(name)
                
        frame2 = NP.array(im)
        imshow('frame',frame2)


        if aframes_queue.empty():
            aframes_queue.put(aframe)
        c = waitKey(1)
        if c == 27: # exit on ESC
            break
    
    vc.release()
    destroyAllWindows()
Example #37
def capture():
    # open Camera
    cam = VideoCapture(0)
    if not cam.isOpened():
        LOGGER.debug('FAILED to open camera!!!')
        return None

    # capture image (read repeatedly so auto-exposure can settle)
    for i in range(100):
        status, img = cam.read()
    if not status:
        LOGGER.debug('FAILED to capture image!!!')
        return None

    cam.release()
    return img
Example #38
File: camgrab.py, Project: corerd/PyDomo
def grabImageFromUSB(cameraNumber=0):
    '''Grabs a snapshot from the specified USB camera.

    Returns bool, video frame decoded as a JPEG bytearray.
    '''
    from cv2 import VideoCapture, imencode

    # initialize the camera
    cam = VideoCapture(cameraNumber)
    retVal, rawData = cam.read()
    if not retVal:
        # frame capture returned an error
        return False, None
    retVal, jpgData = imencode('.jpg', rawData)
    if not retVal:
        # image encode errors
        return False, None
    return retVal, bytearray(jpgData)
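
A small usage sketch: the JPEG bytearray returned above can be written straight to disk (the output path is a placeholder):

    ok, jpg = grabImageFromUSB(0)
    if ok:
        with open('snapshot.jpg', 'wb') as f:
            f.write(jpg)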
Example #39
    def check_cameras_linux(self, num=MAX_CAMERAS):
        """Comprueba las cámaras disponibles.

        :Param num: máximo número de cámaras a comprobar
        :Keyword num: 99 por defecto, ya que en Linux es lo permitido
        :Param num: int
        :Return: lista de cámaras disponibles
        :Rtype: list of Capture
        """
        n = 0
        while len(self.cameras) < num and n <= MAX_CAMERAS:
            camera = VideoCapture(n)
            retval, frame = camera.read()
            if retval:
                self.cameras.append(camera)
            else:
                camera.release()  # free devices that did not deliver a frame
            n += 1
        if num != MAX_CAMERAS and len(self.cameras) != num:
            print("Found %d of %d cameras. " % (len(self.cameras), num))
            exit()
        return len(self.cameras)
Example #40
class Camera_Capture():
    """
    VideoCapture without uvc control using cv2.VideoCapture
    """
    def __init__(self,src_id,size=(640,480),fps=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        ###add cv videocapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp,img)

    def set_size(self,size):
        width,height = size
        self.capture.set(3, width)   # 3 == CAP_PROP_FRAME_WIDTH
        self.capture.set(4, height)  # 4 == CAP_PROP_FRAME_HEIGHT

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self,fps):
        self.capture.set(5,fps)  # 5 == CAP_PROP_FPS

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self,pos):
        size = 0,0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
Example #41
class MouseTracker(object):
    def __init__(self, mouse, n=1, data_dir='.', diff_thresh=80, resample=1, translation_max=50, smoothing_kernel=19, consecutive_skip_threshold=0.08, selection_from=[], point_mode='auto'):
        self.mouse = mouse
        self.n = n
        self.data_dir = data_dir
        
        # Parameters (you may vary)
        self.diff_thresh = diff_thresh
        self.resample = resample
        self.translation_max = translation_max
        self.kernel = smoothing_kernel

        # Parameters (you should not vary)
        self.cth1 = 0
        self.cth2 = 0
        plat = sys.platform
        if 'darwin' in plat:
            self.fourcc = CV_FOURCC('m','p','4','v') 
        elif plat[:3] == 'win':
            self.fourcc = 1
        else:
            self.fourcc = -1
        
        self.fh = FileHandler(self.data_dir, self.mouse, self.n)

        self.framei = 0
        self.load_time()
        self.consecutive_skip_threshold = (self.fs/self.resample) * consecutive_skip_threshold
        self.load_background()
        self.height, self.width = self.background.shape
        self.mov = VideoCapture(self.fh.get_path(self.fh.TRIAL, self.fh.MOV))
        self.mov.read()  # discard the first frame
        self.time = self.time[1:]
        #self.get_frame(self.mov,n=40) #MUST ADJUST TIME IF USING THIS
        self.load_pts(mode=point_mode)
        self.make_rooms()

    def end(self):
        self.results = dict(pos=self.pos, time=np.array(self.t)-self.t[0], guess=self.guess, heat=self.heat, contour=self.contour, pct_xadj=self.pct_xadj)
        np.savez(self.fh.make_path('tracking.npz'), **self.results)
        savemat(self.fh.make_path('tracking.mat'), self.results)
        
        self.mov.release()
        destroyAllWindows()
    def man_update(self, d):
        for k,v in d.items():
            setattr(self,k,v)
    def make_rooms(self):
        self.path_x = mpl_path.Path(self.pts[np.array([self.xmli,self.xoli,self.xori,self.xmri])])
        self.path_y = mpl_path.Path(self.pts[np.array([self.ymli,self.yoli,self.yori,self.ymri])])
        self.path_z = mpl_path.Path(self.pts[np.array([self.zmli,self.zoli,self.zori,self.zmri])])

        #experimental: hand in frame on x room
        self.path_x_adj = mpl_path.Path(self.pts[np.array([self.xoli,self.xoli_adj,self.xori_adj,self.xori])])
        self.xadj_mask = np.zeros((self.height,self.width))
        for iy in xrange(self.xadj_mask.shape[0]):
            for ix in xrange(self.xadj_mask.shape[1]):
                self.xadj_mask[iy,ix] = self.path_x_adj.contains_point([ix,iy])
        self.xadj_idxs = np.squeeze(np.argwhere(self.xadj_mask==True))

        self.border_mask = np.zeros((self.height,self.width))
        pthpts = self.pts[np.array([self.yoli_adj,self.yori_adj,self.ymri,self.ycri,self.zmli,self.zoli_adj,self.zori_adj,self.zmri,self.zcri,self.xmli,self.xoli_adj,self.xori_adj,self.xmri,self.xcri,self.ymli])]
        pth = mpl_path.Path(pthpts)
        for iy in xrange(self.border_mask.shape[0]):
            for ix in xrange(self.border_mask.shape[1]):
                self.border_mask[iy,ix] = pth.contains_point([ix,iy])
    def classify_pts(self):
        #stored in (x,y)
        #c: center
        #m: middle
        #o: out
        #x: bottom arm, y: left arm, z: right arm
        #l: left when going down arm, r: right when going down arm
        #pt is: [x/y/z c/m/o l/r]
        X,Y = 0,1
        def nn(pidx,n,ex=[]):
            #idxs of n closest pts to p, excluding all idxs in ex
            p = self.pts[pidx]
            ds = np.array([dist(pp,p) for pp in self.pts])
            idxs =  np.argsort(ds)
            idxs = np.array([i for i in idxs if i not in ex])
            return idxs[:n]
        def sortby(pidxs, dim):
            pts = self.pts[np.array(pidxs)]
            return pidxs[np.argsort(pts[:,dim])]
        dists = np.array([dist(self.pts_c, p) for p in self.pts])
        c3i = self.c3i[np.argsort(self.pts[self.c3i][:,0])]
        m6i = self.m6i
        o6i = self.o6i
        
        #classify them:
        xcri=ycli=c3i[0]
        ycri=zcli=c3i[1]
        zcri=xcli=c3i[2]
        temp = nn(xcri, 2, ex=c3i)
        ymli,xmri = sortby(temp, Y)
        temp = nn(ycri, 2, ex=c3i)
        ymri,zmli = sortby(temp, X)
        temp = nn(zcri, 2, ex=c3i)
        zmri,xmli = sortby(temp, Y)
        cm9 = [xcri,ycri,zcri,xmri,xmli,ymri,ymli,zmri,zmli]

        xoli = nn(xmli, 1, ex=cm9)[0]
        xori = nn(xmri, 1, ex=cm9)[0]
        yoli = nn(ymli, 1, ex=cm9)[0]
        yori = nn(ymri, 1, ex=cm9)[0]
        zoli = nn(zmli, 1, ex=cm9)[0]
        zori = nn(zmri, 1, ex=cm9)[0]
        
        #accounting for inner wall reflections:
        zol,zml = np.array([self.pts[zoli],self.pts[zmli]]).astype(np.int32)
        dd = np.sqrt(np.sum((zol-zml)**2))
        cup_dd = 0.80*dd
        #z cup:
        zol,zml = np.array([self.pts[zoli],self.pts[zmli]]).astype(np.int32)
        d_zl = zol-zml
        theta_zl = np.arctan2(*d_zl)
        l2 = zml + cup_dd * np.array([np.sin(theta_zl),np.cos(theta_zl)])
        zor,zmr = np.array([self.pts[zori],self.pts[zmri]]).astype(np.int32)
        d_zr = zor-zmr
        theta_zr = np.arctan2(*d_zr)
        r2 = zmr + cup_dd * np.array([np.sin(theta_zr),np.cos(theta_zr)])
        zr2,zl2 = r2,l2
        #y cup:
        yol,yml = np.array([self.pts[yoli],self.pts[ymli]]).astype(np.int32)
        d_yl = yol-yml
        theta_yl = np.arctan2(*d_yl)
        l2 = yml + cup_dd * np.array([np.sin(theta_yl),np.cos(theta_yl)])
        yor,ymr = np.array([self.pts[yori],self.pts[ymri]]).astype(np.int32)
        d_yr = yor-ymr
        theta_yr = np.arctan2(*d_yr)
        r2 = ymr + cup_dd * np.array([np.sin(theta_yr),np.cos(theta_yr)])
        yr2,yl2 = r2,l2
        #x cup:
        xol,xml = np.array([self.pts[xoli],self.pts[xmli]]).astype(np.int32)
        d_xl = xol-xml
        theta_xl = np.arctan2(*d_xl)
        l2 = xml + cup_dd * np.array([np.sin(theta_xl),np.cos(theta_xl)])
        xor,xmr = np.array([self.pts[xori],self.pts[xmri]]).astype(np.int32)
        d_xr = xor-xmr
        theta_xr = np.arctan2(*d_xr)
        r2 = xmr + cup_dd * np.array([np.sin(theta_xr),np.cos(theta_xr)])
        xr2,xl2 = r2,l2
        self.pts = np.rint(np.concatenate((self.pts, [zr2,zl2,yr2,yl2,xr2,xl2])))

        pts_dict = dict(pts=self.pts,xcri=xcri,ycli=ycli,ycri=ycri,zcli=zcli,zcri=zcri,xcli=xcli,xmri=xmri,ymli=ymli,ymri=ymri,zmli=zmli,xmli=xmli,zmri=zmri,xoli=xoli,xori=xori,yoli=yoli,yori=yori,zoli=zoli,zori=zori,zori_adj=-6,zoli_adj=-5,yori_adj=-4,yoli_adj=-3,xori_adj=-2,xoli_adj=-1)
        self.man_update(pts_dict)

        np.savez(self.fh.make_path('pts.npz', mode=self.fh.BL), **pts_dict)
    def verify_pts(self, add_to_all=True):
        if add_to_all:
            self.all_possible_pts += list(self.pts)
        if len(self.pts) < 15:
            return False
        if len(self.pts) > 25:
            return False
        elif len(self.pts) > 15:
            allperms = np.array(list(it.combinations(range(len(self.pts)), 15)))
            if len(allperms)>200:
                totryi = np.random.choice(range(len(allperms)),200,replace=False)
                totryi = allperms[totryi]
            else:
                totryi = allperms
        elif len(self.pts) == 15:
            totryi = [range(15)]
        
        for ptsi in totryi:
            pts = self.pts[ptsi]
            self.pts_c = np.mean(pts,axis=0)
            dists = np.array([dist(self.pts_c, p) for p in pts])
            c3i = np.argsort(dists)[:3]
            m6i = np.argsort(dists)[3:9]
            o6i = np.argsort(dists)[9:]

            good = True
            
            #test dists from center
            if np.std(dists[c3i]) > 1. or np.std(dists[m6i]) > 1. or np.std(dists[o6i]) > 2.5:
                good = False
            #x = self.background.copy()
            #for pt in pts:
            #    cv2.circle(x, tuple(pt), 4, (255,255,255), thickness=3)
            #pl.figure(2);pl.imshow(x)
            #raw_input()
            ##
            
            #test outer dists from each other
            o6 = pts[o6i]
            omindists = np.array([np.min(np.array([dist(p,pp) for p in o6])) for pp in o6])
            if np.std(omindists) > 0.6:
                good = False
           
            #test middle dists from each other
            m6 = pts[m6i]
            mmindists = np.array([np.min(np.array([dist(p,pp) for p in m6])) for pp in m6])
            if np.std(mmindists) > 0.6:
                good = False
            
            if good:
                self.c3i = c3i
                self.m6i = m6i
                self.o6i = o6i
                self.pts = pts
                return True
           
        return False
    def permute_pts(self):
        #NOT IN USE
        apts = np.array(self.all_possible_pts)
        unpts = []
        for pt in apts:
            if len(unpts)==0:
                unpts.append(pt)
            else:
                dists = np.sqrt(np.sum((pt-np.array(unpts))**2,axis=1))
                mindist = np.min(dists)
                if mindist > 5:
                    unpts.append(pt)
        unpts = np.array(unpts)
        if len(unpts)<15:
            return False
        for _ in xrange(10000):
            testpts = np.random.choice(np.arange(len(unpts)),15, replace=False)
            testpts = unpts[testpts]
            self.pts = testpts
            ##
            #x = self.background.copy()
            #for pt in self.pts:
            #    cv2.circle(x, tuple(pt), 4, (255,255,255), thickness=3)
            #pl.figure(2);pl.imshow(x)
            ##
            if self.verify_pts(add_to_all=False):
                return True
        return False
    def load_pts(self, mode='auto'):
        try:
            self.man_update(np.load(self.fh.make_path('pts.npz', mode=self.fh.BL)))
        except Exception:
            if mode == 'auto':
                self.all_possible_pts = []
                invalid = True
                attempts = 0
                while invalid:
                    if attempts > 500:
                        raise Exception('Pts cannot be found.')
                    img = self.background_image.copy()
                    lp_ksizes = [13,15,17,19,21,23,25] #from 5-15 before
                    lp_ksize = rand.choice(lp_ksizes)
                    sbd_areas = [range(3,20), range(61,140)] #8,26 46,55
                    sbd_area = [rand.choice(sbd_areas[0]), rand.choice(sbd_areas[1])]
                    sbd_circs = [np.arange(0.05,0.35), range(1000,1001)]#0.19,0.35 1000
                    sbd_circ = [rand.choice(sbd_circs[0]), rand.choice(sbd_circs[1])]
                    subtr_rowmeans = rand.choice([True,False])

                    if subtr_rowmeans:
                        img = img-np.mean(img,axis=1)[:,None]
                    img = cv2.Laplacian(img, cv2.CV_32F, ksize=lp_ksize)
                    img += abs(img.min())
                    img = img/img.max() *255
                    img = img.astype(np.uint8)

                    #pl.figure(1);pl.imshow(img,cmap=pl.cm.Greys_r)
                    params = cv2.SimpleBlobDetector_Params()
                    params.filterByArea = True
                    params.filterByCircularity = True
                    params.minArea,params.maxArea = sbd_area
                    params.minCircularity,params.maxCircularity = sbd_circ
                    detector = cv2.SimpleBlobDetector(params)
                    fs = detector.detect(img)
                    pts = np.array([f.pt for f in fs])
                    pts = np.round(pts).astype(np.uint32)
                    x = img.copy()
                    for pt in pts:
                        cv2.circle(x, tuple(pt), 4, (255,255,255), thickness=3)
                    #pl.figure(2);pl.imshow(x);raw_input()
                    self.pts = pts
                    invalid = not self.verify_pts()
                    attempts += 1
            elif mode == 'manual':
                pl.imshow(self.background_image, cmap=pl.cm.Greys_r)
                pl.title('center3->middle6->outer6')
                pts = pl.ginput(n=15, timeout=-1)
                pts = np.round(pts).astype(np.uint32)
                self.pts = np.array(pts)
                pl.close()
                # note that verify is being skipped
                self.c3i,self.m6i,self.o6i = np.arange(0,3),np.arange(3,9),np.arange(9,15)
                self.pts_c = np.mean(self.pts, axis=0)
            self.classify_pts()
    def load_time(self):
        with open(self.fh.get_path(self.fh.TRIAL,self.fh.TIME),'r') as f:
            self.time = np.array(json.loads(f.read()))
        self.Ts = np.mean(self.time[1:]-self.time[:-1])
        self.fs = 1/self.Ts
    def load_background(self):
        try:
            bg = np.load(self.fh.make_path('background.npz',mode=self.fh.BL))
            background = bg['computations']
            background_image = bg['image']
        except Exception:
            blmov = VideoCapture(self.fh.get_path(self.fh.BL,self.fh.MOV))
            valid, background, ts = self.get_frame(blmov, n=-1, blur=True)
            blmov.release()
            
            blmov = VideoCapture(self.fh.get_path(self.fh.BL,self.fh.MOV))
            valid, background_image, ts = self.get_frame(blmov, n=-1, blur=False)
            blmov.release()
            
            np.savez(self.fh.make_path('background.npz',mode=self.fh.BL), computations=background, image=background_image)
        self.background, self.background_image = background, background_image
    def get_frame(self, mov, n=1, skip=0, blur=True):
        for s in range(skip):
            mov.read()
            self.framei += 1 #the number of frames that have been read
        if n==-1:
            n = 99999999999999999.
        def get():
            valid, frame = mov.read()
            if not valid:
                return (False, None, None)
            ts = self.time[self.framei]
            self.framei += 1
            frame = frame.astype(np.float32)
            frame = cvtColor(frame, CV_RGB2GRAY)
            if blur:
                frame = GaussianBlur(frame, (self.kernel,self.kernel), 0)
            return valid,frame,ts

        valid,frame,ts = get()
        i = 1
        while valid and i<n:
            valid,new,ts = get()
            i += 1
            if valid:
                frame += new
        
        if frame is not None:
            frame = frame/i
        return (valid, frame, ts)
    def find_possible_contours(self, frame, consecutive_skips):
        self.diff = absdiff(frame,self.background)
        _, self.diff = threshold(self.diff, self.diff_thresh, 1, THRESH_BINARY)
        diff_raw = self.diff.copy()
        self.diff = self.diff*self.border_mask
        edges = Canny(self.diff.astype(np.uint8), self.cth1, self.cth2)
        contours, hier = findContours(edges, RETR_EXTERNAL, CHAIN_APPROX_TC89_L1)
        #contours = [c for c in contours if not any([pa.contains_point(contour_center(c)) for pa in self.paths_ignore])]
        if consecutive_skips>self.consecutive_skip_threshold:
            consecutive_skips=0
            possible = contours
        else:
            possible = [c for c in contours if dist(contour_center(c),self.last_center)<self.translation_max]
        return possible, diff_raw
    def choose_best_contour(self, possible):
        chosen = possible[np.argmax([cv2.arcLength(c,False) for c in possible])] #arcLength or contourArea
        center = contour_center(chosen,asint=True)[0]
        return chosen,center
    def label_frame(self, frame, center):
        showimg = np.copy(frame).astype(np.uint8)
        if self.path_x.contains_point(center):
            color = (0,0,0)
        elif self.path_y.contains_point(center):
            color = (210,210,210)
        elif self.path_z.contains_point(center):
            color = (100,100,100)
        else:
            color = (255,255,255)
        circle(showimg, tuple(center), radius=10, thickness=5, color=color)
        for pt in self.pts.astype(int):
            circle(showimg, tuple(pt), radius=4, thickness=3, color=(0,0,0))
        return showimg
    def show_frame(self, frame, wait=1):
        cv2imshow('Tracking',frame)
        waitKey(wait)
    def run(self, show=False, save=False, tk_var_frame=None, wait=1, start_pos='none'):
        #interfaces
        if show or save:
            fsize = (self.width*2, self.height)
            save_frame = np.zeros([self.height, self.width*2, 3], dtype=np.uint8)
        if show:
            namedWindow('Tracking')
        if save:
            writer = VideoWriter()
            writer.open(self.fh.make_path('tracking.avi'),self.fourcc,round(self.fs),frameSize=fsize,isColor=True)
        
        #run
        self.framei = 0
        self.pos = []
        self.t = []
        self.guess = []
        self.contour = []
        self.pct_xadj = []
        self.heat = np.zeros((self.height,self.width))
        consecutive_skips = 0
        if start_pos == 'x':
            start_pts = [self.xori_adj, self.xoli_adj]
        elif start_pos == 'y':
            start_pts = [self.yori_adj, self.yoli_adj]
        elif start_pos == 'z':
            start_pts = [self.zori_adj, self.zoli_adj]
        elif start_pos == 'c':
            start_pts = [self.zori, self.xori, self.yori]
        elif start_pos == 'none':
            start_pts = [self.zori, self.xori, self.yori]
            consecutive_skips = self.consecutive_skip_threshold+1
        self.last_center = np.mean(self.pts[np.array(start_pts)],axis=0).astype(int)
        self.last_contour = np.array([self.last_center])
        valid,frame,ts = self.get_frame(self.mov,skip=self.resample-1)
        while valid:
            possible,diff_raw = self.find_possible_contours(frame,consecutive_skips)
            self.pct_xadj.append(np.mean( diff_raw[self.xadj_idxs[:,0],self.xadj_idxs[:,1]]))
            
            if len(possible) == 0:
                center = self.last_center
                contour = self.last_contour
                self.guess.append(True)
                consecutive_skips+=1
            else:
                contour,center = self.choose_best_contour(possible)
                self.guess.append(False)
                consecutive_skips = 0
            self.pos.append(center)
            self.contour.append(contour)
            self.t.append(ts)
            self.heat[center[1],center[0]] += 1
            
            if show or save:
                lframe = self.label_frame(frame, center)
                save_frame[:,:self.width, :] = cvtColor(lframe.astype(np.float32), CV_GRAY2RGB)
                save_frame[:,self.width:, :] = cvtColor((self.diff*255).astype(np.float32), CV_GRAY2RGB)
            if show:
                self.show_frame(save_frame, wait=wait)
            if save:
                writer.write(save_frame)
             
            self.last_center = center
            self.last_contour = contour
            valid,frame,ts = self.get_frame(self.mov,skip=self.resample-1)
        
            if tk_var_frame is not None:
                tk_var_frame[0].set('%i/%i'%(self.results['n_frames'], len(self.time)/float(self.resample) ))
                tk_var_frame[1].update()
        if save:
            writer.release()
        self.end()
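# A minimal usage sketch for the tracker above. Only run()'s signature is
# known from this snippet; the class and constructor names are hypothetical:
#
#     tracker = MouseTracker(movie_path, background, masks)  # hypothetical
#     tracker.run(show=True, save=True, wait=1, start_pos='none')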
示例#42
0
def main():
    cap = VideoCapture("feng.mp4")
    ret, frame = cap.read()
    if ret: # only write the snapshot if a frame was actually decoded
        imwrite("feng1.jpg", frame)
    cap.release()
示例#43
0
class camera(object):
    '''
    Object containing camera information.
    Callable: retrieves the current frame in the camera buffer.
    
    User accessible attributes:
        device        system device number
        resolution    camera resolution
        BGRimage      image in BGR format
        HSVimage      image in HSV format
        RGBimage      image in RGB format
        FPS           camera speed in FPS
        
    User accessible methods:
        close         close camera device
    '''
    def __init__(self, cam_num = -1, resolution = (640, 480)):
        '''
        create camera object
            cam_num            device number (integer)
            resolution         image resolution (tuple width x height)
        '''
        self.device = cam_num
        self.resolution = resolution
        self.BGRimage = []
        self.HSVimage = []
        self.RGBimage = []
        self.FPS = [0, 0]
        self.__avr = 0
        #assign and open device
        self.__capture = VideoCapture(cam_num)
        self.__capture.set(CV_CAP_PROP_FRAME_WIDTH,resolution[0])
        self.__capture.set(CV_CAP_PROP_FRAME_HEIGHT,resolution[1])
        # VideoCapture(cam_num) already opens the device; no extra open() call is needed
        self.__flag = False
        t0 = time()
        self.__flag, self.BGRimage = self.__capture.read()
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = self.FPS[0]
        self.__avr = self.FPS[0]
        print "camera", self.device, "ready @", self.FPS[0], "fps"
        return
    def __call__(self, frame_delay = 0, fast = False):
        '''
        retrieve current frame in camera buffer
            frame_delay        delay the frame decoding (integer)
            fast               if true, skip decoding the image to RGB format (boolean)
        '''
        #set timer to measure fps
        self.__avr = self.FPS[1]
        t0 = time()
        #try to retrieve current frame
        while not self.__flag:
            if frame_delay > 0:
                for i in xrange(frame_delay + 1):
                    self.__capture.grab()
                self.__flag, self.BGRimage = self.__capture.retrieve()
            else:
                self.__flag, self.BGRimage = self.__capture.read()
        self.__flag = False
        #decode bgr format to hsv
        self.HSVimage = cvtColor(self.BGRimage, CV_BGR2HSV)
        if fast:
            self.FPS[0] = 1/(time()-t0)
            self.FPS[1] = (self.FPS[0]+self.__avr)/2
            return
        #decode bgr format to rgb
        self.RGBimage = cvtColor(self.BGRimage, CV_BGR2RGB)
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = (self.FPS[0]+self.__avr)/2
        return
    def __str__(self):
        '''
        return camera information:
            device number
            device resolution
            instant speed
            average speed
        '''
        tmp = "camera object @ dev "+str(self.device)+", resolution: "+str(self.resolution)
        tmp = tmp +", fps: "+str(self.FPS[0])+", Avr. fps: "+str(self.FPS[1])
        return tmp
    def __del__(self):
        '''
        when the object is deleted, it closes the device
        '''
        self.close()
        return
    def close(self):
        '''
        close device, making it available for use
        '''
        #if the device is open then close it
        if self.__capture.isOpened():
            self.__capture.release()
            print "camera", self.device, "closed"
        return
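# A minimal usage sketch for the camera class above, assuming the same
# star-imported cv2 names (imshow, waitKey) used elsewhere in these examples:
cam = camera(0, resolution=(640, 480))
for _ in range(100):
    cam()                         # grab and decode the next frame
    imshow('preview', cam.BGRimage)
    if waitKey(1) == 27:          # ESC stops the preview early
        break
cam.close()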
示例#44
0
def captureTStamp(files, duration, cod,  fps=0, verbose=True):
    '''
    Records video from the source given in files for duration minutes,
    and also saves files with the timestamps of each frame.
    
    files = [url, saveVideoFile, saveDateFile, saveMillisecondFile]
    duration = time in minutes
    cod = codec
    fps = frames per second for the video to be saved
    verbose = print messages to screen
    
    If fps=0, the frame rate is read from the capture; for the FE camera
    it has to be specified explicitly.
    
    Written for OpenCV '2.4.9.1'.
    
    Examples
    --------
    
    from cameraUtils import captureTStamp
    
    # for the FE camera
    duration = 1 # in minutes
    files = ['rtsp://192.168.1.48/live.sdp',
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_tsFrame.txt"]
    fpsCam = 12
    cod = 'XVID'
    
    captureTStamp(files, duration, cod, fps=fpsCam)
    
    # %% for the PTZ camera
    duration = 0.2 # in minutes
    files = ["rtsp://192.168.1.49/live.sdp",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_tsFrame.txt"]
    
    fpsCam = 20
    cod = 'XVID'
    
    captureTStamp(files, duration, cod, fpsCam)
    
    '''
    
    fcc = fourcc(cod[0],cod[1],cod[2],cod[3]) # video codec
    
    if verbose:
        print(files)
        print("Duration",duration,"minutes")
        print("fps",fps)
        print("codec",cod)
    
    # initialization
    tFin = datetime.datetime.now() + datetime.timedelta(minutes=duration)
    
    ts = list()  # capture timestamps
    
    # open the capture, retrying until the stream is available
    cap = VideoCapture(files[0])
    while not cap.isOpened():
        cap = VideoCapture(files[0])
    
    print("capture opened")
    # configure the writer
    w = int(cap.get(frame_width))
    h = int(cap.get(frame_height))
    if not fps:
        fps = cap.get(prop_fps)
    # for the FE camera, pass fps explicitly: the property reports an arbitrary value
    
    out = VideoWriter(files[1], fcc, fps, (w, h), True)
    
    if verbose:
        print("capture open",cap.isOpened())
        print("frame size",w,h)
        print("output opened",out.isOpened())
    
    if not out.isOpened() or not cap.isOpened():
        out.release()
        cap.release()
        # exit function if unable to open cap or out
        return
    
    s0 = getsize(files[1]) # initial filesize before writing frame
    # first capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("first frame captured")
    # second capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("second frame captured")
    # third capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("third frame captured")
    
    s1 = getsize(files[1])  # size after saving 3 frames
    
    if s1==s0:
        out.release()
        cap.release()
        print("error when saving 3 frames, exiting")
        return 1 # error while saving first frame to file
    print("recording until", tFin)
    # loop
    while (t <= tFin):
        ret, frame = cap.read()
        
        if ret:
            t = datetime.datetime.now()
            ts.append(t)
            out.write(frame)
            if verbose:
                print(tFin,t)
                print("seconds elapsed",cap.get(pos_msec)/1000)
                print(frame.size)
        else:
            break # read failed: stop instead of looping forever on a stale t

    # end of loop
    
    # release and save
    out.release()
    cap.release()
    
    if verbose:
        print('loop exited, cap, out released, times saved to files')
        
    savetxt(files[2],ts, fmt= ["%s"])
    
    return 0  # success
示例#45
0
class Camera_Capture(object):
    """docstring for uvcc_camera"""
    def __init__(self, cam,size=(640,480),fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls['UVCC_REQ_FOCUS_AUTO'].set_val(0)
        except KeyError:
            pass

        if '6000' in self.name and False: # disabled: on Mac not enough controls are exposed to use this correctly
            logger.info("adjusting exposure for HD-6000 camera")
            try:
                self.controls['UVCC_REQ_EXPOSURE_AUTOMODE'].set_val(1)
                self.controls['UVCC_REQ_EXPOSURE_ABS'].set_val(156)
            except KeyError:
                pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp,img)

    def set_size(self,size):
        width,height = size
        self.capture.set(3, width)  # 3 == CAP_PROP_FRAME_WIDTH
        self.capture.set(4, height) # 4 == CAP_PROP_FRAME_HEIGHT

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self,fps):
        self.capture.set(5,fps) # 5 == CAP_PROP_FPS

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self,pos):
        # add uvc camera controls to a separate ATB bar
        size = (200,200)

        self.bar = atb.Bar(name="Camera_Controls", label=self.name,
            help="UVC Camera Controls", color=(50,50,50), alpha=100,
            text='light',position=pos,refresh=2., size=size)

        sorted_controls = [c for c in self.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        for control in sorted_controls:
            name = control.atb_name
            if control.type=="bool":
                self.bar.add_var(name,vtype=atb.TW_TYPE_BOOL8,getter=control.get_val,setter=control.set_val)
            elif control.type=='int':
                self.bar.add_var(name,vtype=atb.TW_TYPE_INT32,getter=control.get_val,setter=control.set_val)
                self.bar.define(definition='min='+str(control.min),   varname=name)
                self.bar.define(definition='max='+str(control.max),   varname=name)
                self.bar.define(definition='step='+str(control.step), varname=name)
            elif control.type=="menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype= atb.enum(name,control.menu)
                self.bar.add_var(name,vtype=vtype,getter=control.get_val,setter=control.set_val)
                if control.menu is None:
                    self.bar.define(definition='min='+str(control.min),   varname=name)
                    self.bar.define(definition='max='+str(control.max),   varname=name)
                    self.bar.define(definition='step='+str(control.step), varname=name)
            else:
                pass
            if control.flags == "inactive":
                pass
                # self.bar.define(definition='readonly=1',varname=control.name)

        self.bar.add_button("refresh",self.controls.update_from_device)
        self.bar.add_button("load defaults",self.controls.load_defaults)

        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
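# A minimal usage sketch, assuming a cam descriptor exposing src_id, uId
# and name, such as the entries of Camera_List() in the variant further
# below (Controls and Frame are external dependencies of this snippet):
#
#     cap = Camera_Capture(cam, size=(640, 480), fps=30)
#     frame = cap.get_frame()      # Frame(timestamp, img)
#     print cap.get_size(), cap.get_fps()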
示例#46
0
class Camera(object):
    ''' Communicate with the camera.

    Class governing the communication with the camera.

    Parameters
    -----------
    camera : int
        the index of the camera, best taken from func lookForCameras,
        from eyetracker.camera.capture
    dic : dict {propID: value}
        for the corresponding propIDs see the OpenCV documentation
        for VideoCapture.
        They will be set at the moment of object creation.

    Defines
    --------
    self.camera : index of the camera
    self.cap : capturing object
    self.frame : returns a frame from camera
    self.close : closes cap
    self.reOpen : reopens cap
    '''
    def __init__(self, camera, dic=None):
        self.camera = int(camera)
        self.cap = VideoCapture(self.camera)
        if dic:
            for propID, value in dic.iteritems():
                self.cap.set(propID, value)
        first_frame = self.frame()

    def frame(self):
        ''' Read frame from camera.

        Returns
        --------
        frame : np.array
            frame from camera
        '''
        if self.cap.isOpened():
            return self.cap.read()[1]
        else:
            print 'Cap is not opened.'
            return None

    def set(self, **kwargs):
        ''' Set camera parameters.

        Parameters
        -----------
        kwargs : {propID : value}
        '''
        for propID, value in kwargs.iteritems():
            self.cap.set(propID, value)

    def close(self):
        ''' Closes cap, you can reopen it with self.reOpen.
        '''
        self.cap.release()

    def reOpen(self, cameraIndex=None):
        ''' Reopens cap.
        '''
        if cameraIndex is None:
            cameraIndex = self.camera
        self.cap.open(cameraIndex)
        first_frame = self.frame()
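# A minimal usage sketch for the Camera class above; the propID constants
# assume a cv2 build exposing CAP_PROP_* (older 2.4 builds use
# cv2.cv.CV_CAP_PROP_* instead):
import cv2
cam = Camera(0, dic={cv2.CAP_PROP_FRAME_WIDTH: 640,
                     cv2.CAP_PROP_FRAME_HEIGHT: 480})
img = cam.frame()   # single frame as an np.array
cam.close()
cam.reOpen(0)       # the device can be reopened later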
示例#47
0
class Worker:

  ## Initialize Worker robot.
  def __init__(self):
    
    try:
      self.camera = VideoCapture(CAMERA_INDEX)
      self.camera.set(3,CAMERA_WIDTH)
      self.camera.set(4,CAMERA_HEIGHT)
      message = 'Success.'
    except Exception:
      message = 'Failure.'
    print('[Setting up Camera]...' + message)

    try:
      self.arduino = serial.Serial(DEVICE, BAUD)
      message = 'Success.'
    except Exception:
      message = 'Failure.'
    print('[Setting up Controller]...' + message)

    self.reset_worker()

  ## Capture image then identify green objects (bales and the zone).
  def detect_green(self):
    objects = []
    x = 0
    for cache in range(10):
      (success, frame) = self.camera.read()
    raw = Image.fromarray(frame)
    BGR = raw.split()
    B = array(BGR[0].getdata(), dtype=float32)
    G = array(BGR[1].getdata(), dtype=float32)
    R = array(BGR[2].getdata(), dtype=float32)
    NDI_G = (BIAS*G + MINIMUM_COLOR)/(R + B + MINIMUM_COLOR)
    matrix = NDI_G.reshape(CAMERA_HEIGHT,CAMERA_WIDTH)
    columns = matrix.sum(axis=0)
    high = numpy.mean(columns) + numpy.std(columns)
    low = numpy.mean(columns)
    while (x < CAMERA_WIDTH-1):
      if (columns[x] > high):
        start = x
        while (columns[x] > low) and (x < CAMERA_WIDTH-1):
          x += 1
        end = x
        size = (end - start)
        offset = (start + (end - start)/2) - CAMERA_CENTER
        if (size > MINIMUM_SIZE):
          objects.append((size,offset))
      else:
        x += 1
    return objects

  ## Capture image then identify yellow objects (home).
  def detect_yellow(self):
    objects = []
    x = 0
    for cache in range(30):
      (success, frame) = self.camera.read()
    raw = Image.fromarray(frame)
    BGR = raw.split()
    B = array(BGR[0].getdata(), dtype=float32)
    G = array(BGR[1].getdata(), dtype=float32)
    R = array(BGR[2].getdata(), dtype=float32)
    NDI_G = BIAS*(G + R + MINIMUM_COLOR)/(2*B + MINIMUM_COLOR)
    matrix = NDI_G.reshape(CAMERA_HEIGHT,CAMERA_WIDTH)
    columns = matrix.sum(axis=0)
    high = numpy.mean(columns) + numpy.std(columns)
    low = numpy.mean(columns)
    while (x < CAMERA_WIDTH-1):
      if (columns[x] > high):
        start = x
        while (columns[x] > low) and (x < CAMERA_WIDTH-1):
          x += 1
        end = x
        size = (end - start)
        offset = (start + (end - start)/2) - CAMERA_CENTER
        if (size > MINIMUM_SIZE):
          objects.append((size,offset))
      else:
        x += 1
    return objects

  ## Is Oriented? --> Boolean
  def is_oriented(self):
    for cache in range(10):
      (success, frame) = self.camera.read()
    grayscale = cv2.cvtColor(frame, cv.CV_BGR2GRAY)
    blurred = cv2.GaussianBlur(grayscale, (0,0), 5) # (0,0), 5
    colored = cv2.cvtColor(blurred,cv2.COLOR_GRAY2BGR)
    (flag, thresholded) = cv2.threshold(blurred, 50, 255, cv2.THRESH_BINARY) # 50 and 255 WORKS
    circles = cv2.HoughCircles(thresholded,cv2.cv.CV_HOUGH_GRADIENT,DP,MIN_DISTANCE,param1=EDGE_THRESHOLD, param2=CENTER_THRESHOLD, minRadius=MIN_RADIUS,maxRadius=MAX_RADIUS)
    try:
      for target in circles[0,:]:
        x = target[0]
        y = target[1]
        r = target[2]
      if (abs(CAMERA_CENTER - x) < CAMERA_THRESHOLD):  
        return True
      else:
        return False
    except TypeError:
      return False

  ## Connect to Server.
  def connect(self):
    if (self.connected_out == False) and (self.connected_in == False):
      try:
        self.socket_out = socket.socket(AF_INET, SOCK_STREAM)
        self.socket_out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket_out.bind((ADDRESS_OUT))
        self.socket_out.listen(QUEUE_MAX)
        (self.connection, self.address) = self.socket_out.accept()
        self.connected_out = True
        message = 'Success.'
      except socket.error as message:
        message = 'Failure.'
        self.connected_out = False
        self.socket_out.close()
        pass
      print('[Establishing Send Port]...' + str(message))
      try:
        self.socket_in = socket.socket(AF_INET,SOCK_STREAM)
        self.socket_in.connect((ADDRESS_IN))
        self.connected_in = True
        message = 'Success.'
      except socket.error as message:
        self.socket_in.close()
        self.connected_in = False
        pass
      print('[Establishing Receive Port]...' + str(message))
  
  ## Receives COMMAND from Server.
  def receive_command(self):
    try: 
      json_command = self.socket_in.recv(BUFFER_SIZE)
      dict_command = json.loads(json_command)
      self.command = dict_command['COMMAND']
      message = str(json_command)
    except socket.error as message:
      pass #!
    print('----------------------------------')
    print('[Receiving COMMAND]...' + str(message))

  ## After receiving COMMAND, determine action.
  def decide_action(self):
    if (self.command == 'START'):
      self.goal = 'STARTING'
      self.start()
    elif (self.command == 'CONTINUE'):
      if (self.gathered < 4):
        self.goal = 'GATHERING'
        self.gather()
      elif (not self.dumped):
        self.goal = 'STACKING'
        self.find_zone()
      elif (not self.returned):
        self.goal = 'RETURNING'
        self.return_home()
      else:
        self.action = 'W'
    elif (self.command == 'PAUSE'):
      self.goal = 'PAUSING'
      self.action = 'W'
    elif (self.command == 'DISCONNECT'):
      self.goal = 'DISCONNECTING'
      self.action = 'W'
    else:
      self.action = 'W'

  ## Start Logic
  def start(self):
    self.action = 'H'
    print('[Starting]...Helping Extend Rack.')

  ## Gather Logic
  def gather(self):
    objects = self.detect_green()
    #1 There is an object in view...
    try:
      (size, offset) = max(objects)
      #2 ...and it is small...
      if (size < SIZE_GRAB_RANGE) and not (self.error_number == ERROR_LOAD):
        #3 ...and I just tried avoiding to the left...
        if (self.error_number == ERROR_AVOID_RIGHT):
            message = 'Blocked After Avoiding Right -> Avoiding Left.'
            self.action = 'J'
        #3 ...and I just tried avoid to the right...
        elif (self.error_number == ERROR_AVOID_LEFT):
          message = 'Blocked After Avoiding Left --> Avoiding Right.'
          self.action = 'I'
        #3 ...and I was blocked after turning left...
        elif (self.error_number == ERROR_BLOCKED_LEFT):
          message = 'Blocked After Turning Left --> Avoiding Right.'
          self.action = 'I'
        #3 ...and I was blocked after turning right...
        elif (self.error_number == ERROR_BLOCKED_RIGHT):
          message = 'Blocked After Turning Right --> Avoiding Left.'
          self.action = 'J'
        #3 ...and I was just blocked after moving...
        elif (self.error_number == ERROR_CLOSE):
          message = 'Blocked After Moving --> Avoiding Right.'
          self.action = 'I'
        #3 ...and I didn't make bad movements...
        else:
          #4 ...and it's to the right...
          if (offset > CAMERA_THRESHOLD):
            if (offset > 4*CAMERA_THRESHOLD):
              message = 'Object in View -> 4 Right.'
              self.action = 'T'
            elif (offset > 3*CAMERA_THRESHOLD):
              message = 'Object in View -> 3 Right.'
              self.action = 'S'
            elif (offset > 2*CAMERA_THRESHOLD):
              message = 'Object in View -> 2 Right.'
              self.action = 'R'
            else:
              message = 'Object in View -> 1 Right'
              self.action = 'Q'
          #4 ...and it's to the left.
          elif (offset < -CAMERA_THRESHOLD):
            if (offset < -4*CAMERA_THRESHOLD):
              message = 'Object in View -> 4 Left'
              self.action = 'N'
            elif (offset < -3*CAMERA_THRESHOLD):
              message = 'Object in View -> 3 Left'
              self.action = 'M'
            elif (offset < -2*CAMERA_THRESHOLD):
              message = 'Object in View -> 2 Left'
              self.action = 'L'
            else:
              message = 'Object in View -> 1 Left'
              self.action = 'K'
          #4 ...and it's in front...
          else:
            message = 'Too Small -> Out of Range'
            self.action = 'F'
      #2 ...and it is large...
      elif (size > SIZE_GRAB_RANGE):
        #3 ...and I was just trying to grab something...
        if (self.error_number == ERROR_LOAD):
          message = 'Load Failed -> Reversing.'
          self.action = 'B'
        #3 ...and I was not just trying to grab something...
        else:
          #4 ...and it is oriented...
          if True: #! orientation check disabled; should call self.is_oriented()
              message = 'Large Enough -> In Range, Oriented -> Grab'
              self.action = 'G'
              self.gathered += 1
          #4 ...but it is not oriented..
          else:
            #5 ...and I already tried to orbit left.
            if (self.error_number == ERROR_ORBIT_LEFT):
              message = 'Large Enough -> In Range, Not Oriented --> Orbiting Right.'
              self.action = 'O'
            #5 ...and I didn't just try to go around right.
            elif (self.error_number == ERROR_ORBIT_RIGHT):
              message = 'Large Enough -> In Range, Not Oriented & Blocked Right -> Orbiting Left.'
              self.action = 'P'
            #5 ... grab anyway.
            else:
              message = 'In Range, Not Oriented -> Attempting to Orbit.'
              self.action = 'O'  
      #2
      else:
        self.action = 'W'
        message = 'Confused -> Waiting.'    
    #1 There are no objects in view.
    except ValueError:
      message = 'No Objects Detected -> Searching Right.'
      self.action = 'T'
    
    print('[Gathering]...' + str(objects) + '...' + message)
    print('[Remembering Action]...' )
    self.previous_action = self.action # remember newest action

  ## Find Zone
  def find_zone(self):
    objects = self.detect_green()
    # The Zone is in view...
    try:
      (size, offset) = max(objects)
      #2 ... but not at the zone...
      if (size < SIZE_HOME_RANGE):
        #3 ... and it's to the right.
        if (offset > CAMERA_THRESHOLD):
          if (offset > 4*CAMERA_THRESHOLD):
            message = 'Zone in View -> 4 Right.'
            self.action = 'T'
          elif (offset > 3*CAMERA_THRESHOLD):
            message = 'Zone in View -> 3 Right.'
            self.action = 'S'
          elif (offset > 2*CAMERA_THRESHOLD):
            message = 'Zone in View -> 2 Right.'
            self.action = 'R'
          else:
            message = 'Zone in View -> 1 Right'
            self.action = 'Q'
        #3 ...and it's to the left.
        elif (offset < -CAMERA_THRESHOLD):
          if (offset < -4*CAMERA_THRESHOLD):
            message = 'Zone in View -> 4 Left'
            self.action = 'N'
          elif (offset < -3*CAMERA_THRESHOLD):
            message = 'Zone in View -> 3 Left'
            self.action = 'M'
          elif (offset < -2*CAMERA_THRESHOLD):
            message = 'Zone in View -> 2 Left'
            self.action = 'L'
          else:
            message = 'Zone in View -> 1 Left'
            self.action = 'K'
        #3 ...and it is straight ahead.
        else:
          #4 ...but it is blocked ahead.
          if (self.error_number == ERROR_CLOSE):
            message = 'Object in Way -> Avoiding Right.'
            self.action = 'I'
          #4 ...but it is blocked after turning right.
          elif (self.error_number == ERROR_BLOCKED_RIGHT):
            message = 'Failed to Turn Right -> Avoiding Left.'
            self.action = 'J'
          #4 ...but it is blocked after avoiding right.
          elif (self.error_number == ERROR_AVOID_RIGHT):
            message = 'Failed to Avoid Right -> Avoiding Left.'
            self.action = 'J'
          #4 ...but it is blocked after turning left.
          elif (self.error_number == ERROR_BLOCKED_LEFT):
            message = 'Failed to Turn Left -> Avoiding Right.'
            self.action = 'I'
          #4 ...but it is blocked after avoiding left.
          elif (self.error_number == ERROR_AVOID_LEFT):
            message = 'Failed to Avoid Left -> Avoiding Right.'
            self.action = 'I'
          #4 ...and isn't blocked.
          else:
            message = 'Zone Too Small -> Moving Forward.'
            self.action = 'F'
      #2 ..and have reached the zone.
      else:  
        message = 'At Zone -> Dumping Bales.'
        self.action = 'D'
        self.dumped = True
    #1 The Zone is not in view...
    except ValueError:
        message = 'Cannot See Zone -> Searching Left For Zone.'
        self.action = 'N'
    print('[Finding Zone]...' + message)
  
  ## Return Home
  def return_home(self):
    objects = self.detect_yellow()
    #1 Home is in view...
    try:
      (size, offset) = max(objects)
      #2 ... but not at the home...
      if (size < SIZE_ZONE_RANGE):
        #3 ... and it's to the right.
        if (offset > CAMERA_THRESHOLD):
          if (offset > 4*CAMERA_THRESHOLD):
            message = 'Home in View -> 4 Right.'
            self.action = 'T'
          elif (offset > 3*CAMERA_THRESHOLD):
            message = 'Home in View -> 3 Right.'
            self.action = 'S'
          elif (offset > 2*CAMERA_THRESHOLD):
            message = 'Home in View -> 2 Right.'
            self.action = 'R'
          else:
            message = 'Home in View -> 1 Right'
            self.action = 'Q'
        #3 ...and it's to the left.
        elif (offset < -CAMERA_THRESHOLD):
          if (offset < -4*CAMERA_THRESHOLD):
            message = 'Home in View -> 4 Left'
            self.action = 'N'
          elif (offset < -3*CAMERA_THRESHOLD):
            message = 'Home in View -> 3 Left'
            self.action = 'M'
          elif (offset < -2*CAMERA_THRESHOLD):
            message = 'Home in View -> 2 Left'
            self.action = 'L'
          else:
            message = 'Home in View -> 1 Left'
            self.action = 'K'
        #3 ...and it is straight ahead.
        else:
          #4 ...but it is blocked.
          if ((self.error_number == ERROR_BLOCKED_LEFT) or (self.error_number == ERROR_CLOSE) or (self.error_number == ERROR_AVOID_LEFT)):
            message = 'Object in Way Failed to go Left -> Avoiding Right.'
            self.action = 'I'
          elif ((self.error_number == ERROR_AVOID_RIGHT) or (self.error_number == ERROR_BLOCKED_RIGHT)):
            message = 'Object in Way, Failed to go Right -> Avoiding Left.'
            self.action = 'J'
          else:
            message = 'Home Too Small -> Moving Forward.'
            self.action = 'F'
      #2 ...and have reached home.
      else:  
        message = 'At Home -> Parking.'
        self.action = 'F'
        self.returned = True
    #1 Home is not in view...
    except ValueError:
        message = 'Cannot See Home -> Searching Left For Home.'
        self.action = 'N'
    print('[Finding Home]...' + message)

  ## Execute action with arduino.
  def control_arduino(self):
    ### Send
    try:
      self.arduino.write(self.action)
      message = self.action
    except Exception:
      message = 'Failure.'
    print('[Sending ACTION]...' + message)
    ### Receive
    try:
      self.error_number = int(self.arduino.readline())
      message = str(self.error_number)
    except ValueError:
      self.error_number = ERROR_PARSE
      message = "ValueError: Failed to parse signal, retrying..."
    except OSError:
      self.error_number = ERROR_CONNECTION
      message = "OSError: Connection lost, retrying..."
    except SerialException:
      self.error_number = ERROR_CONNECTION
      message = "Serial Exception: Connection lost, retrying..."
    except SyntaxError:
      self.error_number = ERROR_PARSE
      message = "Syntax Error: Failed to parse signal, retrying..."
    except KeyError:
      self.error_number = ERROR_PARSE
      message = 'KeyError: Failed to parse signal, retrying...'
    print('[Receiving ERROR from Controller]...' + message)

  ## Handle Errors from Arduino
  def handle_error(self):
    if (self.error_number == ERROR_NONE):
      self.error = 'NONE'
    elif (self.error_number == ERROR_CLOSE):
      self.error = 'TOO CLOSE'
    elif (self.error_number == ERROR_LOAD):
      self.error = 'LOAD FAILED'
      self.gathered -= 1
    elif (self.error_number == ERROR_PARSE):
      self.error = 'PARSE FAILED'
    elif (self.error_number == ERROR_ACTION):
      self.error = 'BAD ACTION'
    elif (self.error_number == ERROR_BLOCKED_RIGHT):
      self.error = 'BLOCKED AFTER RIGHT TURN'
    elif (self.error_number == ERROR_BLOCKED_LEFT):
      self.error = 'BLOCKED AFTER LEFT TURN'
    elif (self.error_number == ERROR_ORBIT_RIGHT):
      self.error = 'ORBIT RIGHT FAILED'
    elif (self.error_number == ERROR_ORBIT_LEFT):
      self.error = 'ORBIT LEFT FAILED'
    elif (self.error_number == ERROR_AVOID_RIGHT):
      self.error = 'AVOID RIGHT FAILED'
    elif (self.error_number == ERROR_AVOID_LEFT):
      self.error = 'AVOID LEFT FAILED'
    else:
      self.error = 'UNKNOWN ERROR'
    print('[Handling ERRORS]...' + self.error)

  ## Send RESPONSE to Server
  def send_response(self):   
    try:
      json_response = json.dumps({'ACTION':self.action, 'ERROR':self.error, 'GATHERED':str(self.gathered), 'GOAL':self.goal})
      self.connection.send(json_response)
      message = str(json_response)
    except Exception:
      message = "Failure."
    print('[Sending RESPONSE]...' + message)

  ## Disconnect from Server.
  def disconnect(self):
    try:
      self.socket_in.close()
      self.socket_out.close()
      self.connection.close()
      self.reset_worker()
      message = 'Success.'
    except Exception:
      message = 'Failure.'
      pass
    print('[Disconnecting]...' + message)

  ## Reset Worker
  def reset_worker(self):
    try:
      self.connected_out = False
      self.connected_in = False
      self.command = None
      self.response = None
      self.action = None
      self.previous_action = None
      self.error = None
      self.error_number = None
      self.gathered = 0
      self.dumped = False
      self.returned = False
      self.x = 0
      self.y = 0
      self.orientation = 0
      message = 'Success.'
    except Exception:
      message = 'Failure.'
    print('[Resetting Worker]...' + message)
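# The gather/find_zone/return_home methods above all translate a horizontal
# pixel offset into the same action letters in CAMERA_THRESHOLD-wide bands.
# A table-driven sketch of that shared pattern (the function name is
# illustrative; band edges are approximate at exact multiples):
def steering_action(offset, threshold):
    if abs(offset) <= threshold:
        return 'F'                    # roughly centered: move forward
    band = min(int(abs(offset) / threshold), 4)
    right = ['Q', 'R', 'S', 'T']      # 1..4 bands to the right
    left = ['K', 'L', 'M', 'N']       # 1..4 bands to the left
    return right[band - 1] if offset > 0 else left[band - 1]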
示例#48
0
count = 0
frame_count = int(np.ceil(video.get(7) / 3)) # 7 == CAP_PROP_FRAME_COUNT; every 3rd frame is sampled
internal = 0
# while internal < 2507:
#     condition = video.grab()    
#     print "Skipping frame %d" % internal
#     count += 1
#     internal += 1
print "At frame: ", video.get(1)
print "Total frames: ", frame_count, "vs. ", video.get(7)
# sleep(10)
width,height = frame_count, int(np.ceil(frame_count/(16.0/9)))
# barcode = Image.new('RGB', (width, height), (255,255,255))
# draw = ImageDraw.Draw(barcode)
# f = open("barcode.jpg", 'w')
f = open("color_codes.txt", 'a')
condition,frame = video.read()
while condition:
    print "Processing frame %d" % count
    # color = findColor(frame)
    if count % 3 == 0:
        color = findColor(frame)
        f.write(str(color) + "\n")
        # draw.line([(count/3,0), (count/3,height)], fill=tuple(color), width=1)
    count += 1
    condition,frame = video.read()
    # if count == 2:
        # break
    print "%0.3f % complete." % (video.get(1)/video.get(7))
    # barcode.save(f)
    # print "Saved."
# plt.clf()
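# findColor is used above but not defined in this snippet; a plausible
# minimal stand-in (an assumption, not the original implementation) is the
# mean BGR color of the frame:
def findColor(frame):
    # average over both image axes, one value per channel
    return [int(c) for c in frame.mean(axis=(0, 1))]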
示例#49
0
class Camera_Capture(object):
    """docstring for uvcc_camera"""

    def __init__(self, cam, size=(640, 480), fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0)
        except KeyError:
            pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)

    def re_init(self, cam, size=(640, 480), fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0)
        except KeyError:
            pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)

        # recreate the bar with new values
        bar_pos = self.bar._get_position()
        self.bar.destroy()
        self.create_atb_bar(bar_pos)

    def re_init_cam_by_src_id(self, src_id):
        try:
            cam = Camera_List()[src_id]
        except KeyError:
            logger.warning("could not reinit capture, src_id not valid anymore")
            return
        self.re_init(cam, self.get_size())

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp, img)

    def set_size(self, size):
        width, height = size
        self.capture.set(3, width)  # 3 == CAP_PROP_FRAME_WIDTH
        self.capture.set(4, height) # 4 == CAP_PROP_FRAME_HEIGHT

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self, fps):
        self.capture.set(5, fps) # 5 == CAP_PROP_FPS

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self, pos):
        # add uvc camera controls to a separate ATB bar
        size = (200, 200)

        self.bar = atb.Bar(
            name="Camera_Controls",
            label=self.name,
            help="UVC Camera Controls",
            color=(50, 50, 50),
            alpha=100,
            text="light",
            position=pos,
            refresh=2.0,
            size=size,
        )

        sorted_controls = [c for c in self.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        cameras_enum = atb.enum("Capture", dict([(c.name, c.src_id) for c in Camera_List()]))

        self.bar.add_var("Capture", vtype=cameras_enum, getter=lambda: self.src_id, setter=self.re_init_cam_by_src_id)
        for control in sorted_controls:
            name = control.atb_name
            if control.type == "bool":
                self.bar.add_var(name, vtype=atb.TW_TYPE_BOOL8, getter=control.get_val, setter=control.set_val)
            elif control.type == "int":
                self.bar.add_var(name, vtype=atb.TW_TYPE_INT32, getter=control.get_val, setter=control.set_val)
                self.bar.define(definition="min=" + str(control.min), varname=name)
                self.bar.define(definition="max=" + str(control.max), varname=name)
                self.bar.define(definition="step=" + str(control.step), varname=name)
            elif control.type == "menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype = atb.enum(name, control.menu)
                self.bar.add_var(name, vtype=vtype, getter=control.get_val, setter=control.set_val)
                if control.menu is None:
                    self.bar.define(definition="min=" + str(control.min), varname=name)
                    self.bar.define(definition="max=" + str(control.max), varname=name)
                    self.bar.define(definition="step=" + str(control.step), varname=name)
            else:
                pass
            if control.flags == "inactive":
                pass
                # self.bar.define(definition='readonly=1',varname=control.name)

        self.bar.add_button("refresh", self.controls.update_from_device)
        self.bar.add_button("load defaults", self.controls.load_defaults)

        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        self.control = None
        logger.info("Capture released")
        pass
示例#50
0
from moustache import _putmoustache_, _putglass_ # _putglass_ is used below but was never imported; assuming it lives in the same module
from itertools import count

backgroundThreshold = 35

beach = imread('manzara640.png', -1)

video_capture = VideoCapture(0)

if not video_capture.isOpened():
    exit('The Camera is not opened')

counter = count(1)

print "!!! Step out of the frame !!!"
print "Background will be detected after the warm-up frames"
for i in range(0, 50): # warm the camera up before sampling the background
    ret, background = video_capture.read()
    imshow("Video", background)
    waitKey(10)
ret, background = video_capture.read()
background = pyrDown(background)
#background = pyrDown(background)

while True:
    print "Iteration %d" % counter.next()

    ret, temp = video_capture.read()
    #temp = pyrDown(temp)
    frame2 = _putmoustache_(temp)
    frame3 = _putglass_(frame2)
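    # the original snippet ends here; a plausible continuation (an
    # assumption) would show the composited frame and stop on ESC:
    imshow("Video", frame3)
    if waitKey(10) == 27:
        break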
示例#51
0
# HAAR cascades for face and eyes
faceCasc = CascadeClassifier(os.path.join(basedir,'cascadeFiles/haarcascade_frontalface_alt2.xml'))
eyeCasc = CascadeClassifier(os.path.join(basedir,'cascadeFiles/haarcascade_eye_tree_eyeglasses.xml'))

gender1 = 0
gender2 = 0
count = 0

# Important parameters for alignAtEyes()    
sz = 210
offSetH = 0.25
offSetV = 0.25

keyPressed = -1
while(keyPressed != 27):
    unused_retval, img0 = capture.read()
    
    img1 = cvtColor(img0, COLOR_BGR2GRAY)
    
    faceImgs, faceRegions = findFace(img1,faceCasc)
    
    if faceImgs is not None:
        for indx, img in enumerate(faceImgs):
            # Register the image with alignment at eyes
            img = alignAtEyes(eyeCasc, img, name, sz, offSetH, offSetV)
            
            # Get prediction and draw on img0
            if img is None:
                pass
示例#52
0
from cv2 import VideoCapture, imwrite
from datetime import datetime

cam = VideoCapture(0) # 0 -> index of camera
s, img = cam.read()
if s:
	tm = datetime.now()
	# build a timestamped filename, e.g. date-2015-3-14_time-9-26-53-589793.jpg
	filename = "date-{}-{}-{}_time-{}-{}-{}-{}.jpg".format(
		tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second, tm.microsecond)
	imwrite(filename, img)
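# release the device once the snapshot is written
cam.release()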