from cv2 import (VideoCapture, CascadeClassifier, flip, cvtColor,
                 COLOR_BGR2GRAY, rectangle, imshow, waitKey,
                 destroyAllWindows)


def detect_face_cv2_webcam():
    cap = VideoCapture(0)
    # Assumes the cascade XML sits in the working directory; the copies
    # bundled with opencv-python live under cv2.data.haarcascades.
    classifier = CascadeClassifier('haarcascade_frontalface_default.xml')

    # Check if the webcam is opened correctly
    if not cap.isOpened():
        raise IOError("Cannot open webcam")

    while True:
        ret, frame = cap.read()
        if not ret:
            break  # camera feed ended or failed
        frame = flip(frame, 1)  # mirror for a selfie-style view
        gray = cvtColor(frame, COLOR_BGR2GRAY)
        faces = classifier.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            # flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )

        for (x, y, w, h) in faces:
            rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

        imshow("Video", frame)
        if waitKey(1) == ord('q'):
            break

    cap.release()
    destroyAllWindows()
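The cascade lookup is the usual failure point; a sketch that resolves the copy bundled with the opencv-python wheel instead of relying on the working directory:

import cv2
from os.path import join

cascade = join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml')
classifier = cv2.CascadeClassifier(cascade)
assert not classifier.empty()  # CascadeClassifier fails silently on a bad path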
    def get_screenshot(self, carNo, show_view=True):
        """
        Capture a single frame.
        :param carNo:
        :param show_view:
        :return:
        """
        cap = None
        try:
            # Open the RTSP stream
            cap = VideoCapture(self.URL)
            ret, frame = cap.read()
            if not ret:
                consoleLog(self.logPre, "no frame captured")
                return

            imencode('.jpg', frame)[1].tofile(self.img_path.format(carNo=carNo, now=fmt_date(fmt=FMT_DATETIME)))
            if show_view:
                # Preview window
                namedWindow('view', WINDOW_NORMAL | WINDOW_KEEPRATIO)
                imshow("view", frame)
                waitKey(5 * 1000)
        except Exception as e:
            consoleLog(self.logPre, "error saving screenshot:", repr(e))
        finally:
            if cap:
                cap.release()
            destroyAllWindows()
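The imencode(...).tofile(...) pattern above matters on Windows, where cv2.imwrite cannot handle paths containing non-ASCII characters; a standalone sketch of the same trick:

import cv2
import numpy as np

def imwrite_unicode(path, image, ext='.jpg'):
    """Save `image` to `path` even when the path contains non-ASCII characters."""
    ok, buf = cv2.imencode(ext, image)
    if ok:
        buf.tofile(path)  # numpy handles the unicode path
    return ok

frame = np.zeros((10, 10, 3), np.uint8)
imwrite_unicode('截图.jpg', frame)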
Example #3
def main():
    """run the main loop"""
    # %%
    pygame.mixer.init()
    cam = VideoCapture(CAMERA_IDX)  # get camera handle

    detector = SlouchDetector(SLOUCH_THRESHOLD, do_store_imgs=STORE_IMGS)

    try:
        while True:
            img = _capture_img(cam)
            # a color image is just a Height x Width x NumChannels array,
            # really a rank-3 tensor; channels are Blue, Green, Red
            # (OpenCV's idiosyncratic ordering)
            # print(type(img), img.shape)   # =>  <ndarray>  (480, 640, 3)
            # Convert to grayscale: a grayscale image is a Height x Width
            # array, really a rank-2 tensor
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # print(type(gray), gray.shape)   # =>  <ndarray>  (480, 640)
            detector.detect(gray)
            time.sleep(1)

    finally:
        cam.release()  # close the webcam even if an exception escapes the loop
Example #4
def videoconvert(inp):
    capture = VideoCapture(inp)
    inp_ext = inp.split(".")
    fpsin = capture.get(CAP_PROP_FPS)
    count = 0
    success = 1
    while success:
        success, image = capture.read()
        if success and image is not None:
            imwrite("zzimg%d.jpg" % count, image)
            count += 1
    outfile = inp_ext[0] + '_output.mp4'
    fourcc = VideoWriter_fourcc(*'DIVX')
    fpsout = fpsin
    img = imread("zzimg0.jpg")
    height, width, layers = img.shape
    size = (width, height)
    out = VideoWriter(outfile, fourcc, fpsout, size, 0)  # isColor=0: grayscale output
    for i in range(count):
        img = imread("zzimg%d.jpg" % i, 0)  # flag 0: read back as grayscale
        out.write(img)
    print(
        "Video Converted to Grayscale, Please check the folder for the output file: ",
        outfile)
    out.release()
    capture.release()

    return outfile
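The round-trip through numbered JPEGs on disk is lossy and slow; a single-pass sketch of the same grayscale conversion (assumes the input is readable by VideoCapture):

from cv2 import (VideoCapture, VideoWriter, VideoWriter_fourcc,
                 cvtColor, COLOR_BGR2GRAY, CAP_PROP_FPS)

def videoconvert_stream(inp, outfile):
    cap = VideoCapture(inp)
    fps = cap.get(CAP_PROP_FPS)
    out = None
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        gray = cvtColor(frame, COLOR_BGR2GRAY)
        if out is None:
            h, w = gray.shape
            out = VideoWriter(outfile, VideoWriter_fourcc(*'DIVX'),
                              fps, (w, h), 0)  # isColor=0
        out.write(gray)
    cap.release()
    if out is not None:
        out.release()
    return outfile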
Example #5
def start():
    '''
    Find connected cameras, then hand off to the main GUI.
    '''
    #Load splash screen
    splScr = splash()
    found = []
    #find connected cameras
    for num in range(10):
        cam = VideoCapture(num)
        #show progress bar 'movement' while the main program finds cameras
        splScr.update()
        if not cam.read()[0]:
            cam.release()
        else:
            cam.release()
            found.append(num)
        while gtk.events_pending():
            gtk.main_iteration()
    #destroy splash screen when all cameras are found
    splScr.destroy()
    print('connected cameras:', len(found))
    #run main program
    main_gui(found)
    gtk.main()
    return
Example #6
    def cv_write_video_stream(videosrc: cv2.VideoCapture, width: int,
                              height: int, videoname: str):
        """Writes cv2.VideoCapture feed to a file.
                Args:
                    videosrc: A VideoCapture object.
                    width: Video width.
                    height: Video height.
                    videoname: Name of video file to be output.
                Returns:
                    No values, runs stream to file until key is pressed.
                """

        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('%s.avi' % videoname, fourcc, 30,
                              (width, height))

        while True:
            ret, frame = videosrc.read()
            if ret:
                out.write(frame)
            else:
                break

        videosrc.release()
        out.release()
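A usage sketch (device index and window dimensions are assumptions; the writer's FPS is hard-coded to 30 above, so the source should roughly match):

import cv2

src = cv2.VideoCapture(0)
w = int(src.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(src.get(cv2.CAP_PROP_FRAME_HEIGHT))
cv_write_video_stream(src, w, h, 'capture')  # writes capture.avi until the feed ends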
Example #7
class CameraStream:
    def __init__(self, camera=0, video_file=None, video=False):
        from cv2 import VideoCapture

        if video:
            # Use a video file as input.
            self.stream = VideoCapture(video_file)
        else:
            # Use a camera as input.
            self.stream = VideoCapture(camera)

        # Check if we were successful in opening the stream.
        if not self.stream.isOpened():
            name = video_file if video else camera
            raise IOError("Error opening video stream or file '{}'".format(name))

    def __del__(self):
        """ Destructor to close everything. """
        # When everything is done, release the video capture object
        self.stream.release()

    def get_frame(self):
        """ Yield frames until the video is completed. """
        while self.stream.isOpened():
            # Capture frame-by-frame
            ret, frame = self.stream.read()
            if ret:
                yield frame
            else:
                break  # avoid looping forever once the file hits EOF
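A usage sketch of the generator interface (the video path is a placeholder):

stream = CameraStream(video_file='clip.mp4', video=True)
for frame in stream.get_frame():
    print(frame.shape)  # each frame is an H x W x 3 BGR array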
Example #8
class Camera(object):
    def get_settings(self):
        if not hasattr(self, '_video_capture'):
            raise Exception("Start video capture before getting settings")
        settings = []
        for prop in global_camera_properties:
            prop_value = self._video_capture.get(prop['value'])
            if prop_value >= 0:
                settings.append({'name': prop['name'], 'value': prop_value})
        return settings

    def set_setting(self, setting, value):
        if not hasattr(self, '_video_capture'):
            raise Exception("Start video capture before setting a setting")
        # filter() returns a lazy iterator on Python 3, so materialize a list
        matches = [x for x in global_camera_properties if x['name'] == setting]
        if len(matches) == 1:
            setting_id = matches[0]['value']
        else:
            raise Exception("Setting {} not available".format(setting))
        self._video_capture.set(setting_id, value)

    def read(self):
        (retVal, image) = self._video_capture.read()
        return image

    def start(self):
        self._video_capture = VideoCapture(0)
        self.shape = self.read().shape

    def stop(self):
        self._video_capture.release()
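The class assumes a module-level global_camera_properties table; a sketch of its likely shape, mapping readable names to cv2 property IDs (the exact entries are an assumption):

import cv2

# hypothetical property table consumed by Camera.get_settings / set_setting
global_camera_properties = [
    {'name': 'brightness', 'value': cv2.CAP_PROP_BRIGHTNESS},
    {'name': 'contrast',   'value': cv2.CAP_PROP_CONTRAST},
    {'name': 'saturation', 'value': cv2.CAP_PROP_SATURATION},
]

camera = Camera()
camera.start()
print(camera.get_settings())
camera.stop()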
Example #9
def open_camera(cam='/dev/psEye', w=320, h=240, fps=100):
    """
    Opens camera and configures height, width, and fps settings.

    INPUTS
        (optional)
        cam -- str/int -- camera descriptor for VideoCapture; '/dev/psEye'
        w   -- int     -- width (px) to set camera frame; 320
        h   -- int     -- height (px) to set camera frame; 240
        fps -- int     -- frames per second to set camera; 100

    OUTPUTS
        cv2.VideoCapture object

    EXCEPTIONS
        Raises RuntimeError when unable to open camera
    """
    if type(cam) not in (str, int):
        raise TypeError('\'cam\' must be int or str')
    if type(w) != int:
        raise TypeError('\'w\' must be int')
    if type(h) != int:
        raise TypeError('\'h\' must be int')
    if type(fps) != int:
        raise TypeError('\'fps\' must be int')
    cap = VideoCapture(cam)
    if not cap.isOpened():
        cap.release()
        raise RuntimeError('failed to open camera \'%s\'' % cam)
    cap.set(CAP_PROP_FRAME_HEIGHT, h)
    cap.set(CAP_PROP_FRAME_WIDTH, w)
    cap.set(CAP_PROP_FPS, fps)
    return cap
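Example usage (the '/dev/psEye' default is specific to the original setup; note that set() requests are best-effort and drivers may ignore them):

cap = open_camera(cam=0, w=320, h=240, fps=60)
ok, frame = cap.read()
print(ok, frame.shape if ok else None)
cap.release()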
Example #10
def read_images(photos_path, video_path):
    cap = VideoCapture(
        video_path)  # Play the video to capture it frame by frame

    fps = cap.get(CAP_PROP_FPS)  # Get the video's fps

    print(f'fps: {fps}')

    mkdir(photos_path)

    currentFrame = 0

    while True:
        ret, frame = cap.read()  # Grab one frame
        if not ret or frame is None:
            break
        # Save the capture
        name = prt(photos_path, currentFrame)
        imwrite(name, frame)

        currentFrame += 1  # Image index

    # Close the video
    cap.release()
    destroyAllWindows()
Example #11
def Extract_Frames(source, fps=1, dest=None):
    '''Extracts frames from a given source animation; `fps` is a frame-sampling interval (keep roughly one frame in every `fps`), `dest` an optional destination'''
    
    from Webscraping import USER
    from pathlib import Path
    from cv2 import VideoCapture, imencode, CAP_PROP_POS_FRAMES
    
    path = Path(source)
    
    if dest is None:
        dest = USER / 'Pictures' / 'Screenshots' / path.stem
        
    if dest.exists():
        for file in dest.iterdir():
            file.unlink()

    dest.mkdir(exist_ok=True)
        
    vidcap = VideoCapture(source)
    success, frame = vidcap.read()

    while success:

        if vidcap.get(CAP_PROP_POS_FRAMES) % fps in (0, 1):

            image = dest / f'{vidcap.get(CAP_PROP_POS_FRAMES)}.jpg'
            image.write_bytes(imencode('.jpg', frame)[-1])

        success, frame = vidcap.read()

    vidcap.release()
Example #12
def getMP4Length(x):
    cap = VideoCapture(x)
    fps = cap.get(CAP_PROP_FPS)  # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
    frame_count = int(cap.get(CAP_PROP_FRAME_COUNT))
    cap.release()
    duration = frame_count / fps
    return duration
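Worth noting: CAP_PROP_FPS can come back 0.0 for some containers, which would raise ZeroDivisionError; a guarded sketch of the same calculation:

def getMP4LengthSafe(x):
    cap = VideoCapture(x)
    fps = cap.get(CAP_PROP_FPS)
    frame_count = int(cap.get(CAP_PROP_FRAME_COUNT))
    cap.release()
    return frame_count / fps if fps > 0 else 0.0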
Example #13
def make_video(photos_path, video_path, video_name):
    # v_path is assumed to be a module-level path to the source video;
    # it is only used here to recover the original fps
    cap = VideoCapture(v_path)
    fps = cap.get(CAP_PROP_FPS)
    cap.release()

    mkdir(video_path)

    images_cnt = jpgcount(photos_path)

    print(f'{images_cnt} images')

    img = []

    # Load the images
    for i in range(images_cnt):
        img.append(imread(photos_path + '/frame' + str(i) + '.jpg'))

    height, width, _ = img[0].shape

    # Initialize the video writer
    video = VideoWriter(video_path + '/' + video_name + '.mp4',
                        VideoWriter_fourcc(*'MP4V'), fps, (width, height))

    print('Encoding video')

    # Write each image
    for j in range(images_cnt):
        video.write(img[j])

    # Clean up
    destroyAllWindows()
    video.release()

    print('Video encoded')
Example #14
    def getEpisodeInfo(self, names, season):

        for number, name in enumerate(names, 1):

            if number in self.episodeNumbers:

                input_file = self.path + 'episodio-' + str(number) + '.mp4'

                videoInfo = VideoCapture(input_file)
                fps = videoInfo.get(CAP_PROP_FPS)
                frameCount = videoInfo.get(CAP_PROP_FRAME_COUNT)

                duration = self.getCorretDuration(
                    int(round(frameCount / fps, 0)))
                quality = videoInfo.get(4)  # prop 4 == CAP_PROP_FRAME_HEIGHT

                episodeData = {
                    'temporada': season + 1,
                    'episodio': number,
                    'nome': name,
                    'duracao': duration,
                    'thumb': 'thumb-' + str(number) + '.png',
                    'qualidade': str(quality)[:-2] + 'p'
                }
                self.episodeInfoList.append(episodeData)

                videoInfo.release()
Example #15
class Camera_from_url(object):
    """Camera_from_url captures video from a <url>.mjpg source
    using open cv2 "VideoCapture" and encodes it as jpeg using imencode

    :param object: <URL>.mjpg
    :type object: mjpg
    :return: jpeg in raw bytes
    :rtype: bytes
    """
    def __init__(self):
        pass  # nothing to do until init_teststand() provides a URL

    def init_teststand(self, url):
        if url is None:
            self.video = None
        else:
            self.video = VideoCapture(url)

    def __del__(self):
        if self.video is not None:
            self.video.release()

    def get_frame(self):
        ret, image = self.video.read()
        if not ret:
            return None  # guard: resize would crash on a failed read
        image = resize(image, (1920, 1080))
        _, jpeg = imencode('.jpg', image)
        return jpeg.tobytes()
Example #16
def WebCam_OnOff(device_num: int, cam: cv2.VideoCapture = None):
    """
    Opens (or releases) a web camera.

    Args:
        device_num(int): camera device number
            0: built-in PC camera
            1: external camera
        cam(cv2.VideoCapture, optional): currently connected camera handle

    Returns:
        response(int): status flag
            0: connect
            1: release
            2: NotFound
        capture(cv2.VideoCapture): the connected device handle
            cv2.VideoCapture: connect
            None: release or NotFound
    """
    if cam is None:  # no camera connected yet
        cam = cv2.VideoCapture(device_num)
        # failed to open the camera
        if not cam.isOpened():
            return 2, None
        # opened successfully
        else:
            return 0, cam

    else:  # a camera was already connected
        cam.release()
        return 1, None
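A toggle-style usage sketch:

status, cam = WebCam_OnOff(0)           # open: status 0 on success, 2 if not found
if status == 0:
    status, cam = WebCam_OnOff(0, cam)  # passing the handle back releases it (status 1)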
Example #18
class Processer:
    def __init__(self, input_file, output_file, FPS=30, max_count=-1):
        self.vidcap = VideoCapture(input_file)
        success, self.image = self.vidcap.read()
        self.width = int(self.vidcap.get(4))
        self.height = int(self.vidcap.get(3))
        fourcc = VideoWriter_fourcc(*'XVID')
        self.videoWriter = VideoWriter(output_file, fourcc, float(FPS),
                                       (self.height, self.width))
        self.max_count = max_count

    def run(self, proc_func, end_proc=None):
        count = 0
        success = True
        while success and (count < self.max_count or self.max_count == -1):
            result = proc_func(self.image, count, self.width, self.height)
            self.videoWriter.write(result)
            success, self.image = self.vidcap.read()
            print('Count:', count)
            count += 1

        if end_proc is not None:
            end_proc(self.videoWriter)
        self.videoWriter.release()
        self.vidcap.release()
        print('Total count:', count)
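A usage sketch; proc_func receives each BGR frame plus bookkeeping values and must return a frame of the declared size (here an identity copy; file names are placeholders):

def passthrough(image, count, width, height):
    return image  # no-op processing

proc = Processer('input.avi', 'output.avi', FPS=30, max_count=100)
proc.run(passthrough)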
Example #19
def stream():
    prepare_stream()
    global net, cap
    cap = VideoCapture(2)
    time.sleep(1)
    while(True):

        # Load frame from the camera
        ret, frame = cap.read()

        # Image pre-processing
        frame = mx.nd.array(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).astype('uint8')

        # Apply GluonCV pre-processing
        #rgb_nd, scaled_frame = gcv.data.transforms.presets.yolo.transform_test(frame, short=608, max_size=1024)
        rgb_nd, scaled_frame = gcv.data.transforms.presets.ssd.transform_test(frame, short=512, max_size=1024)

        # Run inference on the frame
        class_IDs, scores, bounding_boxes = net(rgb_nd.as_in_context(mx.gpu(0)))
        scale = 1.0 * frame.shape[0] / scaled_frame.shape[0]


        img = gcv.utils.viz.cv_plot_bbox(frame.asnumpy(), bounding_boxes[0], scores[0], class_IDs[0],
                                        class_names=klasses, scale=scale)
        gcv.utils.viz.cv_plot_image(img)

        # Display frame
        #cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #20
def main(filename, win_size, win_name, screen_fit=False, speed=1):
    '''
    opens window and responds to user input
    closes when user presses "q"
    '''
    cap = VideoCapture(filename)
    namedWindow(win_name, 1)

    follow = None

    while cap.isOpened():
        if follow == 'q':
            break
        if follow in binds:
            follow = play(binds[follow][0],
                          binds[follow][1],
                          cap,
                          win_size=win_size,
                          win_name=win_name,
                          speed=speed,
                          screen_fit=screen_fit)
        else:
            follow = wait(cap,
                          win_size=win_size,
                          win_name=win_name,
                          screen_fit=screen_fit)

    cap.release()
    destroyAllWindows()
Example #21
def main(args):
  apg1.add_argument("--subtitle", type=FileType("r"), help="subtitle file for -text")
  apg1.add_argument("--subtitle-placeholder", type=str, default="#", help="placeholder for subtitle")
  apg1.add_argument("--mon-background", type=str, default=None, help="replacement back-color for mon (default -key-color)")
  readSrt = lambda it: srt.parse(it.read())

  cfg = app.parse_args(args)
  cfg.font = ImageFont.truetype(cfg.font, cfg.font_size) if cfg.font is not None else ImageFont.load_default()
  cfg.key_color = colorFromHtml(cfg.key_color)

  print(f"{cfg.font_size}px, {cfg.key_color} ±{cfg.key_thres} {cfg.spacing}")
  cfg.calc_draw_color = lambda c: None if isColorNearTo(cfg.key_color, cfg.key_thres, c) else c
  for path in cfg.images:
    (name, ext) = fileExtNameSplit(path)
    if ext in "mp4 webm mkv flv".split(" "):
      cap = VideoCapture(path)
      (fps, count, width, height) = cv2VideoInfo(cap)
      print(f"{fps}fps*{count} {width}x{height}")

      mon = Montage(cfg, (width, height) )
      playCvMontage(cap, mon, filename=f"{name}_mon.avi", subtitle=let(readSrt, cfg.subtitle), placeholder=cfg.subtitle_placeholder)
      cap.release()
    else:
      image = Image.open(path)
      mon = Montage(cfg, image.size)
      mon.runOn(image).save(f"{name}_mon.png")
Example #22
 def camera(camflag, s4):
     global vloop
     #global camflag
     try:
         caps = VideoCapture(camflag)
         _, fram = caps.read()
     except:
         s4.sendall(b'cambusy')  # str.encode() would raise on a bytes literal
         print('cam busy')
         vloop = False
         return 0
     caps.release()
     # ndarray.shape is (rows, cols, channels), i.e. height first
     height, width, _ = fram.shape
     caps = VideoCapture(camflag)
     s4.sendall(str.encode(str(height) + ':' + str(width)))
     while vloop:
         ret, frame = caps.read()
         a = gzip.compress(pickle.dumps(frame), 9)
         s4.sendall(str.encode(str(len(a))))
         while a:
             chk = a[:3072]
             s4.sendall(chk)
             a = a[3072:]
         time.sleep(0.4)
     caps.release()
     s4.close()
     del s4
Example #23
def draw_speed(video_path: str, speed_path: str, output_video: str) -> None:
    reader = VideoCapture(video_path)
    writer = VideoWriter(
        output_video,
        VideoWriter_fourcc(*"mp4v"),
        20,
        (640, 480),
    )
    data = loadtxt(speed_path, delimiter="\n", dtype="float32")

    frame_id = 0
    while reader.isOpened():
        ret, frame = reader.read()
        if not ret:
            break

        putText(
            frame,
            f"{data[frame_id]:0.3f}",
            (250, 420),
            FONT_HERSHEY_SIMPLEX,
            0.7,
            (255, 255, 255),
            2,
        )
        writer.write(frame)

        frame_id += 1
        if frame_id == data.shape[0]:
            break

    reader.release()
    writer.release()
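A usage sketch (file names are placeholders; the writer above assumes 640x480 frames, 20 fps output, and one float speed value per line in the text file):

draw_speed('drive.mp4', 'speeds.txt', 'drive_annotated.mp4')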
Example #24
def web_cam(face_detector=default_face_detector,
            emotion_classifier=default_emotion_classifier):

    video_capture = VideoCapture(0)

    while True:
        on_failed_to_load_camera(video_capture)

        _, frame = video_capture.read()

        face_bounding_boxes = face_detector.detect(image=frame)

        draw_bounding_boxes(image=frame, bouding_boxes=face_bounding_boxes)

        faces = extract_objects(
            image=frame,
            bounding_boxes=face_bounding_boxes,
        )

        if len(faces) != 0:
            results = emotion_classifier.predict(images=faces, verbose=1)
            for r, bbox in zip(results, face_bounding_boxes):
                cv2.putText(frame,
                            str(r[0][0]), (bbox[0], bbox[-1]),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            .5, (255, 255, 255),
                            lineType=cv2.LINE_AA)

        if waitKey(1) & 0xFF == ord('q'):
            break

        imshow('Web Camera Emotion Classification', frame)

    video_capture.release()
    destroyAllWindows()
Example #25
def captureImage(image_dir, number_of_images=3):
    img_files_count = len([
        name for name in os.listdir(image_dir)
        if os.path.isfile(os.path.join(image_dir, name))
    ])

    image_files = []
    for i in range(0, number_of_images):
        camera = VideoCapture(0)
        if not camera.isOpened():
            with open(LOGS_DIR + "error.log", "a") as f:
                f.write("[Error " +
                        str(datetime.now().strftime("%b %d, %Y %H:%M:%S")) +
                        "] Could not open video device\n")
            camera.release()
            continue
        return_value, image = camera.read()
        camera.release()
        if not return_value:
            continue
        img_file = datetime.now().strftime(image_dir + "/image_%d%b%y-" +
                                           str(img_files_count) + ".jpg")
        imwrite(img_file, image)
        image_files.append(img_file)
        img_files_count += 1
        sleep(3)
    return image_files
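Usage sketch (the directory is a placeholder and must already exist, since the function counts the files inside it):

shots = captureImage('/tmp/captures', number_of_images=2)
print(shots)  # e.g. ['/tmp/captures/image_01Jan24-0.jpg', ...]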
Example #26
def main():
    if len(sys.argv) > 1:
        casc_path = sys.argv[1]
    else:
        casc_path = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"  # Path on fedora 25

    video_capture = VideoCapture(0)
    face_detector = FaceDetector(casc_path, min_face_dim=(200, 200))

    w, h = 0, 0
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        gray = cvtColor(frame, COLOR_RGB2GRAY)  # NB: frame is BGR, so COLOR_BGR2GRAY is the strictly correct flag

        faces = face_detector.detect(gray)

        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            print(w, h)
            if w > 300 and h > 300 and len(
                    faces) == 1 and GetCoffeeThread.nb_instance == 0:
                imwrite("/tmp/face.png", frame[y:y + h, x:x + w])
                GetCoffeeThread(GATEWAY_ROUTE, "/tmp/face.png").start()

        # Display the resulting frame
        imshow('Video', frame)
        if waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
Example #27
def extractFrames(frames, src, dst):
    reader = VideoCapture(src)

    frame_num = 1

    while reader.isOpened():
        running, frame = reader.read()
        if not running:
            break
        if frame_num > frames:
            break

        #Extract face, with 25 pixels margin
        loc = face_recognition.face_locations(frame)

        if (len(loc) == 0):
            face = frame
        else:
            loc = sorted(
                loc,
                key=cmp_to_key(lambda x, y: (y[2] - y[0]) * (y[1] - y[3]) -
                               (x[2] - x[0]) * (x[1] - x[3])))
            # NB: a face within 25 px of the top/left edge makes these indices
            # negative and the slice silently wraps; clamping with max(0, ...)
            # would be safer
            face = frame[loc[0][0] - 25:loc[0][2] + 25,
                         loc[0][3] - 25:loc[0][1] + 25]

        face = cv2.resize(face, (299, 299))
        imwrite(join(dst, '%d.jpg' % frame_num), face)
        frame_num += 1

    reader.release()
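Usage sketch (paths are placeholders; requires the face_recognition package and an existing destination directory):

extractFrames(100, 'input.mp4', 'faces_out')  # saves up to 100 cropped 299x299 faces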
Example #28
def GetFrames(fileName, redFact=0.5, skipLength=1, debug=False):
    '''
    returns numpy array of frames
    '''
    cap = VideoCapture(fileName)

    frameList = []
    cnt = -1

    if debug:
        print("Started creating Frame List")

    while True:
        retval, image = cap.read()
        if not retval:
            break
        cnt = (cnt + 1) % skipLength  # keep every skipLength-th frame
        if cnt != 0:
            continue
        image = cv2.resize(image, None, fx=redFact, fy=redFact)
        image = image[:, :, ::-1]  # BGR -> RGB
        image = np.array(image, dtype=np.uint8)
        frameList.append(image)
    cap.release()

    if debug:
        print("Finished creating Frame List")
    frameList = np.array(frameList)
    print("Extracted", frameList.shape[0], "frames from this video")
    return frameList
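Usage sketch (the file name is a placeholder):

frames = GetFrames('clip.mp4', redFact=0.25, skipLength=5)
print(frames.shape)  # (n_frames, H, W, 3), RGB channel order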
Example #29
def main():
    # Image size
    img_shape = (224, 224, 3)
    # Load the Japanese label dictionary
    ja_labels = load_japanese_labels('src/imagenet_class_index_ja.json')
    # Initialize the camera
    cam = VideoCapture(0)
    # Load the model
    model = load_mobilenet(img_shape)

    width = cam.get(CAP_PROP_FRAME_WIDTH)
    height = cam.get(CAP_PROP_FRAME_HEIGHT)
    fps = cam.get(CAP_PROP_FPS)
    print("Image Capture FPS: %d" % (fps))
    print('Image Capture Size: width=%d height=%d' % (width, height))

    # Connect to Julius (the speech recognition engine)
    sock = connect_julius()

    # Hello, Raspberry Pi!  (speaks "Good morning" via text-to-speech)
    call_jtalk("おはようございます。")
    try:
        while True:
            data = receive_voice(sock, cam)
            if data != "":
                execute_command(data, cam, model, ja_labels)
    except KeyboardInterrupt:
        print("Bye.")

    cam.release()
Example #30
def videoGetter(filename,
                outdir='out/prepro/',
                update_rate=50):
    DEST_IMG_SIZE = (270, 480)
    cap = VideoCapture(filename)
    frameCount = int(cap.get(CAP_PROP_FRAME_COUNT))
    frameWidth = int(cap.get(CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(CAP_PROP_FRAME_HEIGHT))
    status = Progbar(frameCount, text=filename)
    # Note that cv2 frames are indexed (height, width, channels)
    buf = empty(
        (frameCount, DEST_IMG_SIZE[1], DEST_IMG_SIZE[0], 3), dtype('int8'))
    middle = empty((DEST_IMG_SIZE[1], DEST_IMG_SIZE[0], 3), dtype('uint8'))
    fc = 0
    while fc < frameCount:
        ret, temp = cap.read()
        if not ret:
            break
        # resize the frame just read (the original resized an uninitialized
        # buffer instead of the captured frame)
        middle[:, :, :] = resize(
            temp, None, fx=0.25, fy=0.25, interpolation=INTER_AREA)
        buf[fc] = (middle.astype('int8') - 255 // 2)  # roughly center values around zero
        if fc % update_rate == 0:
            status.update(fc)
        fc += 1
    cap.release()
    del cap, middle
    filename = filename.rsplit('/', 1)[1]
    outpath = outdir + filename.rsplit('.', 1)[0] + '.npy'
    return save(outpath, buf)
Example #31
    def video2imgs(self, video, size):

        from cv2 import VideoCapture
        from cv2 import cvtColor, resize
        from cv2 import COLOR_BGR2GRAY
        from cv2 import INTER_AREA

        img_list = []

        # Create a VideoCapture object from the given file
        cap = VideoCapture(video)

        # isOpened() stays True once cap is initialized, so this is
        # effectively a while-True loop
        while cap.isOpened():
            # cap.read() returns:
            #   ret   -- whether a frame was read
            #   frame -- the image as a numpy.ndarray
            ret, frame = cap.read()
            if ret:
                # Convert to grayscale; skip this step to produce a color
                # character video instead
                gray = cvtColor(frame, COLOR_BGR2GRAY)

                # Resize so the resulting character art fits fully in the
                # terminal
                img = resize(gray, size, interpolation=INTER_AREA)

                # Store the converted frame
                img_list.append(img)
            else:
                break

        # Release resources when done
        cap.release()

        return img_list
Example #32
def WebCam_OnOff(device_num: int,
                 cam: cv2.VideoCapture = None) -> cv2.VideoCapture:
    """
    Opens (or releases) a web camera.

    Args:
        device_num (int):
            camera device number
            0: built-in PC camera
            1: external camera
        cam (cv2.VideoCapture):
            currently connected camera handle

    Return:
        response (int):
            status flag
            0: camera released
            1: camera connected
            -1: error
        cam (cv2.VideoCapture):
            the connected device handle
    """
    if cam is None:  # no camera connected yet
        cam = cv2.VideoCapture(device_num)
        # failed to open the camera
        if not cam.isOpened():
            return -1, None
        # opened successfully
        else:
            return 1, cam

    else:  # a camera was already connected
        cam.release()
        return 0, None
Example #33
 def load_background(self):
     try:
         bg = np.load(self.fh.make_path('background.npz', mode=self.fh.BL))
         background = bg['computations']
         background_image = bg['image']
     except Exception:
         # No cached background: compute it from the baseline movie, once
         # blurred (for computation) and once unblurred (for display)
         blmov = VideoCapture(self.fh.get_path(self.fh.BL, self.fh.MOV))
         valid, background, ts = self.get_frame(blmov, n=-1, blur=True)
         blmov.release()

         blmov = VideoCapture(self.fh.get_path(self.fh.BL, self.fh.MOV))
         valid, background_image, ts = self.get_frame(blmov, n=-1, blur=False)
         blmov.release()

         np.savez(self.fh.make_path('background.npz', mode=self.fh.BL), computations=background, image=background_image)
     self.background, self.background_image = background, background_image
Example #34
def caputure():
    # open Camera
    cam = VideoCapture(0)
    if not cam.isOpened():
        LOGGER.debug('FAILED to open camera!!!')
        return None

    # capture images; loop to let the exposure settle, keeping the last frame
    for i in range(100):
        status, img = cam.read()
    if not status:
        LOGGER.debug('FAILED to capture image!!!')
        cam.release()
        return None

    cam.release()
    return img
Example #35
def video_loop(aframes_queue,person_queue):
    vc = VideoCapture(0)
    rval, frame = vc.read()
    people = {}
    colors = ((0,0,255),(255,255,0))
    while True:
        rval, frame = vc.read()
        if frame is None:
            c = waitKey(10)
            continue
        aframe = NP.asarray(frame[:,:])
        im = Image.fromarray(frame)
        draw = ImageDraw.Draw(im)
        
        while not person_queue.empty():
            name,rect,name_size = person_queue.get()
            people[name] = {'rect' : rect, 'name_size' : name_size, 
                            'time_found' : time.time()}

        name_counter = 0
        # iterate over a copy, since entries may be popped below (mutating a
        # dict during iteration raises RuntimeError on Python 3)
        for name in list(people.keys()):
            if name_counter < 2:
                draw_name(draw, people[name], name, name_counter, colors[name_counter])
            name_counter += 1

            if time.time() > people[name]['time_found'] + 2:
                # stop displaying after 2 seconds
                people.pop(name)
                
        frame2 = NP.array(im)
        imshow('frame',frame2)


        if aframes_queue.empty():
            aframes_queue.put(aframe)
        c = waitKey(1)
        if c == 27: # exit on ESC
            break
    
    vc.release()
    destroyAllWindows()
Example #36
 def load_background(self):
     try:
         bg = np.load(os.path.join(self.background_dir,'%s_background.npz'%self.background_name))
         background = bg['computations']
         background_image = bg['image']
     except Exception:
         #print "Acquiring background information..."
         #print os.path.join(self.background_dir, self.background_name+'-cam0.avi')
         # NB: VideoCapture does not raise on a missing file, so this
         # try/except fallback only fires if the constructor itself errors
         try:
             blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam.avi'))
         except:
             blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam0.avi'))
         valid, background = self.get_frame(blmov, n=-1, blur=True)
         blmov.release()
         
         try:
             blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam.avi'))
         except:
             blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam0.avi'))
         valid, background_image = self.get_frame(blmov, n=-1, blur=False)
         blmov.release()
         
         np.savez(os.path.join(self.background_dir,'%s_background'%self.background_name), computations=background, image=background_image)
     return background, background_image
Example #37
class camera(object):
    '''
    Object containing camera information
    Call-able, retrieve current frame in camera buffer
    
    User accessible attributes:
        device        system device number
        resolution    camera resolution
        BGRimage      image in BGR format
        HSVimage      image in HSV format
        RGBimage      image in RGB format
        FPS           camera speed in FPS
        
    User accessible methods:
        close         close camera device
    '''
    def __init__(self, cam_num = -1, resolution = (640, 480)):
        '''
        create camera object
            cam_num            device number (integer)
            resolution         image resolution (tuple width x height)
        '''
        self.device = cam_num
        self.resolution = resolution
        self.BGRimage = []
        self.HSVimage = []
        self.RGBimage = []
        self.FPS = [0, 0]
        self.__avr = 0
        #assign and open device
        self.__capture = VideoCapture(cam_num)
        self.__capture.set(CV_CAP_PROP_FRAME_WIDTH,resolution[0])
        self.__capture.set(CV_CAP_PROP_FRAME_HEIGHT,resolution[1])
        self.__capture.open
        self.__flag = False
        t0 = time()
        self.__flag, self.BGRimage = self.__capture.read()
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = self.FPS[0]
        self.__avr = self.FPS[0]
        print "camera", self.device, "ready @", self.FPS[0], "fps"
        return
    def __call__(self, frame_delay = 0, fast = False):
        '''
        retrieve current frame in camera buffer
            frame_delay        delay the frame decoding (integer)
            fast               if true don't decode image to RGB format (logic)    
        '''
        #set timer to meassure fps
        self.__avr = self.FPS[1]
        t0 = time()
        #try to retrieve current frame
        while not self.__flag:
            if frame_delay > 0:
                for i in xrange(frame_delay + 1):
                    self.__capture.grab()
                self.__flag, self.BGRimage = self.__capture.retrieve()
                del i
            else:
                self.__flag, self.BGRimage = self.__capture.read()
        self.__flag = False
        #decode bgr format to hsv
        self.HSVimage = cvtColor(self.BGRimage, CV_BGR2HSV)
        if fast:
            self.FPS[0] = 1/(time()-t0)
            self.FPS[1] = (self.FPS[0]+self.__avr)/2
            return
        #decode bgr format to rgb
        self.RGBimage = cvtColor(self.BGRimage, COLOR_BGR2RGB)
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = (self.FPS[0]+self.__avr)/2
        return
    def __str__(self):
        '''
        return camera information;
            device number
            device resolution
            instant speed
            average speed
        '''
        tmp = "camera object @ dev "+str(self.device)+", resolution: "+str(self.resolution)
        tmp = tmp +", fps: "+str(self.FPS[0])+", Avr. fps: "+str(self.FPS[1])
        return tmp
    def __del__(self):
        '''
        when the object is deleted, it closes the device
        '''
        self.close()
        return
    def close(self):
        '''
        close device, making it available to use 
        '''
        #if the device is open then close it
        if self.__capture.isOpened():
            self.__capture.release()
            print "camera", self.device, "closed"
        return
Example #38
print "At frame: ", video.get(1)
print "Total frames: ", frame_count, "vs. ", video.get(7)
# sleep(10)
width,height = frame_count, int(np.ceil(frame_count/(16.0/9)))
# barcode = Image.new('RGB', (width, height), (255,255,255))
# draw = ImageDraw.Draw(barcode)
# f = open("barcode.jpg", 'w')
f = open("color_codes.txt", 'a')
condition,frame = video.read()
while condition:
    print "Processing frame %d" % count
    # color = findColor(frame)
    if count % 3 == 0:
        color = findColor(frame)
        f.write(str(color) + "\n")
        # draw.line([(count/3,0), (count/3,height)], fill=tuple(color), width=1)
    count += 1
    condition,frame = video.read()
    # if count == 2:
        # break
    print "%0.3f % complete." % (video.get(1)/video.get(7))
    # barcode.save(f)
    # print "Saved."
# plt.clf()
# plt.imshow(barcode)
# plt.show()

# barcode.save(f)
f.close()
video.release()
print "Total time: %0.5f seconds." % (time() - t)
Example #39
class Camera(object):
    ''' Communicate with the camera.

    Class governing the communication with the camera.

    Parameters
    -----------
    camera : int
        the index of the camera, best taken from func lookForCameras,
        from eyetracker.camera.capture
    dic : dic{propID  value}
        to check corresponding propIDs check
        opencv documentation under the term VideoCapture. 
        They will be set in the moment of object creation.

    Defines
    --------
    self.camera : index of the camera
    self.cap : capturing object
    self.frame : returns a frame from camera
    self.close : closes cap
    self.reOpen : reopens cap
    '''
    def __init__(self, camera, dic=None):
        self.camera = int(camera)
        self.cap = VideoCapture(self.camera)
        if dic:
            for propID, value in dic.items():
                self.cap.set(propID, value)
        first_frame = self.frame()

    def frame(self):
        ''' Read frame from camera.

        Returns
        --------
        frame : np.array
            frame from camera
        '''
        if self.cap.isOpened():
            return self.cap.read()[1]
        else:
            print('Cap is not opened.')
            return None

    def set(self, **kwargs):
        ''' Set camera parameters.

        Parameters
        -----------
        kwargs : {propID : value}
        '''
        for propID, value in kwargs.items():
            self.cap.set(propID, value)

    def close(self):
        ''' Closes cap, you can reopen it with self.reOpen.
        '''
        self.cap.release()

    def reOpen(self, cameraIndex):
        ''' Reopens cap.
        '''
        self.cap.open(self.camera)
        first_frame = self.frame()
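A usage sketch; the propID keys are standard cv2 capture properties (the exact values here are illustrative):

import cv2

cam = Camera(0, dic={cv2.CAP_PROP_FRAME_WIDTH: 640,
                     cv2.CAP_PROP_FRAME_HEIGHT: 480})
frame = cam.frame()
cam.close()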
Example #40
class MouseTracker(object):
    def __init__(self, mouse, mode,  data_directory='.', diff_thresh=100, resample=8, translation_max=100, smoothing_kernel=19, consecutive_skip_threshold=2, selection_from=[]):
        self.mouse = mouse
        self.data_dir = data_directory
        self.mode = mode
        self.selection_from = selection_from
        
        # Parameters (you may vary)
        self.diff_thresh = diff_thresh
        self.resample = resample
        self.translation_max = translation_max
        self.kernel = smoothing_kernel
        self.consecutive_skip_threshold = (37./self.resample) * consecutive_skip_threshold

        # Parameters (you should not vary)
        self.duration = 1
        self.cth1 = 0
        self.cth2 = 0
        plat = sys.platform
        if 'darwin' in plat:
            self.fourcc = VideoWriter_fourcc('m','p','4','v')  # CV_FOURCC in pre-3.x OpenCV
        elif plat[:3] == 'win':
            self.fourcc = 1
        else:
            self.fourcc = -1

        fh = FileHandler(self.data_dir, self.mouse)
        self.background_name = fh[mode][BACKGROUND][NAME]
        self.background_dir = fh[mode][BACKGROUND][DIR]
        self.trial_name = fh[mode][TRIAL][NAME]
        self.trial_dir = fh[mode][TRIAL][DIR]

        self.background, self.background_image = self.load_background()
        self.height, self.width = np.shape(self.background)
        
        timefile = os.path.join(self.trial_dir, self.trial_name+'-timestamps.json')
        self.time = json.loads(open(timefile,'r').read())
        vidfile = os.path.join(self.trial_dir, self.trial_name+'-cam.avi')
        if not os.path.exists(vidfile):
            vidfile = os.path.join(self.trial_dir, self.trial_name+'-cam0.avi')
        if not os.path.exists(vidfile):
            raise Exception('Movie %s not found.'%vidfile)
        self.mov = VideoCapture(vidfile)

        self.results = {}
        self.results['centers'] = []
        self.results['centers_all'] = []
        self.results['left'] = 0
        self.results['right'] = 0
        self.results['middle'] = 0
        self.results['left_assumed'] = 0
        self.results['right_assumed'] = 0
        self.results['middle_assumed'] = 0
        self.results['skipped'] = 0
        self.results['heat'] = np.zeros(np.shape(self.background))
        self.results['n_frames'] = 0
        self.results['params'] = [self.diff_thresh, self.kernel, self.translation_max, self.resample]
        self.results['params_key'] = ['diff_thresh','kernel','translation_max','resample']

        self.path_l, self.path_r, self.path_c, self.rooms_mask, self.paths_ignore, self.last_center = self.get_pt_selections()
    def end(self):
        np.savez(os.path.join(self.trial_dir,'%s_tracking'%self.trial_name), **self.results)
        savemat(os.path.join(self.trial_dir,'%s_tracking'%self.trial_name), self.results)
        
        self.mov.release()
        destroyAllWindows()
    def get_pt_selections(self):
        valid,first = self.get_frame(self.mov, blur=False, n=30)
        try: #did they select for this trial
            pts = np.load(os.path.join(self.trial_dir, '%s_selections.npz'%self.trial_name))
            pts_l = pts['pts_l']
            pts_r = pts['pts_r']
            pts_c = pts['pts_c']
            pts_mouse = pts['pts_mouse']
            regions_ignore = pts['regions_ignore']
        except:
            found_rooms = False
            found_ignore = False
            for sf in self.selection_from:
                fh = FileHandler(self.data_dir, sf)
                s_trial_name = fh[self.mode][TRIAL][NAME]
                s_trial_dir = fh[self.mode][TRIAL][DIR]
                
                try:
                    pts = np.load(os.path.join(s_trial_dir, '%s_selections.npz'%s_trial_name))
                    
                    if not found_rooms:
                        pts_l = pts['pts_l']
                        pts_r = pts['pts_r']
                        pts_c = pts['pts_c']
                        
                        plimshow(first, cmap=mpl_cm.Greys_r)
                        title('Good room corners? If so, click image, otherwise, close window.')
                        scatter(pts_l[:,0], pts_l[:,1], c='b', marker='o')
                        scatter(pts_r[:,0], pts_r[:,1], c='r', marker='o')
                        scatter(pts_c[:,0], pts_c[:,1], c='g', marker='o')

                        use_rooms = ginput(1)
                        close()
                        # ginput returns a list of points; truthiness tests emptiness
                        if use_rooms:
                            found_rooms = True
                    if not found_ignore:
                        plimshow(first, cmap=mpl_cm.Greys_r)
                        title('Good ignore regions? If so, click image, otherwise, close window.')
                        regions_ignore = pts['regions_ignore']
                        [scatter(ptsi[:,0], ptsi[:,1], c='k',marker='x') for ptsi in regions_ignore]
                        use_ignore = ginput(1)
                        close()
                        if use_ignore:
                            found_ignore = True

                    if found_rooms and found_ignore:
                        break
                except:
                    pass
                
            plimshow(first, cmap=mpl_cm.Greys_r)
            title("Select mouse (4 pts).")
            pts_mouse = ginput(4)
            if not found_rooms:
                title("Select left room- around the corners in order.")
                pts_l = ginput(4)
                title("Select right room- around the corners in order.")
                pts_r = ginput(4)
                title("Select middle room- around the corners in order.")
                pts_c = ginput(4)
            if not found_ignore: 
                valid,frame = self.get_frame(self.mov)
                diff = absdiff(frame,self.background)
                _, diff = threshold(diff, self.diff_thresh, 1, THRESH_BINARY)
                plimshow(np.ma.masked_where(diff==0, diff), cmap=mpl_cm.jet)
                regions_ignore = []
                title("Select cups and any other region to ignore. (10 pts per)")
                while True:
                    pts_ignore = ginput(10)
                    if not pts_ignore:  # empty list: user clicked nothing, done
                        break
                    regions_ignore.append(pts_ignore)
                regions_ignore = np.array(regions_ignore)
            close()
            np.savez(os.path.join(self.trial_dir, '%s_selections'%self.trial_name), pts_l=pts_l, pts_r=pts_r, pts_c=pts_c, pts_mouse=pts_mouse, regions_ignore=regions_ignore)
            savemat(os.path.join(self.trial_dir, '%s_selections'%self.trial_name), dict(pts_l=pts_l, pts_r=pts_r, pts_c=pts_c, pts_mouse=pts_mouse, regions_ignore=regions_ignore)) #THIS LINE
        path_l, path_r, path_c = [mpl_path.Path(pts) for pts in [pts_l,pts_r,pts_c]]
        last_center = np.round(np.mean(pts_mouse, axis=0)).astype(int)
        paths_ignore = [mpl_path.Path(pts) for pts in regions_ignore]

        rooms_mask = np.zeros(np.shape(self.background))
        for row in range(self.height):
            for col in range(self.width):
                pt = [col,row]
                rooms_mask[row][col] = path_l.contains_point(pt) or path_r.contains_point(pt) or path_c.contains_point(pt)

        return (path_l, path_r, path_c, rooms_mask, paths_ignore, last_center)
    def load_background(self):
        try:
            bg = np.load(os.path.join(self.background_dir,'%s_background.npz'%self.background_name))
            background = bg['computations']
            background_image = bg['image']
        except:
            #print "Acquiring background information..."
            #print os.path.join(self.background_dir, self.background_name+'-cam0.avi')
            try:
                blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam.avi'))
            except:
                blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam0.avi'))
            valid, background = self.get_frame(blmov, n=-1, blur=True)
            blmov.release()
            
            try:
                blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam.avi'))
            except:
                blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam0.avi'))
            valid, background_image = self.get_frame(blmov, n=-1, blur=False)
            blmov.release()
            
            np.savez(os.path.join(self.background_dir,'%s_background'%self.background_name), computations=background, image=background_image)
        return background, background_image
    def get_frame(self, mov, n=1, skip=0, blur=True):
        for s in range(skip):
            valid,fr = mov.read()
            if not valid:
                return (False, None)
        if n==-1:
            n = 1e10
        def get():
            valid, frame = mov.read()
            if not valid:
                return (False, None)
            frame = frame.astype(np.float32)
            frame = cvtColor(frame, COLOR_RGB2GRAY)
            if blur:
                frame = GaussianBlur(frame, (self.kernel,self.kernel), 0)
            return valid,frame

        valid,frame = get()
        i = 1
        while valid and i<n:
            valid,new = get()
            i += 1
            if valid:
                frame += new
        
        if frame is not None:
            frame = frame/i
        return (valid, frame)
    def run(self, show=False, save=False, tk_var_frame=None):
        if show:
            namedWindow('Movie')
            namedWindow('Tracking')
        if save:
            bgs = np.shape(self.background)
            fsize = (bgs[0], bgs[1]*2)
            writer = VideoWriter()
            writer.open(os.path.join(self.trial_dir,'%s_tracking_movie'%self.trial_name),self.fourcc,37.,frameSize=(fsize[1],fsize[0]),isColor=True)

        self.results['n_frames'] = 0
        consecutive_skips = 0
        while True:
            valid,frame = self.get_frame(self.mov,skip=self.resample-1)
            if not valid:
                break
            diff = absdiff(frame,self.background)
            _, diff = threshold(diff, self.diff_thresh, 1, THRESH_BINARY)
            diff = diff*self.rooms_mask
            edges = Canny(diff.astype(np.uint8), self.cth1, self.cth2)
            contours, hier = findContours(edges, RETR_EXTERNAL, CHAIN_APPROX_TC89_L1)
            contours = [c for c in contours if not any([pa.contains_point(contour_center(c)) for pa in self.paths_ignore])]
            if consecutive_skips>self.consecutive_skip_threshold:
                consecutive_skips=0
                possible = contours
            else:
                possible = [c for c in contours if dist(contour_center(c),self.last_center)<self.translation_max]
            
            if len(possible) == 0:
                center = self.last_center
                self.results['skipped'] += 1
                consecutive_skips+=1
                if self.path_l.contains_point(center):
                    self.results['left_assumed']+=1
                if self.path_r.contains_point(center):
                    self.results['right_assumed']+=1
                if self.path_c.contains_point(center):
                    self.results['middle_assumed']+=1
            else:
                chosen = possible[np.argmax([contourArea(c) for c in possible])]   
                center = contour_center(chosen)
                self.results['centers'].append(center)
                self.results['heat'][center[1],center[0]] += 1
                if self.path_l.contains_point(center):
                    self.results['left']+=1
                if self.path_r.contains_point(center):
                    self.results['right']+=1
                if self.path_c.contains_point(center):
                    self.results['middle']+=1
            self.results['centers_all'].append(center) 
            #display
            if show or save:
                showimg = np.copy(frame).astype(np.uint8)
                if self.path_l.contains_point(center):
                    color = (0,0,0)
                elif self.path_r.contains_point(center):
                    color = (255,255,255)
                else:
                    color = (120,120,120)
                circle(showimg, tuple(center), radius=10, thickness=5, color=color)
                if show:
                    cv2imshow('Movie',showimg)
                    cv2imshow('Tracking', diff)
                    waitKey(1)
            #/display
            if save:
                save_frame = np.zeros([fsize[0], fsize[1], 3],dtype=np.uint8)
                save_frame[:,:np.shape(frame)[1]] = cvtColor(showimg.astype(np.float32), COLOR_GRAY2RGB)
                save_frame[:,np.shape(frame)[1]:] = cvtColor((diff*255).astype(np.float32), COLOR_GRAY2RGB)
                writer.write(save_frame)
             
            self.results['n_frames'] += 1
            self.last_center = center
            if tk_var_frame is not None:
                tk_var_frame[0].set('%i/%i'%(self.results['n_frames'], len(self.time)/float(self.resample) ))
                tk_var_frame[1].update()
            #pbar.update(self.results['n_frames'])
        #pbar.finish()
        if save:
            writer.release()
        self.end()
Example #41
# Body of a capture loop; the `while True:` header is missing from this
# fragment (video_capture, background, beach, backgroundTreshold and the
# _put*_ helpers are defined elsewhere)
while True:
    ret, temp = video_capture.read()
    #temp = pyrDown(temp)
    frame2 = _putmoustache_(temp)
    frame3 = _putglass_(frame2)

    frame3 = pyrDown(frame3)
    #frame3 = pyrDown(frame3)

    height, width = frame3.shape[:2]
    for i in range(1, height, 4):
        for j in range(1, width, 4):
            # compare each channel against the background model (the original
            # used channel 0's lower bound for all three channels)
            if background[i][j][0] - backgroundTreshold <= frame3[i][j][0] <= background[i][j][0] + backgroundTreshold:
                if background[i][j][1] - backgroundTreshold <= frame3[i][j][1] <= background[i][j][1] + backgroundTreshold:
                    if background[i][j][2] - backgroundTreshold <= frame3[i][j][2] <= background[i][j][2] + backgroundTreshold:
                        for x in range(-2, 2):
                            for y in range(-2, 2):
                                frame3[i+x][j+y][0] = beach[i+x][j+y][0]
                                frame3[i+x][j+y][1] = beach[i+x][j+y][1]
                                frame3[i+x][j+y][2] = beach[i+x][j+y][2]

    #frame3 = pyrUp(frame3)
    frame3 = pyrUp(frame3)
    imshow("Video", frame3)
    if waitKey(100) & 0xFF == ord('q'):  # assumed quit key; the fragment's exit condition is missing
        break

video_capture.release()
destroyAllWindows()


Example #42
def captureTStamp(files, duration, cod,  fps=0, verbose=True):
    '''
    Saves, for a given number of minutes (duration), the video captured from
    the address given in the indicated file, plus files with the timestamps
    of every frame.

    files = [url, saveVideoFile, saveDateFile, saveMillisecondFile]
    duration = time in minutes
    cod = codec
    fps = frames per second for video to be saved
    verbose = print messages to screen

    If fps=0 it tries to read the rate from the capture; for the FE camera it
    must be given explicitly.

    For OpenCV '2.4.9.1'.

    Examples
    --------

    from cameraUtils import captureTStamp

    # for the FE camera
    duration = 1 # in minutes
    files = ['rtsp://192.168.1.48/live.sdp',
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_tsFrame.txt"]
    fpsCam = 12
    cod = 'XVID'

    captureTStamp(files, duration, cod, fps=fpsCam)

    # %% for the PTZ camera
    duration = 0.2 # in minutes
    files = ["rtsp://192.168.1.49/live.sdp",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_tsFrame.txt"]

    fpsCam = 20
    cod = 'XVID'

    captureTStamp(files, duration, cod, fpsCam)

    '''
    
    fcc = fourcc(cod[0],cod[1],cod[2],cod[3]) # video codec
    
    if verbose:
        print(files)
        print("Duration",duration,"minutes")
        print("fps",fps)
        print("codec",cod)
    
    # Initialization
    tFin = datetime.datetime.now() + datetime.timedelta(minutes=duration)

    ts = list()  # capture timestamps

    # open the capture
    cap = VideoCapture(files[0])
    while not cap.isOpened():
        cap = VideoCapture(files[0])

    print("capture opened")
    # configure the writer
    w = int(cap.get(frame_width))
    h = int(cap.get(frame_height))
    if not fps:
        fps = cap.get(prop_fps)
    # for the FE camera, pass fps explicitly: the property reports garbage
    
    out = VideoWriter(files[1], fcc, fps,( w, h), True)
    
    if verbose:
        print("capture open",cap.isOpened())
        print("frame size",w,h)
        print("output opened",out.isOpened())
    
    if not out.isOpened() or not cap.isOpened():
        out.release()
        cap.release()
        # exit function if unable to open cap or out
        return
    
    s0 = getsize(files[1]) # initial filesize before writing frame
    # First capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("first frame captured")
    # Second capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("second frame captured")
    # Third capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("third frame captured")
    
    s1 = getsize(files[1])  # size after saving 3 frames
    
    if s1 == s0:
        out.release()
        cap.release()
        print("error when saving the first 3 frames, exiting")
        return 1  # nothing was written to the output file
    print("capturing until", tFin)
    # main loop: capture frames until the scheduled end time
    while t <= tFin:
        ret, frame = cap.read()

        if ret:
            t = datetime.datetime.now()
            ts.append(t)
            out.write(frame)
            if verbose:
                print(tFin, t)
                print("seconds elapsed", cap.get(pos_msec) / 1000)
                print(frame.size)
        else:
            # still advance t, otherwise a dead stream would loop forever
            t = datetime.datetime.now()

    # end of loop
    
    # release and save
    out.release()
    cap.release()
    
    savetxt(files[2], ts, fmt=["%s"])

    if verbose:
        print('loop exited; cap and out released, timestamps saved to file')
    
    return 0  # success
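
The timestamp file written by savetxt holds one str(datetime) per line. Here is a minimal sketch for reading it back and estimating the effective capture rate; `effective_fps` is a hypothetical helper, and the parsing format assumes every entry carries a fractional-seconds part (str(datetime) omits ".%f" only when the microsecond field is exactly zero):

import datetime
import numpy as np

def effective_fps(ts_file):
    # parse the "%Y-%m-%d %H:%M:%S.%f" strings written by savetxt(..., fmt=["%s"])
    with open(ts_file) as f:
        stamps = [datetime.datetime.strptime(line.strip(), "%Y-%m-%d %H:%M:%S.%f")
                  for line in f if line.strip()]
    gaps = np.diff([s.timestamp() for s in stamps])  # seconds between frames
    return 1.0 / np.median(gaps)

Comparing this estimate against the fps passed to VideoWriter shows how far the camera's real rate drifts from the nominal one, which is exactly the problem the docstring notes for the FE camera.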
Beispiel #43
0
                    fontColor = (250,250,250)
                    gender1 = gender1+1

                count = gender1 + gender2

                dt = datetime.datetime.now()
                time_milli_secs = unix_time(dt)

                # use a name that does not shadow the stdlib json module
                record = {'timestamp' : time_milli_secs, 'gender' : gender, 'count' : count, 'gender1' : gender1, 'gender2' : gender2}

                insert_into_mongo(record)

                x1, y1, w1, h1 = faceRegions[indx]
                rectangle(img0,
                          (x1,y1),
                          (x1+w1,y1+h1),
                          (100,255,0),2)
                putText(img=img0,
                            text='Gender: ' + gender,
                            org=(x1,y1+h1-10),
                            fontFace=CV_FONT_HERSHEY_DUPLEX,
                            fontScale=0.75,
                            color=fontColor)
                
    imshow('appDemo', img0) 
    keyPressed = waitKey(2)
destroyAllWindows()
capture.release()
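
The fragment above relies on a unix_time helper that is not shown in this example; given that the result is stored as time_milli_secs, a plausible sketch of what it computes (an assumed implementation, not the original):

import datetime

def unix_time(dt):
    # milliseconds since the Unix epoch; assumed implementation,
    # the original helper is not part of this snippet
    epoch = datetime.datetime.utcfromtimestamp(0)
    return int((dt - epoch).total_seconds() * 1000)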

class MouseTracker(object):
    def __init__(self, mouse, n=1, data_dir='.', diff_thresh=80, resample=1, translation_max=50, smoothing_kernel=19, consecutive_skip_threshold=0.08, selection_from=[], point_mode='auto'):
        self.mouse = mouse
        self.n = n
        self.data_dir = data_dir
        
        # Parameters (you may vary)
        self.diff_thresh = diff_thresh
        self.resample = resample
        self.translation_max = translation_max
        self.kernel = smoothing_kernel

        # Parameters (you should not vary)
        self.cth1 = 0
        self.cth2 = 0
        plat = sys.platform
        if 'darwin' in plat:
            self.fourcc = CV_FOURCC('m','p','4','v') 
        elif plat[:3] == 'win':
            self.fourcc = 1
        else:
            self.fourcc = -1
        
        self.fh = FileHandler(self.data_dir, self.mouse, self.n)

        self.framei = 0
        self.load_time()
        self.consecutive_skip_threshold = (self.fs/self.resample) * consecutive_skip_threshold
        self.load_background()
        self.height, self.width = self.background.shape
        self.mov = VideoCapture(self.fh.get_path(self.fh.TRIAL, self.fh.MOV))
        # discard the first frame and its timestamp so they stay aligned
        self.mov.read()
        self.time = self.time[1:]
        #self.get_frame(self.mov,n=40) #MUST ADJUST TIME IF USING THIS
        self.load_pts(mode=point_mode)
        self.make_rooms()

    def end(self):
        self.results = dict(pos=self.pos, time=np.array(self.t)-self.t[0], guess=self.guess, heat=self.heat, contour=self.contour, pct_xadj=self.pct_xadj)
        np.savez(self.fh.make_path('tracking.npz'), **self.results)
        savemat(self.fh.make_path('tracking.mat'), self.results)
        
        self.mov.release()
        destroyAllWindows()
    def man_update(self, d):
        for k,v in d.items():
            setattr(self,k,v)
    def make_rooms(self):
        self.path_x = mpl_path.Path(self.pts[np.array([self.xmli,self.xoli,self.xori,self.xmri])])
        self.path_y = mpl_path.Path(self.pts[np.array([self.ymli,self.yoli,self.yori,self.ymri])])
        self.path_z = mpl_path.Path(self.pts[np.array([self.zmli,self.zoli,self.zori,self.zmri])])

        #experimental: hand in frame on x room
        self.path_x_adj = mpl_path.Path(self.pts[np.array([self.xoli,self.xoli_adj,self.xori_adj,self.xori])])
        self.xadj_mask = np.zeros((self.height,self.width))
        for iy in xrange(self.xadj_mask.shape[0]):
            for ix in xrange(self.xadj_mask.shape[1]):
                self.xadj_mask[iy,ix] = self.path_x_adj.contains_point([ix,iy])
        self.xadj_idxs = np.squeeze(np.argwhere(self.xadj_mask==True))

        self.border_mask = np.zeros((self.height,self.width))
        pthpts = self.pts[np.array([self.yoli_adj,self.yori_adj,self.ymri,self.ycri,self.zmli,self.zoli_adj,self.zori_adj,self.zmri,self.zcri,self.xmli,self.xoli_adj,self.xori_adj,self.xmri,self.xcri,self.ymli])]
        pth = mpl_path.Path(pthpts)
        for iy in xrange(self.border_mask.shape[0]):
            for ix in xrange(self.border_mask.shape[1]):
                self.border_mask[iy,ix] = pth.contains_point([ix,iy])
    def classify_pts(self):
        #stored in (x,y)
        #c: center
        #m: middle
        #o: out
        #x: bottom arm, y: left arm, z: right arm
        #l: left when going down arm, r: right when going down arm
        #pt is: [x/y/z c/m/o l/r]
        X,Y = 0,1
        def nn(pidx,n,ex=[]):
            #idxs of n closest pts to p, excluding all idxs in ex
            p = self.pts[pidx]
            ds = np.array([dist(pp,p) for pp in self.pts])
            idxs =  np.argsort(ds)
            idxs = np.array([i for i in idxs if i not in ex])
            return idxs[:n]
        def sortby(pidxs, dim):
            pts = self.pts[np.array(pidxs)]
            return pidxs[np.argsort(pts[:,dim])]
        dists = np.array([dist(self.pts_c, p) for p in self.pts])
        c3i = self.c3i[np.argsort(self.pts[self.c3i][:,0])]
        m6i = self.m6i
        o6i = self.o6i
        
        #classify them:
        xcri=ycli=c3i[0]
        ycri=zcli=c3i[1]
        zcri=xcli=c3i[2]
        temp = nn(xcri, 2, ex=c3i)
        ymli,xmri = sortby(temp, Y)
        temp = nn(ycri, 2, ex=c3i)
        ymri,zmli = sortby(temp, X)
        temp = nn(zcri, 2, ex=c3i)
        zmri,xmli = sortby(temp, Y)
        cm9 = [xcri,ycri,zcri,xmri,xmli,ymri,ymli,zmri,zmli]

        xoli = nn(xmli, 1, ex=cm9)[0]
        xori = nn(xmri, 1, ex=cm9)[0]
        yoli = nn(ymli, 1, ex=cm9)[0]
        yori = nn(ymri, 1, ex=cm9)[0]
        zoli = nn(zmli, 1, ex=cm9)[0]
        zori = nn(zmri, 1, ex=cm9)[0]
        
        #accounting for inner wall reflections:
        zol,zml = np.array([self.pts[zoli],self.pts[zmli]]).astype(np.int32)
        dd = np.sqrt(np.sum((zol-zml)**2))
        cup_dd = 0.80*dd
        #z cup:
        zol,zml = np.array([self.pts[zoli],self.pts[zmli]]).astype(np.int32)
        d_zl = zol-zml
        theta_zl = np.arctan2(*d_zl)
        l2 = zml + cup_dd * np.array([np.sin(theta_zl),np.cos(theta_zl)])
        zor,zmr = np.array([self.pts[zori],self.pts[zmri]]).astype(np.int32)
        d_zr = zor-zmr
        theta_zr = np.arctan2(*d_zr)
        r2 = zmr + cup_dd * np.array([np.sin(theta_zr),np.cos(theta_zr)])
        zr2,zl2 = r2,l2
        #y cup:
        yol,yml = np.array([self.pts[yoli],self.pts[ymli]]).astype(np.int32)
        d_yl = yol-yml
        theta_yl = np.arctan2(*d_yl)
        l2 = yml + cup_dd * np.array([np.sin(theta_yl),np.cos(theta_yl)])
        yor,ymr = np.array([self.pts[yori],self.pts[ymri]]).astype(np.int32)
        d_yr = yor-ymr
        theta_yr = np.arctan2(*d_yr)
        r2 = ymr + cup_dd * np.array([np.sin(theta_yr),np.cos(theta_yr)])
        yr2,yl2 = r2,l2
        #x cup:
        xol,xml = np.array([self.pts[xoli],self.pts[xmli]]).astype(np.int32)
        d_xl = xol-xml
        theta_xl = np.arctan2(*d_xl)
        l2 = xml + cup_dd * np.array([np.sin(theta_xl),np.cos(theta_xl)])
        xor,xmr = np.array([self.pts[xori],self.pts[xmri]]).astype(np.int32)
        d_xr = xor-xmr
        theta_xr = np.arctan2(*d_xr)
        r2 = xmr + cup_dd * np.array([np.sin(theta_xr),np.cos(theta_xr)])
        xr2,xl2 = r2,l2
        self.pts = np.rint(np.concatenate((self.pts, [zr2,zl2,yr2,yl2,xr2,xl2])))

        pts_dict = dict(pts=self.pts,xcri=xcri,ycli=ycli,ycri=ycri,zcli=zcli,zcri=zcri,xcli=xcli,xmri=xmri,ymli=ymli,ymri=ymri,zmli=zmli,xmli=xmli,zmri=zmri,xoli=xoli,xori=xori,yoli=yoli,yori=yori,zoli=zoli,zori=zori,zori_adj=-6,zoli_adj=-5,yori_adj=-4,yoli_adj=-3,xori_adj=-2,xoli_adj=-1)
        self.man_update(pts_dict)

        np.savez(self.fh.make_path('pts.npz', mode=self.fh.BL), **pts_dict)
    def verify_pts(self, add_to_all=True):
        if add_to_all:
            self.all_possible_pts += list(self.pts)
        if len(self.pts) < 15:
            return False
        if len(self.pts) > 25:
            return False
        elif len(self.pts) > 15:
            allperms = np.array(list(it.combinations(range(len(self.pts)), 15)))
            if len(allperms)>200:
                totryi = np.random.choice(range(len(allperms)),200,replace=False)
                totryi = allperms[totryi]
            else:
                totryi = allperms
        elif len(self.pts) == 15:
            totryi = [range(15)]
        
        for ptsi in totryi:
            pts = self.pts[ptsi]
            self.pts_c = np.mean(pts,axis=0)
            dists = np.array([dist(self.pts_c, p) for p in pts])
            c3i = np.argsort(dists)[:3]
            m6i = np.argsort(dists)[3:9]
            o6i = np.argsort(dists)[9:]

            good = True
            
            #test dists from center
            if np.std(dists[c3i]) > 1. or np.std(dists[m6i]) > 1. or np.std(dists[o6i]) > 2.5:
                good = False
            #x = self.background.copy()
            #for pt in pts:
            #    cv2.circle(x, tuple(pt), 4, (255,255,255), thickness=3)
            #pl.figure(2);pl.imshow(x)
            #raw_input()
            ##
            
            #test outer dists from each other
            o6 = pts[o6i]
            omindists = np.array([np.min(np.array([dist(p,pp) for p in o6])) for pp in o6])
            if np.std(omindists) > 0.6:
                good = False
           
            #test middle dists from each other
            m6 = pts[m6i]
            mmindists = np.array([np.min(np.array([dist(p,pp) for p in m6])) for pp in m6])
            if np.std(mmindists) > 0.6:
                good = False
            
            if good:
                self.c3i = c3i
                self.m6i = m6i
                self.o6i = o6i
                self.pts = pts
                return True
           
        return False
    def permute_pts(self):
        #NOT IN USE
        apts = np.array(self.all_possible_pts)
        unpts = []
        for pt in apts:
            if len(unpts)==0:
                unpts.append(pt)
            else:
                dists = np.sqrt(np.sum((pt-np.array(unpts))**2,axis=1))
                mindist = np.min(dists)
                if mindist > 5:
                    unpts.append(pt)
        unpts = np.array(unpts)
        if len(unpts)<15:
            return False
        for _ in xrange(10000):
            testpts = np.random.choice(np.arange(len(unpts)),15, replace=False)
            testpts = unpts[testpts]
            self.pts = testpts
            ##
            #x = self.background.copy()
            #for pt in self.pts:
            #    cv2.circle(x, tuple(pt), 4, (255,255,255), thickness=3)
            #pl.figure(2);pl.imshow(x)
            ##
            if self.verify_pts(add_to_all=False):
                return True
        return False
    def load_pts(self, mode='auto'):
        try:
            self.man_update(np.load(self.fh.make_path('pts.npz', mode=self.fh.BL)))
        except Exception:
            if mode == 'auto':
                self.all_possible_pts = []
                invalid = True
                attempts = 0
                while invalid:
                    if attempts > 500:
                        raise Exception('Pts cannot be found.')
                    img = self.background_image.copy()
                    lp_ksizes = [13,15,17,19,21,23,25] #from 5-15 before
                    lp_ksize = rand.choice(lp_ksizes)
                    sbd_areas = [range(3,20), range(61,140)] #8,26 46,55
                    sbd_area = [rand.choice(sbd_areas[0]), rand.choice(sbd_areas[1])]
                    sbd_circs = [np.arange(0.05,0.35), range(1000,1001)]#0.19,0.35 1000
                    sbd_circ = [rand.choice(sbd_circs[0]), rand.choice(sbd_circs[1])]
                    subtr_rowmeans = rand.choice([True,False])

                    if subtr_rowmeans:
                        img = img-np.mean(img,axis=1)[:,None]
                    img = cv2.Laplacian(img, cv2.CV_32F, ksize=lp_ksize)
                    img += abs(img.min())
                    img = img/img.max() *255
                    img = img.astype(np.uint8)

                    #pl.figure(1);pl.imshow(img,cmap=pl.cm.Greys_r)
                    params = cv2.SimpleBlobDetector_Params()
                    params.filterByArea = True
                    params.filterByCircularity = True
                    params.minArea,params.maxArea = sbd_area
                    params.minCircularity,params.maxCircularity = sbd_circ
                    detector = cv2.SimpleBlobDetector(params)
                    fs = detector.detect(img)
                    pts = np.array([f.pt for f in fs])
                    pts = np.round(pts).astype(np.uint32)
                    x = img.copy()
                    for pt in pts:
                        cv2.circle(x, tuple(pt), 4, (255,255,255), thickness=3)
                    #pl.figure(2);pl.imshow(x);raw_input()
                    self.pts = pts
                    invalid = not self.verify_pts()
                    attempts += 1
            elif mode == 'manual':
                pl.imshow(self.background_image, cmap=pl.cm.Greys_r)
                pl.title('center3->middle6->outer6')
                pts = pl.ginput(n=15, timeout=-1)
                pts = np.round(pts).astype(np.uint32)
                self.pts = np.array(pts)
                pl.close()
                # note that verify is being skipped
                self.c3i,self.m6i,self.o6i = np.arange(0,3),np.arange(3,9),np.arange(9,15)
                self.pts_c = np.mean(self.pts, axis=0)
            self.classify_pts()
    def load_time(self):
        with open(self.fh.get_path(self.fh.TRIAL,self.fh.TIME),'r') as f:
            self.time = np.array(json.loads(f.read()))
        self.Ts = np.mean(self.time[1:]-self.time[:-1])
        self.fs = 1/self.Ts
    def load_background(self):
        try:
            bg = np.load(self.fh.make_path('background.npz',mode=self.fh.BL))
            background = bg['computations']
            background_image = bg['image']
        except Exception:
            blmov = VideoCapture(self.fh.get_path(self.fh.BL,self.fh.MOV))
            valid, background, ts = self.get_frame(blmov, n=-1, blur=True)
            blmov.release()
            
            blmov = VideoCapture(self.fh.get_path(self.fh.BL,self.fh.MOV))
            valid, background_image, ts = self.get_frame(blmov, n=-1, blur=False)
            blmov.release()
            
            np.savez(self.fh.make_path('background.npz',mode=self.fh.BL), computations=background, image=background_image)
        self.background, self.background_image = background, background_image
    def get_frame(self, mov, n=1, skip=0, blur=True):
        for s in range(skip):
            mov.read()
            self.framei += 1 #the number of frames that have been read
        if n == -1:
            n = float('inf')  # consume every remaining frame (used to average the whole movie)
        def get():
            valid, frame = mov.read()
            if not valid:
                return (False, None, None)
            ts = self.time[self.framei]
            self.framei += 1
            frame = frame.astype(np.float32)
            frame = cvtColor(frame, CV_RGB2GRAY)
            if blur:
                frame = GaussianBlur(frame, (self.kernel,self.kernel), 0)
            return valid,frame,ts

        valid,frame,ts = get()
        i = 1
        while valid and i<n:
            valid,new,ts = get()
            i += 1
            if valid:
                frame += new
        
        if frame is not None:
            frame = frame/i
        return (valid, frame, ts)
    def find_possible_contours(self, frame, consecutive_skips):
        self.diff = absdiff(frame,self.background)
        _, self.diff = threshold(self.diff, self.diff_thresh, 1, THRESH_BINARY)
        diff_raw = self.diff.copy()
        self.diff = self.diff*self.border_mask
        edges = Canny(self.diff.astype(np.uint8), self.cth1, self.cth2)
        contours, hier = findContours(edges, RETR_EXTERNAL, CHAIN_APPROX_TC89_L1)
        #contours = [c for c in contours if not any([pa.contains_point(contour_center(c)) for pa in self.paths_ignore])]
        if consecutive_skips>self.consecutive_skip_threshold:
            consecutive_skips=0
            possible = contours
        else:
            possible = [c for c in contours if dist(contour_center(c),self.last_center)<self.translation_max]
        return possible, diff_raw
    def choose_best_contour(self, possible):
        chosen = possible[np.argmax([cv2.arcLength(c,False) for c in possible])] #arcLength or contourArea
        center = contour_center(chosen,asint=True)[0]
        return chosen,center
    def label_frame(self, frame, center):
        showimg = np.copy(frame).astype(np.uint8)
        if self.path_x.contains_point(center):
            color = (0,0,0)
        elif self.path_y.contains_point(center):
            color = (210,210,210)
        elif self.path_z.contains_point(center):
            color = (100,100,100)
        else:
            color = (255,255,255)
        circle(showimg, tuple(center), radius=10, thickness=5, color=color)
        for pt in self.pts.astype(int):
            circle(showimg, tuple(pt), radius=4, thickness=3, color=(0,0,0))
        return showimg
    def show_frame(self, frame, wait=1):
        cv2imshow('Tracking',frame)
        waitKey(wait)
    def run(self, show=False, save=False, tk_var_frame=None, wait=1, start_pos='none'):
        #interfaces
        if show or save:
            fsize = (self.width*2, self.height)
            save_frame = np.zeros([self.height, self.width*2, 3], dtype=np.uint8)
        if show:
            namedWindow('Tracking')
        if save:
            writer = VideoWriter()
            writer.open(self.fh.make_path('tracking.avi'),self.fourcc,round(self.fs),frameSize=fsize,isColor=True)
        
        #run
        self.framei = 0
        self.pos = []
        self.t = []
        self.guess = []
        self.contour = []
        self.pct_xadj = []
        self.heat = np.zeros((self.height,self.width))
        consecutive_skips = 0
        if start_pos == 'x':
            start_pts = [self.xori_adj, self.xoli_adj]
        elif start_pos == 'y':
            start_pts = [self.yori_adj, self.yoli_adj]
        elif start_pos == 'z':
            start_pts = [self.zori_adj, self.zoli_adj]
        elif start_pos == 'c':
            start_pts = [self.zori, self.xori, self.yori]
        elif start_pos == 'none':
            start_pts = [self.zori, self.xori, self.yori]
            consecutive_skips = self.consecutive_skip_threshold+1
        self.last_center = np.mean(self.pts[np.array(start_pts)],axis=0).astype(int)
        self.last_contour = np.array([self.last_center])
        valid,frame,ts = self.get_frame(self.mov,skip=self.resample-1)
        while valid:
            possible,diff_raw = self.find_possible_contours(frame,consecutive_skips)
            self.pct_xadj.append(np.mean( diff_raw[self.xadj_idxs[:,0],self.xadj_idxs[:,1]]))
            
            if len(possible) == 0:
                center = self.last_center
                contour = self.last_contour
                self.guess.append(True)
                consecutive_skips+=1
            else:
                contour,center = self.choose_best_contour(possible)
                self.guess.append(False)
                consecutive_skips = 0
            self.pos.append(center)
            self.contour.append(contour)
            self.t.append(ts)
            self.heat[center[1],center[0]] += 1
            
            if show or save:
                lframe = self.label_frame(frame, center)
                save_frame[:,:self.width, :] = cvtColor(lframe.astype(np.float32), CV_GRAY2RGB)
                save_frame[:,self.width:, :] = cvtColor((self.diff*255).astype(np.float32), CV_GRAY2RGB)
            if show:
                self.show_frame(save_frame, wait=wait)
            if save:
                writer.write(save_frame)
             
            self.last_center = center
            self.last_contour = contour
            valid,frame,ts = self.get_frame(self.mov,skip=self.resample-1)
        
            if tk_var_frame is not None:
                # self.results is not populated until end(); report frames processed so far
                tk_var_frame[0].set('%i/%i'%(len(self.pos), len(self.time)/float(self.resample) ))
                tk_var_frame[1].update()
        if save:
            writer.release()
        self.end()
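
A minimal driver for the class, assuming the FileHandler layout it expects (baseline and trial movies plus the JSON timestamp file) already exists on disk; the mouse id and data directory below are placeholders:

tracker = MouseTracker(mouse='m01', n=1, data_dir='/path/to/data',
                       diff_thresh=80, resample=1, point_mode='auto')
tracker.run(show=True, save=True, start_pos='c')
# run() finishes by calling end(), which writes tracking.npz and tracking.mat
# via the FileHandler, plus tracking.avi when save=True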