Example #1
class API(object):
    def __init__(self, win, source):
        self._source = source
        #self._device = cv2.VideoCapture(source)
        if isinstance(source, str):
            self.paused = True
        else:
            self.paused = False

        self.win = win
        cv2.namedWindow(self.win, 1)
        self.rect_selector = RectSelector(self.win, self.on_rect)
        self._bounding_box = None

        self._tracker = MedianFlowTracker()

    def on_rect(self, rect):
        self._bounding_box = rect

    def run(self):
        prev, curr = None, None

        data_files = []
        read_path = os.path.join(self._source, "*.jpg")
        data_files = glob(read_path)
        data_files.sort()

        frame = imread(data_files[0], mode='RGB')  # test data in github

        count = 0
        while True:

            print(data_files[count])
            if not self.rect_selector.dragging and not self.paused:
                count += 1
                if count >= len(data_files):
                    break  # reached the end of the image sequence
                frame = imread(data_files[count],
                               mode='RGB')  # test data in github

            prev, curr = curr, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if prev is not None and self._bounding_box is not None:
                bb = self._tracker.track(self._bounding_box, prev, curr)

                if bb is not None:
                    self._bounding_box = bb
                    cv2.rectangle(frame, self._bounding_box[:2],
                                  self._bounding_box[2:], (0, 255, 0), 2)
                else:
                    cv2.rectangle(frame, self._bounding_box[:2],
                                  self._bounding_box[2:], (0, 0, 255), 2)

            self.rect_selector.draw(frame)

            cv2.imshow(self.win, frame)

            ch = cv2.waitKey(1)
            if ch == 27 or ch in (ord('q'), ord('Q')):
                break
            elif ch in (ord('p'), ord('P')):
                self.paused = not self.paused
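
A minimal launcher for this image-sequence variant might look like the sketch below. The example omits its imports, so the module locations of RectSelector and MedianFlowTracker are assumptions; imread with mode='RGB' comes from the legacy scipy.misc.

# Hypothetical launcher for the image-sequence API above; adjust the
# RectSelector / MedianFlowTracker imports to wherever they live in your tree.
import os
import cv2
from glob import glob
from scipy.misc import imread  # legacy SciPy; imageio.imread is the usual replacement

if __name__ == '__main__':
    # arguments: window name, directory containing the *.jpg test frames
    API('median-flow', 'data/sequence').run()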
Example #2
    def __init__(self, camera):
        self.cap = cv.VideoCapture(camera)
        # run `ffmpeg -f v4l2 -list_formats all -i /dev/video0` to check
        # list of available video modes
        resolutions = "1280x720"
        resolutions = [int(i) for i in "1280x720".split('x')]
        self.cap.set(cv.CAP_PROP_FRAME_WIDTH, resolutions[0])
        self.cap.set(cv.CAP_PROP_FRAME_HEIGHT, resolutions[1])

        _, self.frame = self.cap.read()
        self.processor = Processor(self.frame, "camera.yml")
        self.player = Play()
        cv.namedWindow('processed')
        self.rect_sel = RectSelector('processed', self.onrect)

        self.the_rect = 0, 0, self.processor.w, self.processor.h
        self.color = (128, 255, 255)

        self.start_play = False
        self.paused = False
        self.store = False
        self.end = False
        self.winner = None
        self.store_points = []
        self.store_radius = []
        self.location = tuple()
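
Note that VideoCapture.set() fails silently when the driver does not support the requested mode, so a quick sanity check (a sketch, not part of the original snippet) is worth placing right after the two set() calls in __init__:

        # Confirm the driver actually accepted 1280x720; set() reports no error
        # when the mode is unsupported and the camera keeps its default size.
        actual_w = int(self.cap.get(cv.CAP_PROP_FRAME_WIDTH))
        actual_h = int(self.cap.get(cv.CAP_PROP_FRAME_HEIGHT))
        print("capture resolution:", actual_w, "x", actual_h)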
Example #3
class API(object):
    def __init__(self, win, source):
        self._device = cv2.VideoCapture(source)
        if isinstance(source, str):
            self.paused = True
        else:
            self.paused = False

        self.win = win
        cv2.namedWindow(self.win, 1)
        self.rect_selector = RectSelector(self.win, self.on_rect)
        self._bounding_box = None

        self._tracker = MedianFlowTracker()

    def on_rect(self, rect):
        self._bounding_box = rect

    def run(self):
        prev, curr = None, None

        ret, grabbed_frame = self._device.read()
        if not ret:
            raise IOError("can't read frame")

        while True:
            if not self.rect_selector.dragging and not self.paused:
                ret, grabbed_frame = self._device.read()
                if not ret:
                    break

            frame = grabbed_frame.copy()

            prev, curr = curr, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if prev is not None and self._bounding_box is not None:
                bb = self._tracker.track(self._bounding_box, prev, curr)

                if bb is not None:
                    self._bounding_box = bb
                    cv2.rectangle(frame, self._bounding_box[:2],
                                  self._bounding_box[2:], (0, 255, 0), 2)
                else:
                    cv2.rectangle(frame, self._bounding_box[:2],
                                  self._bounding_box[2:], (0, 0, 255), 2)

            self.rect_selector.draw(frame)

            cv2.imshow(self.win, frame)

            ch = cv2.waitKey(1)
            if ch == 27 or ch in (ord('q'), ord('Q')):
                break
            elif ch in (ord('p'), ord('P')):
                self.paused = not self.paused
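
For the VideoCapture-based variants a minimal entry point is enough; per the isinstance(source, str) check in __init__, an integer opens a camera and starts live, while a file path starts paused. The window name below is only illustrative.

# Hypothetical launcher for the VideoCapture-based API above.
if __name__ == '__main__':
    API('median-flow', 0).run()            # camera index 0
    # API('median-flow', 'clip.mp4').run() # or a video file (starts paused)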
Example #4
class API(object):
    def __init__(self, win, source):
        self._device = cv2.VideoCapture(source)
        if isinstance(source, str):
            self.paused = True
        else:
            self.paused = False

        self.win = win
        cv2.namedWindow(self.win, 1)
        self.rect_selector = RectSelector(self.win, self.on_rect)
        self._bounding_box = None

        self._tracker = MedianFlowTracker()

    def on_rect(self, rect):
        self._bounding_box = rect

    def run(self):
        prev, curr = None, None

        ret, grabbed_frame = self._device.read()
        if not ret:
            raise IOError("can't read frame")

        while True:
            if not self.rect_selector.dragging and not self.paused:
                ret, grabbed_frame = self._device.read()
                if not ret:
                    break

            frame = grabbed_frame.copy()

            prev, curr = curr, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if prev is not None and self._bounding_box is not None:
                bb = self._tracker.track(self._bounding_box, prev, curr)

                if bb is not None:
                    self._bounding_box = bb
                    cv2.rectangle(frame, self._bounding_box[:2], self._bounding_box[2:], (0, 255, 0), 2)
                else:
                    cv2.rectangle(frame, self._bounding_box[:2], self._bounding_box[2:], (0, 0, 255), 2)

            self.rect_selector.draw(frame)

            cv2.imshow(self.win, frame)

            ch = cv2.waitKey(1)
            if ch == 27 or ch in (ord('q'), ord('Q')):
                break
            elif ch in (ord('p'), ord('P')):
                self.paused = not self.paused
Example #5
    def __init__(self, win, source):
        self._device = cv2.VideoCapture(source)
        if isinstance(source, str):
            self.paused = True
        else:
            self.paused = False

        self.win = win
        cv2.namedWindow(self.win, 1)
        self.rect_selector = RectSelector(self.win, self.on_rect)
        self._bounding_box = None

        self._tracker = MedianFlowTracker()
Example #6
    def __init__(self, win, source):
        self._device = cv2.VideoCapture(source)
        if isinstance(source, str):
            self.paused = True
        else:
            self.paused = False

        self.win = win
        cv2.namedWindow(self.win, 1)
        self.rect_selector = RectSelector(self.win, self.on_rect)
        self._target = None

        self._tracker = MedianFlowTracker()
Example #7
class API(object):
    def __init__(self, win, source):
        self._device = cv2.VideoCapture(source)
        if isinstance(source, str):
            self.paused = True
        else:
            self.paused = False

        self.win = win
        cv2.namedWindow(self.win, 1)
        self.rect_selector = RectSelector(self.win, self.on_rect)
        self._target = None

        self._tracker = MedianFlowTracker()

    def on_rect(self, rect):
        self._target = [0.5 * (rect[0] + rect[2]),
                        0.5 * (rect[1] + rect[3]),
                        0.5 * (rect[2] - rect[0]),
                        0.5 * (rect[3] - rect[1]),
                        0.0]

    def run(self):
        prev, curr = None, None

        ret, grabbed_frame = self._device.read()
        if not ret:
            raise IOError("can't read frame")

        while True:
            if not self.rect_selector.dragging and not self.paused:
                ret, grabbed_frame = self._device.read()
                if not ret:
                    break

            frame = grabbed_frame.copy()

            prev, curr = curr, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if prev is not None and self._target is not None:
                tgt = self._tracker.track(self._target, prev, curr)

                if tgt is not None:
                    self._target = tgt[:]
                    color = (0, 255, 0)
                else:
                    tgt = self._target[:]
                    color = (0, 0, 255)

                center = (int(tgt[0]), int(tgt[1]))
                scale = (int(tgt[2]), int(tgt[3]))
                angle = tgt[4] * 180.0 / np.pi
                cv2.ellipse(frame, center, scale, angle, 0., 360., color, 2)

            self.rect_selector.draw(frame)

            cv2.imshow(self.win, frame)

            ch = cv2.waitKey(1)
            if ch == 27 or ch in (ord('q'), ord('Q')):
                break
            elif ch in (ord('p'), ord('P')):
                self.paused = not self.paused
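
In the ellipse-drawing variants, on_rect converts the selected corner rectangle into a [center_x, center_y, half_width, half_height, angle] state. A small helper (the name target_to_rect is hypothetical, not part of the examples) shows how that state maps back to the (x0, y0, x1, y1) corners used by the rectangle-drawing variants, ignoring rotation:

# Hypothetical inverse of on_rect above: recover axis-aligned corners
# from the [cx, cy, half_w, half_h, angle] target (rotation ignored).
def target_to_rect(tgt):
    cx, cy, hw, hh = tgt[:4]
    return (int(cx - hw), int(cy - hh), int(cx + hw), int(cy + hh))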
Example #8
class API(object):
    def __init__(self, win, source):
        self._source = source
        #self._device = cv2.VideoCapture(source)
        if isinstance(source, str):
            self.paused = True
        else:
            self.paused = False

        self.win = win
        cv2.namedWindow(self.win, 1)
        self.rect_selector = RectSelector(self.win, self.on_rect)
        self._target = None

        self._tracker = MedianFlowTracker()

    def on_rect(self, rect):
        self._target = [0.5 * (rect[0] + rect[2]),
                        0.5 * (rect[1] + rect[3]),
                        0.5 * (rect[2] - rect[0] + 1.0),
                        0.5 * (rect[3] - rect[1] + 1.0),
                        0.0]

    def run(self):
        prev, curr = None, None

        data_files = []
        read_path = os.path.join(self._source, "*.jpg")
        data_files = glob(read_path)
        data_files.sort()

        frame = imread(data_files[0], mode='RGB') # test data in github

        count = 0
        while True:

            print(data_files[count])
            if not self.rect_selector.dragging and not self.paused:
                count += 1
                if count >= len(data_files):
                    break  # reached the end of the image sequence
                frame = imread(data_files[count], mode='RGB')  # test data in github

            prev, curr = curr, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if prev is not None and self._target is not None:
                tgt = self._tracker.track(self._target, prev, curr)

                if tgt is not None:
                    self._target = tgt[:]
                    color = (0, 255, 0)
                else:
                    tgt = self._target[:]
                    color = (0, 0, 255)

                center = (int(tgt[0]), int(tgt[1]))
                scale = (int(tgt[2]), int(tgt[3]))
                angle = tgt[4] * 180.0 / np.pi
                cv2.ellipse(frame, center, scale, angle, 0., 360., color, 2)

            self.rect_selector.draw(frame)

            cv2.imshow(self.win, frame)

            ch = cv2.waitKey(1)
            if ch == 27 or ch in (ord('q'), ord('Q')):
                break
            elif ch in (ord('p'), ord('P')):
                self.paused = not self.paused
Example #9
class App:
    def __init__(self, camera):
        self.cap = cv.VideoCapture(camera)
        # run `ffmpeg -f v4l2 -list_formats all -i /dev/video0` to check
        # list of available video modes
        resolutions = "1280x720"
        resolutions = [int(i) for i in "1280x720".split('x')]
        self.cap.set(cv.CAP_PROP_FRAME_WIDTH, resolutions[0])
        self.cap.set(cv.CAP_PROP_FRAME_HEIGHT, resolutions[1])

        _, self.frame = self.cap.read()
        self.processor = Processor(self.frame, "camera.yml")
        self.player = Play()
        cv.namedWindow('processed')
        self.rect_sel = RectSelector('processed', self.onrect)

        self.the_rect = 0, 0, self.processor.w, self.processor.h
        self.color = (128, 255, 255)

        self.start_play = False
        self.paused = False
        self.store = False
        self.end = False
        self.winner = None
        self.store_points = []
        self.store_radius = []
        self.location = tuple()

    def reset_store(self):
        self.store_points = []
        self.store_radius = []

    def onrect(self, rect):
        self.the_rect = rect
        print("select rect:", self.the_rect)
        self.reset_store()
        self.store = True

    def read_frame(self, timeout):
        start_time = time.time()
        while True:
            _, self.frame = self.cap.read()
            self.frame, tsps, tsrs = self.processor.centers_detect(
                self.frame.copy(), self.the_rect, self.color, self.store)
            self.store_points.extend(tsps)
            self.store_radius.extend(tsrs)
            if (time.time() - start_time) > timeout:
                break

    def ai_play(self):
        self.read_frame(0.5)
        self.location, self.end, self.winner = self.player.game.play()
        print("AI move:", self.location)
        self.processor.store_coors.append(tuple(self.location))
        self.processor.grid(self.frame, self.store_points, self.store_radius,
                            self.paused)

    def run(self):
        while True:
            if not self.start_play:
                self.read_frame(0)
                self.rect_sel.draw(self.frame)
            elif not self.paused:
                self.ai_play()

            cv.imshow("processed", self.frame)
            k = cv.waitKey(5) & 0xFF
            if k == 27:
                break
            if k == ord('p'):
                print(len(self.store_points))
            if k == ord('c'):
                print("clean store coordinates!")
                self.processor.store_coors = []
            if k == ord('s'):
                cv.imwrite('frame.png', self.frame)
                print("frame saved")

            if k == ord(' ') and self.store:
                self.start_play = True
                self.paused = not self.paused

                if self.paused:
                    self.ai_play()

                else:
                    durations = 1.4
                    while True:
                        self.read_frame(durations)
                        ai_loc = self.processor.store_coors[-1]
                        self.processor.grid(self.frame, self.store_points,
                                            self.store_radius, self.paused)
                        location = self.processor.store_coors[-1]
                        if ai_loc != location:
                            location, self.end, self.winner = self.player.game.play(
                                location)
                            print("Human move:", location)
                            break
                        print("Human not found, trying..")
                        durations += 0.3
                    self.reset_store()
                if self.end:
                    print("game end")
                    print("the winner is:", winner)
                    break

        cv.destroyAllWindows()
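
A hedged entry point for the App class; camera index 0 and the camera.yml calibration file referenced in __init__ are assumptions about the surrounding project.

# Hypothetical launcher for the App above (camera index 0).
if __name__ == '__main__':
    App(0).run()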
Example #10
class API(object):
    def __init__(self, win, source):
        self._device = cv2.VideoCapture(source)
        if isinstance(source, str):
            self.paused = True
        else:
            self.paused = False

        self.win = win
        cv2.namedWindow(self.win, 1)
        self.rect_selector = RectSelector(self.win, self.on_rect)
        self._target = None

        self._tracker = MedianFlowTracker()

    def on_rect(self, rect):
        self._target = [
            0.5 * (rect[0] + rect[2]), 0.5 * (rect[1] + rect[3]),
            0.5 * (rect[2] - rect[0] + 1.0), 0.5 * (rect[3] - rect[1] + 1.0),
            0.0
        ]

    def run(self):
        prev, curr = None, None

        ret, grabbed_frame = self._device.read()
        if not ret:
            raise IOError("can't read frame")

        while True:
            if not self.rect_selector.dragging and not self.paused:
                ret, grabbed_frame = self._device.read()
                if not ret:
                    break

            frame = grabbed_frame.copy()

            prev, curr = curr, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if prev is not None and self._target is not None:
                tgt = self._tracker.track(self._target, prev, curr)

                if tgt is not None:
                    self._target = tgt[:]
                    color = (0, 255, 0)
                else:
                    tgt = self._target[:]
                    color = (0, 0, 255)

                center = (int(tgt[0]), int(tgt[1]))
                scale = (int(tgt[2]), int(tgt[3]))
                angle = tgt[4] * 180.0 / np.pi
                cv2.ellipse(frame, center, scale, angle, 0., 360., color, 2)

            self.rect_selector.draw(frame)

            cv2.imshow(self.win, frame)

            ch = cv2.waitKey(1)
            if ch == 27 or ch in (ord('q'), ord('Q')):
                break
            elif ch in (ord('p'), ord('P')):
                self.paused = not self.paused