Example #1
def Show(window_name, image):
    # Proportional height for a 256-px-wide view; computed but never used.
    ym = 256 * image.shape[0] / image.shape[1]
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.imshow(window_name, image)
    # Rendered-image size scaled by 4; also computed but never used.
    x = cv2.getWindowImageRect(window_name)[2] * 4
    y = cv2.getWindowImageRect(window_name)[3] * 4
    cv2.resizeWindow(window_name, 256, 256)
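
A minimal sketch of what this snippet appears to be aiming for, resizing the window to a fixed width while preserving the image's aspect ratio; the function name and the 256-px width are assumptions for illustration:

import cv2

def show_fixed_width(window_name, image, width=256):
    # Height that preserves the image's aspect ratio at the given width.
    height = int(width * image.shape[0] / image.shape[1])
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.imshow(window_name, image)
    cv2.resizeWindow(window_name, width, height)
    # getWindowImageRect now reports the actual rendering area.
    x, y, w, h = cv2.getWindowImageRect(window_name)
    return w, h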
Example #2
def show_image():
    global visualisation_on
    global data
    global visualiser_showing
    global window_name

    if visualisation_on:
        if not visualiser_showing:
            window_name = 'Visualiser'
            cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
            try:
                cv2.imshow(window_name, data)
            except Exception as e:
                print(e)
            cv2.waitKey(1)
            visualiser_showing = True
        if visualiser_showing:
            width = cv2.getWindowImageRect(window_name)[2]
            height = cv2.getWindowImageRect(window_name)[3]
            try:
                image = cv2.resize(data, (width, height),
                                   interpolation=cv2.INTER_AREA)
                cv2.imshow(window_name, image)
                cv2.waitKey(1)
            except Exception as e:
                print(e)
    else:
        cv2.destroyAllWindows()
        visualiser_showing = False
        cv2.waitKey(1)
Example #3
    def visualisation_loop(vis_object):
        """
        When the visualisation parameter of a node is set to True, this loop starts in a new visualisation thread.
        The thread terminates when the visualisation_on boolean is turned off. Because this is a static method, it
        can be overridden with another loop from an xxx_worker script via set_new_visualisation_loop.
        :return: Nothing
        """
        while vis_object.running:
            while vis_object.visualisation_on:
                if not vis_object.window_showing:
                    window_name = '{} {}'.format(vis_object.node_name,
                                                 vis_object.node_index)
                    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
                    cv2.imshow(window_name, vis_object.visualised_data)
                    cv2.waitKey(1)
                    vis_object.window_showing = True
                if vis_object.window_showing:
                    try:
                        width = cv2.getWindowImageRect(window_name)[2]
                        height = cv2.getWindowImageRect(window_name)[3]
                        image = cv2.resize(vis_object.visualised_data,
                                           (width, height),
                                           interpolation=cv2.INTER_AREA)
                        cv2.imshow(window_name, image)
                        cv2.waitKey(1)
                    except Exception as e:
                        print(e)

            cv2.destroyAllWindows()
            cv2.waitKey(1)
            vis_object.window_showing = False
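
Since visualisation_loop is a static method, it is meant to be driven from a separate thread; a minimal sketch of the wiring, where Worker stands in for the (unnamed) host class and vis_object is any object exposing the attributes used above:

import threading

vis_thread = threading.Thread(target=Worker.visualisation_loop,
                              args=(vis_object,), daemon=True)
vis_thread.start()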
Example #4
    def mouse_callback_function(self, event, x, y, flags, param):
        """
        The purpose of this function is to handle anything related to the mouse.

        Pressing the left mouse button initiates a drawing session: a rectangle is appended to the internal list
        '_rectangles_to_draw' with one corner located at the mouse pointer. While the drawing session is active,
        moving the mouse resizes this rectangle. Releasing the left mouse button ends the session, and the mouse
        position at that moment determines where the opposite corner is placed. Clicking the middle mouse button
        removes the most recently added rectangle, if possible.
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            print("Left mouse was pressed")
            if len(self._rectangles_to_draw) < glob.MAX_NUMBER_OF_TRACKS:
                self._drawing = True
                self._drawing_start_position = (x, y)
                # The region that is currently drawn is saved in the last place of the list
                self._rectangles_to_draw.append(
                    Rectangle((x, y), (x + 1, y + 1), self._id_counter))
                self._id_counter += 1
            else:
                print("Maximum number of tracks has been reached.")

        elif event == cv2.EVENT_LBUTTONUP:
            print("Left mouse was released")
            if self._drawing:
                self._drawing = False
                self._drawing_end_position = (x, y)
                window = cv2.getWindowImageRect(self._window_name)
                top_left_x = max(
                    min(self._drawing_start_position[0],
                        self._drawing_end_position[0]), 0)
                top_left_y = max(
                    min(self._drawing_start_position[1],
                        self._drawing_end_position[1]), 0)
                bottom_right_x = min(
                    max(self._drawing_start_position[0],
                        self._drawing_end_position[0]), window[2] - 1)
                bottom_right_y = min(
                    max(self._drawing_start_position[1],
                        self._drawing_end_position[1]), window[3] - 1)
                self._rectangles_to_draw[-1].update_position(
                    (top_left_x, top_left_y), (bottom_right_x, bottom_right_y))

        elif event == cv2.EVENT_MBUTTONDOWN:
            print("Middle click was pressed")
            if self._rectangles_to_draw:
                self._rectangles_to_draw.pop()

        elif self._drawing:
            window = cv2.getWindowImageRect(self._window_name)
            top_left_x = max(min(self._drawing_start_position[0], x), 0)
            top_left_y = max(min(self._drawing_start_position[1], y), 0)
            bottom_right_x = min(max(self._drawing_start_position[0], x),
                                 window[2] - 1)
            bottom_right_y = min(max(self._drawing_start_position[1], y),
                                 window[3] - 1)
            self._rectangles_to_draw[-1].update_position(
                (top_left_x, top_left_y), (bottom_right_x, bottom_right_y))
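
A callback like this only fires after it has been registered on a window; a minimal sketch of the wiring, assuming tracker is an instance of the class above and its _window_name has been created:

cv2.namedWindow(tracker._window_name, cv2.WINDOW_NORMAL)
cv2.setMouseCallback(tracker._window_name, tracker.mouse_callback_function)
while True:
    # draw tracker._rectangles_to_draw onto the current frame here
    if cv2.waitKey(16) & 0xFF == ord('q'):
        break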
Example #5
    def refresh(self):
        self.width, self.height = cv2.getWindowImageRect(self.windowname)[2:]
        self.resize_camera()

        if self.use_moderngl:
            image = self.offscreen_renderer.render(self.camera)
        else:
            image = (self.scene.render(self.interactor.camera) * 255).astype(np.uint8)

        # Swap channels for OpenCV display; COLOR_BGR2RGB and COLOR_RGB2BGR perform the same swap.
        bgr_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        if self.recording:
            self.video_writer.write(bgr_image.astype(np.uint8))

        self.update_fps()
        if self.recording:
            cv2.circle(
                bgr_image,
                (image.shape[1] - 20, image.shape[0] - 20),
                8,
                (0, 0, 255),
                cv2.FILLED,
            )
        if self.display_fps:
            self.print_fps(bgr_image, self.fps)

        cv2.imshow(self.windowname, bgr_image)

        key = cv2.waitKey(1)
        if key > 0:
            self.process_key(key)
Example #6
    def handleKeyUpdate(self, key):
        (x, y, w, h) = cv2.getWindowImageRect('image')
        moveBy = int(self.zoom_level * .2)
        if key != 255:
            print(key)
        if key == 97:  # ord('a')
            #left
            self.click_x = int(w / 2) - moveBy
            self.click_y = int(h / 2)
            self.img = self.refitToScreenSize(
                self.updateRegion(self.click_x, self.click_y))

        elif key == 119:  # ord('w')
            # up
            self.click_x = int(w / 2)
            self.click_y = int(h / 2) - moveBy
            self.img = self.refitToScreenSize(
                self.updateRegion(self.click_x, self.click_y))

        elif key == 100:  # ord('d')
            # right
            self.click_x = int(w / 2) + moveBy
            self.click_y = int(h / 2)
            self.img = self.refitToScreenSize(
                self.updateRegion(self.click_x, self.click_y))

        elif key == 115:  # ord('s')
            #down
            self.click_x = int(w / 2)
            self.click_y = int(h / 2) + moveBy
            self.img = self.refitToScreenSize(
                self.updateRegion(self.click_x, self.click_y))
Example #7
    def main_loop(self, app_handle: AppHandler):
        if (app_handle == 0):
            raise Exception("wrong handler format")

        __window_name = "Snake Window"
        __main_window = cv2.namedWindow(__window_name)

        while (app_handle.IsOpen()):
            rect = app_handle.Rect()
            if rect is None:
                break

            __printscreen_pil = ImageGrab.grab(rect)
            __printscreen_numpy = np.array(__printscreen_pil.getdata(),
                                           dtype='uint8').reshape(
                                               (__printscreen_pil.size[1],
                                                __printscreen_pil.size[0], 3))

            width = app_handle.Width()
            height = app_handle.Height()

            if height is None or width is None:
                break

            __printscreen_numpy = self.preprocess_image(
                __printscreen_numpy, width, height)

            # TODO: the filter size causes an exception
            # https://docs.opencv.org/3.1.0/d4/d13/tutorial_py_filtering.html

            cv2.imshow(__window_name, __printscreen_numpy)
            if (cv2.waitKey(25) & 0xFF) == ord('q'):
                x = cv2.getWindowImageRect(__window_name)
                cv2.destroyAllWindows()
                break
Example #8
    def on_mouse(self, event, x, y, flag, *params):

        windowWidth = cv2.getWindowImageRect("Lightfield")[2]
        windowHeight = cv2.getWindowImageRect("Lightfield")[3]
        image_x = map_range(x, 0, windowWidth, self.min_x, self.max_x)
        image_y = map_range(windowHeight - y, 0, windowHeight, self.min_z, self.max_z)

        closest_image = self.metadata[0]
        distance = self.metadata[0].distance(image_x,image_y)
        for m in self.metadata:
            if m.distance(image_x,image_y) < distance:
                distance = m.distance(image_x,image_y)
                closest_image = m

        print("%4d, %4d" %(image_x,image_y),closest_image.x,closest_image.z )
        self.img = closest_image.load()
Example #9
def session():
    global drag_state
    with mss() as sct:
        cv2.imshow(
            WINDOW, 
            np.array(sct.grab(rect)), 
        )
        cv2.setMouseCallback(WINDOW, mouseEvent)
        cv2.waitKey(300)
        while True:
            if drag_state == 0:
                cv2.imshow(
                    WINDOW, 
                    np.array(sct.grab(rect)), 
                )
            key_press = cv2.waitKey(33) & 0xFF
            rect['left'], rect['top'], _, _ = cv2.getWindowImageRect(WINDOW)
            if key_press in (
                ord('q'), 
                27, 
            ):
                raise KeyboardInterrupt
            elif key_press == ord(' ') or drag_state == 2:
                break
    cv2.destroyWindow(WINDOW)
Example #10
    def getWindowImageRect(self):
        """Provides rectangle of image in the window.

        The function getWindowImageRect returns the client screen coordinates, width and height of the image rendering area.
        """
        Ix, Iy, Iw, Ih = cv2.getWindowImageRect(winname=self.winname)
        return (Ix, Iy, Iw, Ih)
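
The underlying call works on any named window; a minimal standalone sketch (note that, as the recalculate_window_stuffs example further down guards against, the rectangle components can come back as -1 when the window state is unavailable):

import cv2
import numpy as np

cv2.namedWindow('demo', cv2.WINDOW_NORMAL)
cv2.imshow('demo', np.zeros((240, 320, 3), np.uint8))
cv2.waitKey(1)  # give the window system a chance to lay the window out
Ix, Iy, Iw, Ih = cv2.getWindowImageRect('demo')
print(Ix, Iy, Iw, Ih)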
Example #11
def main():
    # initialize the video stream
    # and initialize the FPS counter
    #logging.info("starting video stream...")
    frame = None

    # src=0 is default web cam
    #vs = VideoStream(src=0).start()

    screenWidth, screenHeight = pyautogui.size()
    screenRatio = screenWidth/screenHeight

    logging.info("screenWith: {}x{}, format: {:.2f}:1".format(screenWidth,
                screenHeight, screenRatio))

    logging.info("Creating output window")
    cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
    # scaling the screen to 70% for second monitor...
    cv2.resizeWindow('Output', (int(screenWidth*.70), int(screenHeight*.70)))
    cv2.moveWindow('Output', -1440, 200)
    try:
        destiny_window = win32gui.FindWindow(None, "Destiny 2")
        win32gui.SetForegroundWindow(destiny_window)
    except Exception:
        logging.debug("Couldn't find Destiny 2 window, is it running?")
        cv2.destroyAllWindows()
        #exit(1)

    osd = destiny2_bot_osd(screenWidth, screenHeight)

    # Add keyboard hotkeys
    keyboard.add_hotkey('ctrl+shift+a', osd.add_console, args=['ctrl+shift+a pressed'])

    # START EVENT LOOP
    while True:
        # grab a screenshot of the desktop
        frame = np.array(ImageGrab.grab(bbox=(0, 40,
                        screenWidth, screenHeight)))

        osd.fps_update(frame)
        osd.write_console(frame)

        # show the output frame
        # scale frame to window
        rect = cv2.getWindowImageRect('Output')
        im_scaled = cv2.resize(frame, (rect[2], rect[3]))
        # convert frame back to RGB to display correctly
        RGB_img = cv2.cvtColor(im_scaled, cv2.COLOR_BGR2RGB)
        cv2.imshow("Output", RGB_img)

        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    logging.info("exiting")
    cv2.destroyAllWindows()
Example #12
    def get_view_dims(self):
        # Find maximum width
        blank_img_width = np.zeros((1, 100, 3), np.uint8)
        blank_img_width.fill(LIGHT_GRAY)
        self.show_image(blank_img_width)
        cv2.waitKey(1)
        max_width = cv2.getWindowImageRect(self.window_name)[2]
        del blank_img_width

        # Find maximum height
        blank_img_height = np.zeros((100, 1, 3), np.uint8)
        blank_img_height.fill(LIGHT_GRAY)
        self.show_image(blank_img_height)
        cv2.waitKey(1)
        max_height = cv2.getWindowImageRect(self.window_name)[3]
        del blank_img_height

        return (max_width, max_height)
Example #13
 def print_map(self, window):  # works
     text = ("Global traffic jam: " + str(self.slow_cars) + ", " +
             "greatest global traffic jam: " + str(self.max_slow_cars))
     windowWidth = cv2.getWindowImageRect(window)[2]
     windowHeight = cv2.getWindowImageRect(window)[3]
     image = cv2.resize(cv2.cvtColor(self.colormap, cv2.COLOR_BGR2RGB),
                        (windowWidth, windowHeight))
     cv2.putText(
         image,
         text,
         (10, 25),
         cv2.FONT_HERSHEY_SIMPLEX,
         1,
         (0, 0, 255),
         2,
         cv2.LINE_AA,
     )
     cv2.imshow(window, image)
Example #14
 def __init__(self):
     pyautogui.FAILSAFE = False
     cv2.namedWindow(Window.NAME, cv2.WINDOW_GUI_EXPANDED)
     cv2.setWindowProperty(Window.NAME, cv2.WND_PROP_FULLSCREEN,
                           cv2.WINDOW_FULLSCREEN)
     cv2.setWindowProperty(Window.NAME, cv2.WND_PROP_ASPECT_RATIO,
                           cv2.WINDOW_FREERATIO)
     (_, _, width, height) = cv2.getWindowImageRect(Window.NAME)
     print("Screen size: %d x %d" % (width, height))
     pyautogui.moveTo(width, height)
Example #15
 def onDoubleClicked(self, event, x, y, flags, param):
     if event == cv2.EVENT_LBUTTONDBLCLK:
         print("Double Clicked")
         _, _, w, h = cv2.getWindowImageRect(param[0])
         if w == 500 and h == 350:
             cv2.resizeWindow(param[0], self.width, self.height)
             cv2.moveWindow(param[0], 0, 0)
         else:
             cv2.resizeWindow(param[0], 500, 350)
             print(param)
             cv2.moveWindow(param[0], param[1], param[2])
Example #16
def cv2_setup(window):
    global disable_abort
    if fullscreen:
        cv2.namedWindow(window, cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty(window, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    else:
        cv2.namedWindow(window)
        width, height = cv2.getWindowImageRect(window)[2:]
        cv2.moveWindow(window, (SystemMetrics.screen_width - width) // 2,
                       (SystemMetrics.screen_height - height) // 2)
    if not disable_abort:
        cv2.setMouseCallback(window, abort)
    return window
Example #17
    def updateRegion(self, center_x, center_y):
        # center_x is given in OpenCV-window coordinates, so the true center is at
        # cur_x + center_x. First make center_x relative to the window midpoint by
        # subtracting half the window width (and likewise for center_y).
        (x, y, w, h) = cv2.getWindowImageRect('image')
        center_x = center_x - int(w / 2)
        center_y = center_y - int(h / 2)

        self.cur_x = self.cur_x + center_x
        self.cur_y = self.cur_y + center_y

        img = self.getRegion()

        return img
Example #18
def run_resize_image(image, cvwin, ww, hh, lock=0):
    x, y, w, h = cv2.getWindowImageRect(cvwin)
    if w > 0 and h > 0:
        suc, sz, sx, sy, ex, ey = calcFixedAspectRatio([w, h], [ww, hh])
        if suc:
            #DEBUG(suc, sz, sx, sy, ex, ey, (ww, hh), (x, y, w, h), image.shape, cvwin)
            img = cv2.resize(image, sz, interpolation=cv2.INTER_LINEAR)
            if lock:
                image = img
                cv2.resizeWindow(cvwin, sz[0], sz[1])
            else:
                image = np.zeros((h, w, 3), dtype=np.uint8)
                image[sy:ey, sx:ex, :] = img
    return image, [w, h]
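
The helper calcFixedAspectRatio is not shown in this example. A plausible reconstruction, inferred purely from how its return values are used above (an assumption, not the project's actual code): it fits the source size into the window size while preserving aspect ratio, and reports the scaled size plus its centered placement.

def calcFixedAspectRatio(win_size, img_size):
    # Hypothetical reconstruction: letterbox img_size into win_size.
    w, h = win_size
    ww, hh = img_size
    if ww <= 0 or hh <= 0:
        return False, None, 0, 0, 0, 0
    scale = min(w / ww, h / hh)
    sz = (max(1, int(ww * scale)), max(1, int(hh * scale)))  # (width, height)
    sx, sy = (w - sz[0]) // 2, (h - sz[1]) // 2              # centered top-left offset
    return True, sz, sx, sy, sx + sz[0], sy + sz[1]          # success, size, start, end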
Example #19
def show_image(img):
    if img is None:
        return
    height, width, channels = img.shape
    rect = cv2.getWindowImageRect(window_name)
    h = rect[3]
    w = int(h * width / height)
    if w > rect[2]:
        w = rect[2]
        h = int(height / width * w)
    new_img = cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)
    s = np.zeros((rect[3], rect[2], 3), np.uint8)
    s.fill(255)
    sy = int((rect[3] - h) / 2)
    sx = int((rect[2] - w) / 2)
    s[sy:sy + h, sx:sx + w] = new_img
    cv2.imshow(window_name, s)
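
For the letterboxing above to track user resizing, the window must be created resizable; a minimal sketch of the setup this function presumably relies on (window_name is the same global the function reads, img any BGR image):

window_name = 'viewer'
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(window_name, 800, 600)
while True:
    show_image(img)  # re-letterboxes into the current window rect
    if cv2.waitKey(30) & 0xFF == 27:  # Esc
        break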
Example #20
    def viewCCTV(self):
        caps = list()
        windowNames = list()
        startX, startY = 0, 0
        for uri in (self.uris[self.idx]):
            c = cv2.VideoCapture(uri)
            if c.isOpened():
                print("add")
                caps.append(c)
            else:
                print("don't add")
                continue
            #caps.append(cv2.VideoCapture(uri))
            winname = 'Camera ' + uri
            windowNames.append(winname)
            cv2.namedWindow(winname)
            cv2.moveWindow(winname, startX, startY)
            cv2.resizeWindow(winname, 500, 350)
            cv2.setMouseCallback(winname, self.onDoubleClicked,
                                 (winname, startX, startY))
            startX += 500
            if startX + 500 > self.width:
                startX = 0
                startY += 350
            #cv2.resizeWindow(winname, 200, 200)
        finish = False
        while True:
            for i in range(0, len(caps)):
                _, frame = caps[i].read()
                if frame is None:
                    continue
                x, y, w, h = cv2.getWindowImageRect(windowNames[i])
                resized_frame = cv2.resize(frame, (w, h))
                cv2.imshow(windowNames[i], resized_frame)

                k = cv2.waitKey(1) & 0xFF
                if k == 27:
                    caps[i].release()
                    finish = True
                    break
            if finish:
                break

        for name in windowNames:
            cv2.destroyWindow(name)
Example #21
	def get_frame(self):
		try:
			frame = self.videos[self.video_i][self.video_type].last_frame()
		except Exception:
			print("Error: video [%d %s] not found!"%(self.video_i+1,self.video_type))
			frame = None

		#empty frame
		if frame is None:
			frame = np.zeros((10,10,3))
		elif self.videos_path[self.video_i] == "Avatar" and\
			self.video_type == "bluray":
			#16:9 to 4:3
			#  |--dx--|--------new_x---------|--dx--|
			size = frame.shape
			new_x = size[0]*4/3
			dx = int((size[1]-new_x)/2)
			frame = frame[:,dx:-dx]
		#zoom
		if self.zoom:
			size = frame.shape
			pixels = (int(size[0]*(1-1/ZOOM)/2),int(size[1]*(1-1/ZOOM)/2))
			frame  = frame[pixels[0]:-pixels[0],pixels[1]:-pixels[1]]

		frame = frame.copy()
		
		#resize
		dim_frame  = (frame.shape[1],frame.shape[0])
		dim_screen = cv2.getWindowImageRect('video')[2:]

		# if the window size barely changed, keep the previous dimensions
		if abs(self.last_dim_screen[0] - dim_screen[0]) + abs(self.last_dim_screen[1] - dim_screen[1]) <= 2:
			dim_screen = self.last_dim_screen
		self.last_dim_screen = dim_screen

		alpha = min([dim_screen[0]/dim_frame[0],dim_screen[1]/dim_frame[1]])
		dim_frame2 = (int(dim_frame[0]*alpha),int(dim_frame[1]*alpha))


		if self.video_resize_type == 'bicubic':
			frame = cv2.resize(frame, dim_frame2, interpolation = cv2.INTER_CUBIC )
		elif self.video_resize_type == 'nearest':
			frame = cv2.resize(frame, dim_frame2, interpolation = cv2.INTER_NEAREST )

		return frame
Example #22
    def _callHandler(self, event_type: EventType, event: Event):
        if event_type == EventType.MouseMove or event_type == EventType.MouseClick:
            win_rect = cv.getWindowImageRect(self.name)
            event.x -= win_rect[0]
            event.y -= win_rect[1]

            event.x *= self.width / win_rect[2]
            event.y *= self.height / win_rect[3]

            # Out of bounds
            if event.x < 0 or event.x >= self.width or event.y < 0 or event.y >= self.height:
                return

        if event_type == EventType.MouseMove:
            self.mouseEvent = event

        for handler in self.handlers[event_type].values():
            handler(event)
Example #23
def recalculate_window_stuffs():
    global cell_height
    global cell_width
    global number_of_cells
    global number_of_columns
    global number_of_rows
    global resize_x
    global resize_y
    global window_height
    global window_width

    (_, _, temp_window_width, temp_window_height) = cv2.getWindowImageRect('win')
    if not (temp_window_width == -1 or temp_window_height == -1):
        window_width = temp_window_width
        window_height = temp_window_height
    print(f"resizing: {window_width}x{window_height}")

    # 9 - 7
    number_of_rows = 8

    cell_height = window_height // number_of_rows
    cell_width = int(cell_height * (initial_img_width / initial_img_height))
    cell_aspect_ratio = cell_width / cell_height
    image_aspect_ratio = initial_img_width / initial_img_height
    #assert abs(cell_aspect_ratio - image_aspect_ratio) < 0.01, f"cell_aspect_ratio={cell_aspect_ratio} image_aspect_ratio={image_aspect_ratio}"
    number_of_columns = window_width // cell_width

    resize_x = cell_width / initial_img_width
    resize_y = cell_height / initial_img_height

    # setting up cell widths and asserting cells fit into the window size
    # cell_width = initial_img_width * resize_x # 150
    # cell_height = initial_img_height * resize_y # 200
    #assert window_width % cell_width == 0, 'Check that cell_width fits into the windows width'
    #assert window_height % cell_height == 0, 'Check that cell_height fits into windows height'
    # amount of columns, rows and total cells in the grid
    # number_of_columns = window_width // cell_width # how many columns there are.
    # number_of_rows = window_height // cell_height # how many rows there are.
    number_of_cells = number_of_rows * number_of_columns  # number of cells in grid
    print(cell_width, cell_height, number_of_rows, number_of_columns)
Example #24
def click_and_crop(event, x, y, flags, window_name):
    global refPt, cropping

    window_x, window_y, window_w, window_h = cv2.getWindowImageRect(
        window_name)
    # print(event, x, y)
    # print(window_x, window_y, window_w, window_h)

    if x < 0:
        x = 0
    elif x >= window_w:
        x = window_w - 1

    if y < 0:
        y = 0
    elif y >= window_h:
        y = window_h - 1

    if event == cv2.EVENT_LBUTTONDOWN:
        refPt.append((x, y))

    elif event == cv2.EVENT_LBUTTONUP:
        if len(refPt) < 1:
            return

        refPt.append((x, y))
        min_x = min(refPt[-2][0], refPt[-1][0])
        max_x = max(refPt[-2][0], refPt[-1][0])
        min_y = min(refPt[-2][1], refPt[-1][1])
        max_y = max(refPt[-2][1], refPt[-1][1])

        refPt.clear()
        if (max_x - min_x) > 20 and (max_y - min_y) > 20:
            refPt.append((min_x, min_y))
            refPt.append((max_x, max_y))
            cropping = True
        else:
            cropping = False
Example #25
def getting_screen_share(control_socket2):
    """getting the screen share"""
    global screenx, screeny
    screenx = int(control_socket2.recv(4).decode('utf-8'))
    screeny = int(control_socket2.recv(4).decode('utf-8'))
    size = int(control_socket2.recv(10).decode('utf-8'))

    try:
        while True:
            chunks = []
            rc_data = 0
            while rc_data < size:
                chunk = control_socket2.recv(size - rc_data)
                chunks.append(chunk)
                rc_data += len(chunk)

            img_to_save = PIL.Image.frombytes("RGB", (screenx, screeny),
                                              b''.join(chunks))
            img_np = numpy.array(img_to_save)
            imS = cv2.resize(img_np, (myscreenx, myscreeny - 150))

            img_np = cv2.cvtColor(imS, cv2.COLOR_BGR2RGB)
            cv2.namedWindow('frame')
            cv2.imshow('frame', img_np)
            global tuple_of_sizes
            tuple_of_sizes = cv2.getWindowImageRect('frame')

            key = cv2.waitKey(1)
            if key == 27:
                break
    except ConnectionResetError:
        pass

    print("from here")
    listener.stop()
    control_socket.close()
    cv2.destroyAllWindows()
    end_remote_mode()
Example #26
    def main(self, vsid, vsuri, datauri):
        """
        Main function
        """
        print(f'Capturing {vsuri} with data from {datauri}')
        try:
            self._getCurrentObjects(vsid, datauri)
            wndname = f'**ESC to STOP** [{vsuri}] '
            cv2.namedWindow(wndname, cv2.WINDOW_NORMAL)
            cam = cv2.VideoCapture(vsuri)
            while True:
                ret, img = cam.read()
                if ret:
                    self._lock.acquire()
                    objs = self._currentObjects.copy()
                    self._lock.release()
                    for o in objs:
                        c, s, p1, p2 = o
                        clr = self._getColor(c)
                        ih, iw, _ = img.shape
                        wndrect = cv2.getWindowImageRect(wndname)
                        #cv2.resizeWindow(wndname, iw, ih)
                        #cv2.rectangle(img, (l, t), (r, b), (r,g,b))
                        cv2.rectangle(img, p1, p2, clr, 2)
                        cv2.putText(img, f'c:{c},s:{s}', p1,
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, clr, 1)
                    cv2.imshow(wndname, img)
                    if cv2.waitKey(1) == 27:
                        break
                else:
                    # stop if the stream ends or a frame read fails
                    break

            cam.release()
            cv2.destroyAllWindows()
        except Exception as e:
            print(e)
            traceback.print_tb(e.__traceback__)

        self._stopping = True
Example #27
    def analyze(self, frame):
        # We send this frame to GazeTracking to analyze for gaze direction
        self.gaze_tr.refresh(frame)

        screen_x, screen_y = None, None

        # calibrate iris_detection and annotate frame with pupil-landmarks
        if not self.iris_calib.is_complete():
            cam_frame = self.gaze_tr.annotated_frame()
            cv2.imshow(self.calib_window, cam_frame)

        # calibrate the mapping from pupil to screen coordinates
        elif not self.gaze_calib.is_completed():
            rect = cv2.getWindowImageRect(self.calib_window)
            cv2.moveWindow(self.calib_window, -rect[0], -rect[1])
            calib_frame = self.gaze_calib.calibrate_gaze(self.webcam_estate)
            cv2.imshow(self.calib_window, calib_frame)

        # test the mapping
        elif not self.gaze_calib.is_tested():
            calib_frame = self.gaze_calib.test_gaze(self.pog,
                                                    self.webcam_estate)
            cv2.imshow(self.calib_window, calib_frame)

        # continue to unobtrusively estimate eye point of gaze
        else:
            if not self.windows_closed:
                # get the calibration window out of the way
                icon_sz = 50
                cv2.resizeWindow(self.calib_window, icon_sz, icon_sz)
                cv2.moveWindow(self.calib_window,
                               self.monitor['width'] - icon_sz,
                               self.monitor['height'] - icon_sz)
                self.windows_closed = True
            screen_x, screen_y = self.pog.point_of_gaze(self.webcam_estate)

        return screen_x, screen_y
Example #28
 def run(self, cb):
     blank = np.zeros((H, W, C), dtype=np.uint8)
     cv2.namedWindow(WIN, cv2.WINDOW_NORMAL)
     cv2.setMouseCallback(WIN, cb.play_cb)
     i = 0
     global g_ctx
     while g_ctx.img_name is None:
         time.sleep(0.1)
     blank = g_ctx.img.copy()
     hh, ww, cc = blank.shape
     cv2.resizeWindow(WIN, ww, hh)
     while (g_ctx.cv_quit == 0):
         if (cv2.getWindowProperty(WIN, 0) < 0):
             break
         blk = blank.copy()
         i += 1
         #cv2.putText(blk, "%d" % i, 0.6, 1, 4, 1)
         #print(i, cb.cursor, cb.pt0, cb.pt1, cb.pt2, cb.pt3)
         blk, sz = run_resize_image(blk, WIN, ww, hh, lock=1)
         if cb.cur_time > 0:
             k = cb.getPtsList()
             if cb.cur_time < 4:
                 k[cb.cur_time] = cb.cursor
                 for l in range(cb.cur_time):
                     #print(k[l], k[l+1])
                     cv2.line(blk, tuple(k[l]), tuple(k[l + 1]),
                              (0, 170, 0), 3)
             if cb.cur_time == 4:
                 cv2.polylines(blk, np.array([k]), 1, [0, 170, 255], 10)
         _, _, sw, sh = cv2.getWindowImageRect(WIN)
         cb.setSize([sw, sh])
         cv2.imshow(WIN, blk)
         key = cv2.waitKey(30)
         if (key == ord('q')):
             break
     cv2.destroyAllWindows()
Example #29
    def __call__(self, frame):

        if self.screen_shape is None \
                or self.screen_shape[0] != frame.shape[0] or self.screen_shape[1] != frame.shape[1]:
            self.screen_shape = frame.shape[:2]
            cv2.setMouseCallback(FrameProcessor._WINDOW_LABEL,
                                 self.mouse_callback,
                                 param=self.screen_shape)

        current_time = datetime.datetime.now()
        screenshot = False

        if self.multiscreen:
            multiframe = np.zeros(frame.shape, dtype=np.uint8)
            x_mid, y_mid = frame.shape[1] // 2, frame.shape[0] // 2
            multiframe[:y_mid, :x_mid, :] = cv2.resize(frame, (x_mid, y_mid))

        if not self.moving:

            if self.detector is not None:
                rects, (frame_delta, frame_binary,
                        motion_rects) = self.detector(frame)
                if self.multiscreen:
                    frame_delta_small = cv2.resize(frame_delta, (x_mid, y_mid))
                    frame_binary_small = cv2.resize(frame_binary,
                                                    (x_mid, y_mid))
                    for channel in range(frame.shape[2]):
                        multiframe[y_mid:, :x_mid, channel] = frame_delta_small
                        multiframe[y_mid:, x_mid:,
                                   channel] = frame_binary_small
                    FrameProcessor.add_rects_to_multiscreen(
                        multiframe, motion_rects,
                        FrameProcessor._MOTION_BOX_COLOR)
                    FrameProcessor.add_rects_to_multiscreen(
                        multiframe, rects,
                        FrameProcessor._MOTION_MERGED_BOX_COLOR)
            else:
                # if no movement detector provided, the whole frame is a single input for recognizer
                motion_rects = rects = [[0, 0, frame.shape[1], frame.shape[0]]]

            # inspect subframes with movement
            if self.recognizer is not None:
                if rects:
                    boxes = self.recognizer(frame, rects)
                    description = []
                    for box in boxes:
                        top_classes = [(self.class_names[c], box[5 + c])
                                       for c in np.argsort(box[5:])[::-1]
                                       if box[5 + c] > self.min_class_conf]
                        if len(top_classes) > 0:
                            name = top_classes[0][0]
                            background = name in self.background_names
                            blind_boxes = self.background_boxes[
                                name] if name in self.background_boxes else None
                            if not background and blind_boxes is not None:
                                for blind_box in blind_boxes:
                                    overlap_area = (max(box[0], blind_box[0]) -
                                                    min(box[0] + box[2], blind_box[0] + blind_box[2])) * \
                                                   (max(box[1], blind_box[1]) -
                                                    min(box[1] + box[3], blind_box[1] + blind_box[3]))
                                    # which part of detected box is contained in blind box
                                    overlap_part = overlap_area / (
                                        (box[2] + 1) * (box[3] + 1))
                                    if overlap_part > self.background_overlap:
                                        background = True
                                        self.logger.debug(
                                            '[%s] background (%.3f) %s: %s' %
                                            (FrameProcessor.get_filename(
                                                current_time), overlap_part,
                                             name, box[:4]))
                                        break

                            # check if recognized objects have intersection with detected motions
                            if self.detector is not None:
                                if np.max([
                                        self.detector.intersect(box, r)
                                        for r in motion_rects
                                ]) == 0:
                                    background = True

                            if self.multiscreen:
                                FrameProcessor.add_rects_to_multiscreen(
                                    multiframe, [box[:4]],
                                    self.class_colors[name])

                            if not background:
                                screenshot = True

                            description += [(top_classes, box[:5])]

                    if screenshot:

                        description_ext = {
                            'movements':
                            rects,
                            'objects': [(dict(top_classes), box)
                                        for top_classes, box in description]
                        }

                        if self.raw_screenshot_dir:
                            self.save_raw_frame_with_description(
                                current_time, frame, description_ext)

                        for top_classes, box in description:
                            name = top_classes[0][0]
                            FrameProcessor.put_text(
                                frame, '%.1f%%' % (box[4] * 100),
                                box[0] + box[2] // 2, box[1],
                                self.class_colors[name],
                                FrameProcessor._BACKGROUND_COLOR,
                                cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, 1,
                                'bottom center')

                            offset = 0
                            for n, c in top_classes:
                                _, h, b = FrameProcessor.put_text(
                                    frame, '%s: %.1f%%' % (n, c * 100),
                                    box[0] + box[2], box[1] + offset,
                                    self.class_colors[n],
                                    FrameProcessor._BACKGROUND_COLOR,
                                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, 1,
                                    'top left')
                                offset += h + b
                            cv2.rectangle(frame, (box[0], box[1]),
                                          (box[0] + box[2], box[1] + box[3]),
                                          self.class_colors[name], 1)

                        self.logger.info(
                            '[%s] %s' %
                            (FrameProcessor.get_filename(current_time),
                             description_ext))

            if self.detector is not None:
                # draw rectangles around subframes with movement
                for x, y, w, h in motion_rects:
                    cv2.rectangle(frame, (x, y), (x + w, y + h),
                                  FrameProcessor._MOTION_MERGED_BOX_COLOR, 1)
                self.logger.debug('movement detected at %s' % rects)

                # text in the left top of the screen
                cv2.putText(
                    frame, 'Moving object detected' if len(rects) > 0 else
                    'All clear!', (10, 30), cv2.FONT_HERSHEY_COMPLEX, 1,
                    FrameProcessor._SYSTEM_COLOR, 2)

        else:
            cv2.putText(frame, 'Camera moving', (10, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 1,
                        FrameProcessor._SYSTEM_COLOR, 2)

        # timestamp in the left bottom of the screen
        FrameProcessor.put_text(
            frame,
            current_time.strftime('%A %d %B %Y %H:%M:%S.%f')[:-3], 10,
            frame.shape[0] - 10, FrameProcessor._SYSTEM_COLOR,
            FrameProcessor._BACKGROUND_COLOR, cv2.FONT_HERSHEY_COMPLEX_SMALL,
            1, 1)

        key = cv2.waitKeyEx(1)

        # if the 'q' key is pressed, break from the loop
        if key == ord('q'):
            return False

        # save frame if moving object detected or 's' key is pressed
        if (screenshot or key == ord(' ')) and self.screenshot_dir:
            self.save_frame(current_time, frame)

        # switch between show/hide background objects
        if key == ord('b'):
            self.show_background = not self.show_background

        if key == ord('c'):
            if self.show_background:
                names = list(self.background_boxes.keys())
                if names:
                    if self.show_background_class is None:
                        self.show_background_class = names[0]
                    else:
                        idx = names.index(self.show_background_class)
                        if idx < 0:
                            self.logger.debug(
                                'background name %s not found in background boxes'
                                % self.show_background_class)
                            self.show_background_class = None
                        elif idx < len(names) - 1:
                            self.show_background_class = names[idx + 1]
                        else:
                            self.show_background_class = None

        # show background objects
        if self.show_background:
            if self.show_background_class in self.background_boxes:
                cv2.putText(
                    frame,
                    'Background zone for %s' % self.show_background_class,
                    (10, 60), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
                for box in self.background_boxes[self.show_background_class]:
                    cv2.rectangle(
                        frame, (box[0], box[1]),
                        (box[0] + box[2], box[1] + box[3]),
                        self.class_colors[self.show_background_class],
                        thickness=2)
            else:
                cv2.putText(frame, 'Background zone for all classes', (10, 60),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)

                for name, boxes in self.background_boxes.items():
                    for box in boxes:
                        cv2.rectangle(frame, (box[0], box[1]),
                                      (box[0] + box[2], box[1] + box[3]),
                                      self.class_colors[name],
                                      thickness=2)

        # add frame to multiframe
        if self.multiscreen and self.detector is not None:
            multiframe[:y_mid, x_mid:, :] = cv2.resize(frame, (x_mid, y_mid))
            screen = multiframe
        else:
            screen = frame

        if self.moving:

            # actual width and height of the window
            _, _, w_wnd, h_wnd = cv2.getWindowImageRect(
                FrameProcessor._WINDOW_LABEL)
            h, w = self.screen_shape

            # grid-line thickness scaled by the frame-to-window size ratio
            width_h, width_w = h // h_wnd, w // w_wnd

            x_l, x_r = w // 3, w * 2 // 3
            y_t, y_b = h // 3, h * 2 // 3
            cv2.line(screen, (x_l, 0), (x_l, h), FrameProcessor._GRID_COLOR,
                     width_w)
            cv2.line(screen, (x_r, 0), (x_r, h), FrameProcessor._GRID_COLOR,
                     width_w)
            cv2.line(screen, (0, y_t), (w, y_t), FrameProcessor._GRID_COLOR,
                     width_h)
            cv2.line(screen, (0, y_b), (w, y_b), FrameProcessor._GRID_COLOR,
                     width_h)

        if self.max_screen_size is not None and screen.shape[
                1] > self.max_screen_size:
            screen = cv2.resize(screen,
                                (self.max_screen_size * screen.shape[1] //
                                 screen.shape[0], self.max_screen_size))

        cv2.imshow(FrameProcessor._WINDOW_LABEL, screen)

        # switch between single screen and multiscreen modes (will take effect next frame)
        if key == ord('m'):
            self.multiscreen = not self.multiscreen

        return True
Example #30
def main(webcam_id):
    # Load a TensorFlow model
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(MODEL_FILENAME, 'rb') as f:
        graph_def.ParseFromString(f.read())
    # note: this queries a window named 'Frame', which must already exist
    print(cv2.getWindowImageRect('Frame'))
    # Load labels
    with open(LABELS_FILENAME, 'r') as f:
        labels = [l.strip() for l in f.readlines()]

    od_model = TFObjectDetection(graph_def, labels)

    cv2.namedWindow("test", cv2.WINDOW_AUTOSIZE)
    cap = cv2.VideoCapture(webcam_id)
    if not cap.isOpened():
        sys.exit("Failed to open camera")

    while True:

        if cv2.getWindowProperty("test", 0) < 0:
            print("Display window closed by user, Exiting...")
            break

        ret_val, img = cap.read()

        if not ret_val:
            print("VideoCapture.read() failed, Exiting...")
            break

        cv2.imshow("test", img)
        k = cv2.waitKey(1) & 0xFF
        if k == ord('q'):
            break
        elif k == ord('s'):
            image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            predictions = od_model.predict_image(image)

            if predictions:
                biggest_probability = 0
                for i in range(len(predictions)):
                    if predictions[i]['probability'] > predictions[
                            biggest_probability]['probability']:
                        biggest_probability = i

                ret = predictions[biggest_probability]
                print(ret)
                h, w, _ = img.shape
                prob = ret['probability']
                tagName = ret['tagName']
                bbox = ret['boundingBox']
                left = bbox['left']
                top = bbox['top']
                width = bbox['width']
                height = bbox['height']
                x1 = int(left * w)
                y1 = int(top * h)
                x2 = x1 + int(width * w)
                y2 = y1 + int(height * h)
                p0 = (max(x1, 15), max(y1, 15))

                info = "{:.2f}:-{}".format(prob, tagName)
                cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
                cv2.putText(img, info, p0, cv2.FONT_ITALIC, 0.6, (0, 255, 0),
                            2)
                result = game(tagName)
                cv2.putText(img, result, (150, 400), cv2.FONT_HERSHEY_PLAIN, 3,
                            (0, 0, 0), 2)
                cv2.imshow("test", img)

                cv2.waitKey(0)

    cap.release()
    cv2.destroyAllWindows()