Example #1
    def __init__(self):
        super().__init__("FPS", "Misc")
        self.display = ToggleLink()
        self.counter = 0
        self.last_time = time.time()
        self.update_time = 1
        self.fps = "?"
Example #2
    def __init__(self):
        super().__init__("Screen", "High Level", z_index=-100)
        self.share_screen = ToggleLink()
        self.path = Path('plugin_data/ScreenPlugin')
        os.makedirs(self.path, exist_ok=True)

        self.sct = None
        self.monitor_region = None
Example #3
    def __init__(self):
        super().__init__("Record", "High Level", z_index=1000)
        self.record = ToggleLink()
        self.path = Path('plugin_data/RecordPlugin')
        os.makedirs(self.path, exist_ok=True)

        self.icon_size = (64, 64)
        self.record_icon = self.load_icon()
        self.record_path = None
        self.image_size = None  # (height, width)
        self.writer = None
Example #4
    def __init__(self):
        super().__init__("Segmentation", "Misc", z_index=-1)
        self.network = torchvision.models.segmentation.deeplabv3_resnet50(
            pretrained=True).eval()
        self.display = ToggleLink()
        self.use_cuda = ToggleLink()
        self.preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        self.scale_factor = 1 / 4
        self.path = 'plugin_data/SegmentationPlugin'
        os.makedirs(self.path, exist_ok=True)
        self.background_path = None
        self.background = np.random.randint(0, 255, (1920, 1080, 3), np.uint8)
        self.device_mapping = {True: "cuda", False: "cpu"}
Example #5
class ScreenPlugin(Plugin):
    def __init__(self):
        super().__init__("Screen", "High Level", z_index=-100)
        self.share_screen = ToggleLink()
        self.path = Path('plugin_data/ScreenPlugin')
        os.makedirs(self.path, exist_ok=True)

        self.sct = None
        self.monitor_region = None

    def start_screen_sharing(self):
        self.sct = mss()
        self.monitor_region = self.sct.monitors[1]  # Share 1st screen

    def stop_screen_sharing(self):
        if self.sct is not None:
            self.sct.close()

    def toggle_screen_sharing(self, window):
        self.share_screen.flip()
        if self.share_screen.get():
            self.start_screen_sharing()
        else:
            self.stop_screen_sharing()

    def get_actions(self):
        return [PluginAction("Share Screen", self.toggle_screen_sharing,
                             self.share_screen)]

    def save(self):
        if self.share_screen.get():
            self.stop_screen_sharing()
        return {'share_screen': self.share_screen.get()}

    def load(self, plugin_state):
        self.share_screen.set(plugin_state.get('share_screen', False))
        if self.share_screen.get():
            self.start_screen_sharing()

    def process(self, frame):
        image_size = frame.shape[:2]
        if self.share_screen.get() and self.sct is not None:
            screen_img = np.array(self.sct.grab(self.monitor_region))[:, :, :3]
            screen_img = cv2.resize(screen_img, image_size[::-1], interpolation=cv2.INTER_CUBIC)
            screen_img = cv2.cvtColor(screen_img, cv2.COLOR_BGR2RGB)
            return screen_img
        return frame
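
A minimal standalone sketch of the same mss capture pattern that start_screen_sharing and process use above, assuming only the mss and opencv-python packages; the 1280x720 target size is an arbitrary example value, not something the plugin fixes.

# Standalone sketch (assumption: mss and opencv-python are installed).
import cv2
import numpy as np
from mss import mss

with mss() as sct:
    region = sct.monitors[1]             # monitor 1 = first physical screen
    shot = np.array(sct.grab(region))    # BGRA pixel data
    bgr = shot[:, :, :3]                 # drop the alpha channel
    rgb = cv2.cvtColor(cv2.resize(bgr, (1280, 720),
                                  interpolation=cv2.INTER_CUBIC),
                       cv2.COLOR_BGR2RGB)
    print(rgb.shape)                     # (720, 1280, 3)
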
Example #6
class FPSPlugin(Plugin):
    def __init__(self):
        super().__init__("FPS", "Misc")
        self.display = ToggleLink()
        self.counter = 0
        self.last_time = time.time()
        self.update_time = 1
        self.fps = "?"

    def get_actions(self):
        return [PluginAction("Display FPS", self.toggle_display, self.display)]

    def toggle_display(self, window):
        self.display.flip()

    def process(self, frame):
        if self.display.get():
            frame = self.write_fps(frame)
        self.counter += 1
        current = time.time()
        if current - self.last_time > self.update_time:
            self.fps = "{0:.2f} fps".format(self.counter /
                                            (current - self.last_time))
            self.last_time = current
            self.counter = 0

        return frame

    def write_fps(self, frame):
        font = cv2.FONT_HERSHEY_SIMPLEX
        xy = (10, 30)
        font_scale = 0.6
        font_color = (255, 255, 255)
        line_type = 2
        frame = cv2.flip(frame, 1)
        frame = cv2.putText(frame,
                            self.fps,
                            xy,
                            font,
                            font_scale,
                            font_color,
                            lineType=line_type)
        frame = cv2.flip(frame, 1)
        return frame

    def save(self):
        return {"display": self.display.get()}

    def load(self, plugin_state):
        self.display.set(plugin_state.get("display", False))
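
A minimal standalone sketch of the same time-windowed FPS estimate outside the plugin framework, assuming a webcam at device index 0 and an OpenCV preview window (both are illustrative choices, not part of the plugin).

# Standalone sketch (assumptions: webcam at index 0, GUI available for imshow).
import time
import cv2

cap = cv2.VideoCapture(0)
counter, last_time, fps = 0, time.time(), "?"
while True:
    ok, frame = cap.read()
    if not ok:
        break
    counter += 1
    now = time.time()
    if now - last_time > 1:              # same 1-second update window
        fps = "{0:.2f} fps".format(counter / (now - last_time))
        last_time, counter = now, 0
    cv2.putText(frame, fps, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                0.6, (255, 255, 255), lineType=2)
    cv2.imshow("fps", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
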
Example #7
class RecordPlugin(Plugin):
    def __init__(self):
        super().__init__("Record", "High Level", z_index=1000)
        self.record = ToggleLink()
        self.path = Path('plugin_data/RecordPlugin')
        os.makedirs(self.path, exist_ok=True)

        self.icon_size = (64, 64)
        self.record_icon = self.load_icon()
        self.record_path = None
        self.image_size = None  # (height, width)
        self.writer = None

    def load_icon(self):
        img = Image.open('plugins/record_plugin/record_icon.png')
        img = img.resize(self.icon_size, Image.BILINEAR)
        return np.array(img)

    def start_recording(self):
        now = datetime.now()
        self.record_path = self.path / now.strftime("%Y-%m-%d_%H-%M-%S.mp4")
        self.writer = cv2.VideoWriter(str(self.record_path),
                                      cv2.VideoWriter_fourcc(*'MP4V'), 30,
                                      self.image_size[::-1])

    def stop_recording(self):
        self.writer.release()

    def toggle_record(self, window):
        if self.image_size is None:  # Must learn image size first
            return
        self.record.flip()
        if self.record.get():
            self.start_recording()
        else:
            self.stop_recording()

    def get_actions(self):
        return [
            PluginAction("Record", self.toggle_record, self.record),
        ]

    def save(self):
        if self.record.get():
            self.stop_recording()
        return {}

    def load(self, plugin_state):
        self.record_path = None
        self.record.set(False)  # Always start with record False

    def process(self, frame):
        self.image_size = frame.shape[:2]
        if self.record.get():
            frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            frame_bgr = np.flip(frame_bgr, 1)
            self.writer.write(frame_bgr)

            # Display record icon
            corner = frame[:self.icon_size[0], :self.icon_size[1], :]
            mask = self.record_icon[:, :, 3:] / 255.0
            corner = corner * (1 - mask) + self.record_icon[:, :, :3] * mask
            corner = corner.round().astype(np.uint8)
            frame[:self.icon_size[0], :self.icon_size[1], :] = corner
        return frame
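
A minimal standalone sketch of the cv2.VideoWriter call that start_recording relies on, with a synthetic frame source and an 'out.mp4' output path as assumptions; VideoWriter expects a (width, height) size and BGR frames, which is why the plugin reverses image_size and converts from RGB before writing.

# Standalone sketch (assumptions: 'out.mp4' output path, synthetic gray frames).
import cv2
import numpy as np

height, width = 480, 640
writer = cv2.VideoWriter('out.mp4',
                         cv2.VideoWriter_fourcc(*'MP4V'), 30,
                         (width, height))                    # note: (width, height)
for i in range(90):                                          # ~3 seconds at 30 fps
    frame = np.full((height, width, 3), i % 256, np.uint8)   # BGR frame
    writer.write(frame)
writer.release()
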
Example #8
class SegmentationPlugin(Plugin):
    def __init__(self):
        super().__init__("Segmentation", "Misc", z_index=-1)
        self.network = torchvision.models.segmentation.deeplabv3_resnet50(
            pretrained=True).eval()
        self.display = ToggleLink()
        self.use_cuda = ToggleLink()
        self.preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        self.scale_factor = 1 / 4
        self.path = 'plugin_data/SegmentationPlugin'
        os.makedirs(self.path, exist_ok=True)
        self.background_path = None
        self.background = np.random.randint(0, 255, (1920, 1080, 3), np.uint8)
        self.device_mapping = {True: "cuda", False: "cpu"}

    def toggle_display(self, window):
        self.display.flip()

    def change_device(self, window):
        if not self.use_cuda.get() and not torch.cuda.is_available():
            mbox = QMessageBox(window)
            mbox.setText("No CUDA compatible device available")
            mbox.setWindowTitle(self.plugin_name)
            mbox.setIcon(QMessageBox.Warning)
            mbox.show()
        else:
            self.use_cuda.flip()

    def get_actions(self):
        return [
            PluginAction("Active", self.toggle_display, self.display),
            PluginAction("Use GPU acceleration", self.change_device,
                         self.use_cuda),
            PluginAction("Select background", self.select_background, False)
        ]

    def save(self):
        return {
            "background_path": self.background_path,
            "display": self.display.get(),
            "use_cuda": self.use_cuda.get()
        }

    def load(self, plugin_state):
        self.background_path = plugin_state.get("background_path", None)
        self.display.set(plugin_state.get("display", False))
        self.use_cuda.set(
            plugin_state.get("use_cuda", False) and torch.cuda.is_available())

        self.load_background(self.background_path)

    def load_background(self, file_path):
        if file_path is not None:
            img = cv2.imread(file_path)
            if img is not None:
                self.background = img
                return True
            else:
                video = cv2.VideoCapture(file_path)
                if video.isOpened():
                    self.background = video
                    return True
        self.background = np.random.randint(0, 255, (1920, 1080, 3), np.uint8)
        return False

    def get_background_frame(self):
        if isinstance(self.background, cv2.VideoCapture):
            ret, frame = self.background.read()
            if ret:
                return frame
            else:
                self.load_background(self.background_path)
                return self.get_background_frame()
        else:
            return self.background

    def select_background(self, window):
        file_name, _ = QtWidgets.QFileDialog.getOpenFileName(
            window, "Select Image or Video", "")
        if self.load_background(file_name):
            self.background_path = os.path.join(
                self.path,
                str(time.time()) + os.path.splitext(file_name)[1])
            shutil.copy(file_name, self.background_path)
        self.display.set(True)

    def get_mask(self, input_image):
        input_tensor = self.preprocess(input_image)
        device = self.device_mapping[self.use_cuda.get()]
        self.network.to(device)
        input_batch = input_tensor.unsqueeze(0).to(device)
        with torch.no_grad():
            output = self.network(input_batch)['out'][0]
        output_predictions = output.argmax(0).byte().cpu().numpy()
        mask = output_predictions == 15
        return mask

    def process(self, frame):
        if self.display.get():
            input_image = cv2.resize(frame,
                                     None,
                                     fx=self.scale_factor,
                                     fy=self.scale_factor)
            input_image = Image.fromarray(input_image)
            mask = self.get_mask(input_image)
            mask = cv2.resize((mask * 255).astype(np.uint8),
                              frame.shape[:2][::-1],
                              interpolation=cv2.INTER_CUBIC)
            kernel = np.ones((7, 7), np.uint8)
            mask = cv2.erode(mask, kernel, iterations=1)
            kernel = kernel / kernel.sum()
            mask = cv2.filter2D(mask, -1, kernel)
            mask = Image.fromarray(mask)
            background = self.get_background_frame()[:, ::-1, ::-1]
            if background.shape != frame.shape:
                background = np.array(
                    crop_center(Image.fromarray(background),
                                *frame.shape[:2][::-1]))
            frame = np.array(
                Image.composite(Image.fromarray(frame),
                                Image.fromarray(background), mask))
        return frame
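
A minimal standalone sketch of the person-mask step in get_mask, assuming a local 'photo.jpg' as input; class index 15 is "person" in the Pascal VOC label set that torchvision's DeepLabV3 models predict.

# Standalone sketch (assumption: 'photo.jpg' exists in the working directory).
import torch
import torchvision
from torchvision import transforms
from PIL import Image

# pretrained=True mirrors the plugin; newer torchvision prefers the weights= argument.
model = torchvision.models.segmentation.deeplabv3_resnet50(
    pretrained=True).eval()
preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
img = Image.open('photo.jpg').convert('RGB')
batch = preprocess(img).unsqueeze(0)
with torch.no_grad():
    out = model(batch)['out'][0]                 # (21, H, W) class scores
mask = out.argmax(0).byte().cpu().numpy() == 15  # 15 = "person" (VOC labels)
print(mask.shape, mask.mean())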