Beispiel #1
0
class ScrapyGUI(QMainWindow):
    """Main window of the Weibo scraper GUI.

    Connects the module-level scraper signal objects (ss, vgs, us, ts)
    to this window's slots and builds the menu / list-view UI.
    """

    def __init__(self):
        # ss/vgs/us/ts are module-level signal holders defined elsewhere in
        # the file; each exposes a `ps` signal emitted by background scrapers.
        global ss, vgs, us, ts
        super().__init__()
        self.initUI()

        # User-search results arrive as strings (JSON or sentinels, see search_user).
        ss.ps.connect(self.search_user)
        # vgs.ps carries a value that start_user converts to int (a user id).
        vgs.ps.connect(self.start_user)
        # Per-user crawl progress: (flag, value) pairs, see run_user.
        us.ps.connect(self.run_user)

        # Topic-crawl completion message.
        ts.ps.connect(self.topic_finished)

    def initUI(self):
        """Build the status bar, menus, actions and the result list view."""
        self.statusBar().showMessage('准备就绪')
        self.setGeometry(100, 100, 800, 600)
        self.setWindowTitle('关注微信公众号:月小水长')
        self.setWindowIcon(QIcon('logo.jpg'))

        # "Find people" action: search Weibo users by nickname (Ctrl+P).
        fpAct = QAction(QIcon('fp.png'), '找人(&FP)', self)
        fpAct.setShortcut('Ctrl+P')
        fpAct.setStatusTip('请输入微博用户昵称')
        fpAct.triggered.connect(self.findUser)

        # "Search posts" action: search Weibo by content keyword (Ctrl+B).
        fbAct = QAction(QIcon('fb.png'), '搜微博(&FB)', self)
        fbAct.setShortcut('Ctrl+B')
        fbAct.setStatusTip('请输入微博内容关键词')
        fbAct.triggered.connect(self.findTopic)

        menubar = self.menuBar()
        weiboMenu = menubar.addMenu('微博(&B)')
        weiboMenu.addAction(fpAct)
        weiboMenu.addSeparator()
        weiboMenu.addAction(fbAct)

        # "Settings" menu is created but (in this block) has no actions.
        settingsMenu = menubar.addMenu('设置(&S)')

        # "About author" action (Ctrl+A).
        aaAct = QAction(QIcon('aa.png'), '关于作者(&AA)', self)
        aaAct.setShortcut('Ctrl+A')
        aaAct.setStatusTip('产品作者的详细信息')
        aaAct.triggered.connect(self.aboutAuthor)

        aboutMenu = menubar.addMenu('关于(&A)')
        aboutMenu.addAction(aaAct)

        aboutMenu.addSeparator()

        # "Open official site" action (Ctrl+O).
        oaAct = QAction(QIcon('oa.png'), '打开官网(&OA)', self)
        oaAct.setShortcut('Ctrl+O')
        oaAct.setStatusTip('打开产品官网')
        oaAct.triggered.connect(self.openAuthority)
        aboutMenu.addAction(oaAct)

        self.pListView = ListView(self)
        self.pListView.setViewMode(QListView.ListMode)
        self.pListView.setStyleSheet("QListView{icon-size:70px}")

        # Initial geometry; kept in sync with the window in resizeEvent().
        self.pListView.setGeometry(0, 20, 800, 560)

        self.show()

    def findUser(self):
        """Prompt for a user nickname and start a user-search crawl."""
        # NOTE(review): `filter` is declared global but never assigned or read
        # in this method (and shadows the builtin) — looks like leftover code.
        global filter
        group = QInputDialog.getText(self, "输入用户昵称", "")
        # group is (text, ok_flag); only the entered text is used here.
        self.searchedUser = group[0]
        if (len(group[0]) > 0):
            self.pListView.clearData()
            WeiboSearchScrapy(keyword=group[0])

    def findTopic(self):
        """Prompt for a topic keyword and submit a topic crawl job."""
        dialog = MyDialog(self, info='主题')
        dialog.show()
        if (dialog.exec_() == QDialog.Accepted):
            print(dialog.getData())
            group = dialog.getData()
            topic = group[0]
            # group[1] is a boolean flag from the dialog, mapped to 1/0
            # for the scraper's `filter` argument.
            filter = 1 if group[1] == True else 0
            WeiboTopicScrapy(keyword=topic, filter=filter)
            QMessageBox.about(self, "提示",
                              "已成功将抓取【{}】主题的任务提交后台,结束会通知您".format(topic))

    def aboutAuthor(self):
        """Show the 'about the author' message box."""
        QMessageBox.about(self, "作者介绍",
                          "简介:985计算机本科在读\nQQ:2391527690\n微信公众号:月小水长")

    def openAuthority(self):
        """Open the product homepage in the default web browser."""
        webbrowser.open("https://inspurer.github.io/")

    def search_user(self, msg):
        """Handle one user-search signal payload.

        msg is either the sentinel 'EOF' (all results loaded), 'NetError'
        (network failure) or a JSON string describing one result item.
        """
        print(msg)
        if msg == 'EOF':
            QMessageBox.about(self, '提示',
                              '【{}】相关的用户信息加载完毕'.format(self.searchedUser))
            return
        elif msg == 'NetError':
            QMessageBox.warning(self, '警告', '请先检查电脑联网情况')
            return
        data = json.loads(msg)
        self.pListView.addItem(data)
        self.show()

    # Window resize hook: keep the list view sized to the window.
    def resizeEvent(self, *args, **kwargs):
        w, h = self.width(), self.height()
        self.pListView.setGeometry(0, 20, w, h - 40)

    def start_user(self, msg):
        """Ask whether to crawl only original posts, then start a user crawl.

        msg is a string convertible to int (the user id).
        """
        print('wwww', msg)
        res = QMessageBox.question(self, "提示", "只抓取原创微博吗?")
        if res == QMessageBox.Yes:
            WeiboUserScrapy(user_id=int(msg), filter=1)
        else:
            WeiboUserScrapy(user_id=int(msg), filter=0)

    def run_user(self, flag, value):
        """Update the crawl progress dialog.

        flag is 'start' (value = total page count, creates the dialog) or
        'run' (value = current page number, advances it).
        """
        print(flag, value)
        if flag == 'start':
            self.totalPageNum = value
            self.progress = QProgressDialog(self)
            self.progress.setWindowTitle("请稍等")
            self.progress.setLabelText("正在准备抓取...")
            '''
            如果任务的预期持续时间小于minimumDuration,则对话框根本不会出现。这样可以防止弹出对话框,快速完成任务。对于预期超过minimumDuration的任务,对话框将在minimumDuration时间之后或任何进度设置后立即弹出。
            如果设置为0,则只要设置任何进度,将始终显示对话框。 默认值为4000毫秒,即4秒。
            '''
            # (Note above, translated): if the task finishes faster than
            # minimumDuration the dialog never appears; with 1 ms it shows as
            # soon as any progress is set (Qt's default is 4000 ms).
            self.progress.setMinimumDuration(1)
            self.progress.setWindowModality(Qt.WindowModal)
            # Remove the cancel button.
            self.progress.setCancelButtonText(None)
            self.progress.setRange(0, self.totalPageNum)
            self.progress.setValue(1)
        if flag == 'run':
            self.progress.setValue(value)
            self.progress.setLabelText("正在抓取第{}/{}页".format(
                value, self.totalPageNum))

            if value == self.totalPageNum:
                self.progress.destroy()
                QMessageBox.about(self, "提示", "抓取结束")

    def topic_finished(self, msg):
        """Show the completion message emitted by a finished topic crawl."""
        QMessageBox.about(self, "提示", msg)
        ...
Beispiel #2
0
class FilterWidget(QGroupBox):
    """Widget that configures and runs frame filters over a video capture.

    Three optional filters (sharpness, cross-correlation, optical flow) are
    applied in sequence; each produces a boolean per-frame acceptance mask.
    The result can be previewed or exported to an AVI file. Long-running
    work executes on QThreads via ProgressWorker.
    """

    # Emitted with the video file handle when the widget is closed.
    closed = pyqtSignal([str])

    def __init__(self, cv_video_cap):
        super(FilterWidget, self).__init__()
        self.cv_video_cap = cv_video_cap  # type: CVVideoCapture
        self.ui = Ui_FilterWidget()
        self.ui.setupUi(self)
        self.setWindowTitle(os.path.basename(self.cv_video_cap.file_handle))
        self.setTitle('Filter -  %s [%d]frames@[%d]fps' %
                      (self.cv_video_cap.file_handle,
                       self.cv_video_cap.get_frame_count(),
                       self.cv_video_cap.get_frame_rate()))
        # Default sharpness window = frame rate (presumably one second of
        # frames — TODO confirm intent).
        self.ui.spinBoxFilterSharpness_windowSize.setValue(
            self.cv_video_cap.get_frame_rate())
        # Defaults for the optical-flow feature detection and the
        # Lucas-Kanade tracker (passed to CVOpticalFlow in prepare_filters).
        self.opticalflow_feature_params = dict(maxCorners=500,
                                               qualityLevel=0.3,
                                               minDistance=7,
                                               blockSize=7)
        self.opticalflow_lk_params = dict(
            winSize=(15, 15),
            maxLevel=2,
            criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                      0.03))
        # Boolean per-frame acceptance mask; None until filters have run.
        self.frame_acceptance_np = None
        # Per-filter state dicts built by prepare_filters(); None = disabled.
        self.sharpness_filter = None
        self.correlation_filter = None
        self.opticalflow_filter = None
        # Human-readable status lines shown by update_filter_status().
        self.sharpness_filter_status = ""
        self.correlation_filter_status = ""
        self.opticalflow_filter_status = ""
        # Worker objects / threads for filtering and exporting.
        self.filter_worker = None
        self.filter_worker_thread = None
        self.export_worker = None
        self.export_worker_thread = None

        self.worker_progressdialog = None  # type: QProgressDialog
        self.ui.pushButtonFilterGlobal_run.clicked.connect(
            self.pushButtonFilterGlobal_run_clicked)
        self.ui.pushButtonFilterParams_save.clicked.connect(
            self.pushButtonFilterParams_save_clicked)
        self.ui.pushButtonFilterParams_load.clicked.connect(
            self.pushButtonFilterParams_load_clicked)
        self.ui.pushButtonFilterGlobal_preview.clicked.connect(
            self.pushButtonFilterGlobal_preview_clicked)
        self.ui.pushButtonFilterGlobal_export.clicked.connect(
            self.pushButtonFilterGlobal_export_clicked)

        # Preview windows, created lazily by the preview button handler.
        self.playback_widget = None
        self.playback_control_widget = None
        self.filtered_video_capture = None

        self.update_filter_status()

    def closeEvent(self, e):
        """Emit `closed` with the file handle, then do default close handling."""
        self.closed.emit(self.cv_video_cap.file_handle)
        super(FilterWidget, self).closeEvent(e)

    @property
    def params_batch_count(self):
        """Batch size from the UI, clamped to the frame count.

        NOTE: this property has a side effect — it writes the clamped value
        back into the spin box.
        """
        val = min(self.ui.spinBoxFilterGlobal_batchSize.value(),
                  self.cv_video_cap.get_frame_count())
        val = int(val)
        self.ui.spinBoxFilterGlobal_batchSize.setValue(val)
        return val

    def load_params_batch_count(self, val):
        """Write a loaded batch size into the UI, clamped to the frame count."""
        val = min(val, self.cv_video_cap.get_frame_count())
        val = int(val)
        self.ui.spinBoxFilterGlobal_batchSize.setValue(val)

    @property
    def params_sharpness(self):
        """Current sharpness-filter parameters read from the UI."""
        params = {
            'enabled': self.ui.groupBoxFilterSharpness.isChecked(),
            'z_score': self.ui.doubleSpinBoxFilterSharpness_zscore.value(),
            'window_size': self.ui.spinBoxFilterSharpness_windowSize.value(),
        }
        return params

    def load_params_sharpness(self, params):
        """Push loaded sharpness parameters into the UI controls."""
        self.ui.groupBoxFilterSharpness.setChecked(params['enabled'])
        self.ui.doubleSpinBoxFilterSharpness_zscore.setValue(params['z_score'])
        self.ui.spinBoxFilterSharpness_windowSize.setValue(
            params['window_size'])

    @property
    def params_correlation(self):
        """Current correlation-filter parameters read from the UI."""
        params = {
            'enabled': self.ui.groupBoxFilterCorrelation.isChecked(),
            'threshold':
            self.ui.doubleSpinBoxFilterCorrelation_threshold.value()
        }
        return params

    def load_params_correlation(self, params):
        """Push loaded correlation parameters into the UI controls."""
        self.ui.groupBoxFilterCorrelation.setChecked(params['enabled'])
        self.ui.doubleSpinBoxFilterCorrelation_threshold.setValue(
            params['threshold'])

    @property
    def params_opticalflow(self):
        """Current optical-flow parameters (UI values + stored cv2 params)."""
        params = {
            'enabled': self.ui.groupBoxFilterOpticalFlow.isChecked(),
            'threshold':
            self.ui.doubleSpinBoxFilterOpticalFlow_threshold.value(),
            'opticalflow_params': {
                'feature_params': self.opticalflow_feature_params,
                'lk_params': self.opticalflow_lk_params
            }
        }
        return params

    def load_params_opticalflow(self, params):
        """Push loaded optical-flow parameters into the UI and this object."""
        self.ui.groupBoxFilterOpticalFlow.setChecked(params['enabled'])
        self.ui.doubleSpinBoxFilterOpticalFlow_threshold.setValue(
            params['threshold'])
        self.opticalflow_feature_params = params['opticalflow_params'][
            'feature_params']
        self.opticalflow_lk_params = params['opticalflow_params']['lk_params']

    def save_params(self, filepath):
        """Pickle all current filter parameters to `filepath`."""
        with open(filepath, 'wb') as f:
            pickle.dump(
                {
                    'batch_count': self.params_batch_count,
                    'sharpness': self.params_sharpness,
                    'correlation': self.params_correlation,
                    'opticalflow': self.params_opticalflow
                }, f)
        pass

    def load_params(self, filepath):
        """Load pickled filter parameters from `filepath` into the UI.

        SECURITY NOTE(review): pickle.load executes arbitrary code if the
        file is untrusted — only load parameter files you created.
        """
        with open(filepath, 'rb') as f:
            p = pickle.load(f)
            if p is not None:
                self.load_params_batch_count(p['batch_count'])
                self.load_params_sharpness(p['sharpness'])
                self.load_params_correlation(p['correlation'])
                self.load_params_opticalflow(p['opticalflow'])

    def prepare_filters(self):
        """Build the per-filter state dicts from the current UI parameters.

        For each enabled filter the dict holds: the filter object, the
        current params, any previously saved params/calculation/acceptance
        loaded from companion files, and an 'acceptance' slot filled in by
        the run worker. Disabled filters are set to None.
        """
        self.sharpness_filter_status = ""
        self.correlation_filter_status = ""
        self.opticalflow_filter_status = ""
        if self.params_sharpness['enabled']:
            sharpness = CVSharpness()
            self.sharpness_filter = {
                'filter':
                sharpness,
                'params':
                self.params_sharpness,
                'params_loaded':
                sharpness.load_params_file(self.cv_video_cap),
                'calculation':
                sharpness.load_calculation_file(self.cv_video_cap),
                'acceptance':
                None,
                'acceptance_loaded':
                sharpness.load_acceptance_file(self.cv_video_cap),
            }
        else:
            self.sharpness_filter = None
        if self.params_correlation['enabled']:
            correlation = CVCorrelation()
            self.correlation_filter = {
                'filter':
                correlation,
                'params':
                self.params_correlation,
                'params_loaded':
                correlation.load_params_file(self.cv_video_cap),
                'acceptance':
                None,
                'acceptance_loaded':
                correlation.load_acceptance_file(self.cv_video_cap),
            }
        else:
            self.correlation_filter = None
        if self.params_opticalflow['enabled']:
            optical_flow_params = self.params_opticalflow['opticalflow_params']
            opticalflow = CVOpticalFlow(optical_flow_params['feature_params'],
                                        optical_flow_params['lk_params'])
            self.opticalflow_filter = {
                'filter':
                opticalflow,
                'params':
                self.params_opticalflow,
                'params_loaded':
                opticalflow.load_params_file(self.cv_video_cap),
                'acceptance':
                None,
                'acceptance_loaded':
                opticalflow.load_acceptance_file(self.cv_video_cap),
            }
        else:
            self.opticalflow_filter = None

    def create_progressbar_dialog(self, title):
        """Show a modal progress dialog scaled 0..1000 (see value updater)."""
        self.worker_progressdialog = QProgressDialog(title, None, 0, 1000,
                                                     self)
        self.worker_progressdialog.setWindowTitle('Progress')
        self.worker_progressdialog.setMinimumWidth(500)
        self.worker_progressdialog.setWindowModality(QtCore.Qt.WindowModal)
        self.worker_progressdialog.setAutoClose(False)
        self.worker_progressdialog.setAutoReset(False)
        self.worker_progressdialog.setValue(1000)
        self.worker_progressdialog.show()

    def update_progressbar_dialog_title(self, title):
        """Update the progress dialog's label and refresh filter status lines."""
        self.worker_progressdialog.setLabelText(title)
        self.update_filter_status()

    def update_progressbar_dialog_value(self, value):
        """Map a 0.0-1.0 progress fraction onto the dialog's 0-1000 range."""
        self.worker_progressdialog.setValue(min(round(value * 1000), 1000))

    def destroy_progressbar_dialog(self):
        """Close and destroy the progress dialog."""
        self.worker_progressdialog.close()
        self.worker_progressdialog.destroy()

    def update_filter_status(self):
        """Render per-filter status strings into the UI ('N/A' when empty)."""
        self.ui.labelFilterSharpness_status.setText(
            "N/A" if self.sharpness_filter_status ==
            "" else self.sharpness_filter_status)
        self.ui.labelFilterCorrelation_status.setText(
            "N/A" if self.correlation_filter_status ==
            "" else self.correlation_filter_status)
        self.ui.labelFilterOpticalFlow_status.setText(
            "N/A" if self.opticalflow_filter_status ==
            "" else self.opticalflow_filter_status)

    def pushButtonFilterParams_load_clicked(self):
        """Ask for a .pickled_params file and load parameters from it."""
        filename = QFileDialog.getOpenFileName(
            self,
            'Open saved filter params',
            os.path.dirname(os.path.abspath(self.cv_video_cap.file_handle)),
            filter='*.pickled_params')[0]
        if os.path.exists(filename):
            self.load_params(filename)

    def pushButtonFilterParams_save_clicked(self):
        """Ask for a target path and save parameters as .pickled_params."""
        filename = QFileDialog.getSaveFileName(
            self,
            'Save filter params',
            os.path.dirname(os.path.abspath(self.cv_video_cap.file_handle)),
            filter='*.pickled_params')[0]
        if not filename.endswith(".pickled_params"):
            filename += ".pickled_params"
        self.save_params(filename)

    def pushButtonFilterGlobal_preview_clicked(self):
        """Open playback windows showing only the accepted frames."""
        # Tear down any previous preview windows first.
        if self.playback_widget:
            self.playback_widget.close()
            self.playback_widget = None
        if self.playback_control_widget:
            self.playback_control_widget.close()
            self.playback_control_widget = None

        # Filters must have produced an acceptance mask before previewing.
        if self.frame_acceptance_np is None:
            msgbox = QMessageBox(self)
            msgbox.setWindowTitle('Error')
            msgbox.setIcon(QMessageBox.Warning)
            msgbox.setText('Nothing to preview.\nPlease run filters first!')
            msgbox.show()
            return

        self.filtered_video_capture = CVArrayFilteredVideoCapture(
            self.cv_video_cap, 1, self.frame_acceptance_np)
        self.playback_widget = VideoPlaybackWidget()
        self.playback_control_widget = VideoPlaybackControlWidget(
            self.filtered_video_capture)
        self.playback_widget.show()
        self.playback_control_widget.incomingFrame.connect(
            self.playback_widget.on_incomingFrame)
        self.playback_control_widget.show()
        self.playback_widget.setWindowTitle('[Preview] ' + self.windowTitle())
        self.playback_control_widget.setWindowTitle('[Control] ' +
                                                    self.windowTitle())
        # Close the preview windows when this widget closes.
        self.closed.connect(self.playback_widget.close)
        self.closed.connect(self.playback_control_widget.close)

    def pushButtonFilterGlobal_export_clicked(self):
        """Export the accepted frames to an AVI file on a worker thread."""
        # Tear down any open preview windows first.
        if self.playback_widget:
            self.playback_widget.close()
            self.playback_widget = None
        if self.playback_control_widget:
            self.playback_control_widget.close()
            self.playback_control_widget = None

        # Filters must have produced an acceptance mask before exporting.
        if self.frame_acceptance_np is None:
            msgbox = QMessageBox(self)
            msgbox.setWindowTitle('Error')
            msgbox.setIcon(QMessageBox.Warning)
            msgbox.setText('Nothing to export.\nPlease run filters first!')
            msgbox.show()
            return

        filename = QFileDialog.getSaveFileName(
            self,
            'Export video',
            os.path.dirname(os.path.abspath(self.cv_video_cap.file_handle)),
            filter='*.avi')[0]  # type: str
        if not filename:
            return
        if not filename.lower().endswith(".avi"):
            filename += ".avi"
        if os.path.exists(filename):
            msg = "Are you really sure you want to overwrite the file?\n%s" % filename
            reply = QMessageBox.question(self, 'One sec...', msg,
                                         QMessageBox.Yes, QMessageBox.No)
            if reply != QMessageBox.Yes:
                return

        self.create_progressbar_dialog('Loading...')
        print('Write to file [%s]' % filename)
        # NOTE(review): output is hard-coded to XVID at 2 fps — confirm this
        # frame rate is intentional rather than the source video's rate.
        codec = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(filename, codec, 2,
                              (int(self.cv_video_cap.get_frame_width()),
                               int(self.cv_video_cap.get_frame_height())))

        def worker_function(progress_changed, state_changed):
            # Runs on the export worker thread; emits progress/state signals.
            state_changed.emit('Preparing to export...')
            # Indices of frames accepted by the filter cascade.
            key_frames = np.where(self.frame_acceptance_np)[0]
            num_key_frames = len(key_frames)
            progress = 0
            state_changed.emit('Writing frames...')
            for i in key_frames:
                self.cv_video_cap.set_position_frame(i)
                frame = self.cv_video_cap.read()
                # Skips frames the capture fails to read (falsy `frame`).
                if frame:
                    out.write(frame.cv_mat)
                progress += 1 / num_key_frames
                progress_changed.emit(progress)
            state_changed.emit('Writing finished, releasing file...')
            out.release()
            state_changed.emit('Done! Exported to %s.' % filename)
            print('Export Done!')
            # Keep the final message visible briefly before the dialog closes.
            sleep(1)

        self.export_worker_thread = QThread(self)
        self.export_worker_thread.start()
        self.export_worker = ProgressWorker(worker_function)
        self.export_worker.moveToThread(self.export_worker_thread)
        self.export_worker.progress_changed.connect(
            self.update_progressbar_dialog_value)
        self.export_worker.state_changed.connect(
            self.update_progressbar_dialog_title)
        self.export_worker.finished.connect(self.destroy_progressbar_dialog)
        self.export_worker.finished.connect(self.update_filter_status)
        self.export_worker.start.emit()

    def pushButtonFilterGlobal_run_clicked(self):
        """Run the enabled filters in sequence on a worker thread.

        Each stage reuses cached calculations/acceptance files when its
        params are unchanged; an upstream change invalidates every
        downstream stage (the *_filter_updated flags).
        """
        self.create_progressbar_dialog('Loading...')

        self.prepare_filters()

        def worker_function(progress_changed, state_changed):
            # Runs on the filter worker thread; emits progress/state signals.
            sharpness_filter_updated = False
            # Start by accepting every frame; each stage narrows this mask.
            self.frame_acceptance_np = np.ones(
                [int(self.cv_video_cap.get_frame_count())], dtype=np.bool_)

            if self.sharpness_filter:
                progress_changed.emit(0)
                state_changed.emit('Running sharpness filter...')

                def callback(obj):
                    progress_changed.emit(obj.progress)

                sharpness_filter = self.sharpness_filter[
                    'filter']  # type: CVSharpness
                sharpness_value = self.sharpness_filter['calculation']
                if sharpness_value is None:
                    # No cached calculation: recalculate and save it.
                    progress_changed.emit(0)
                    state_changed.emit(
                        'Analyzing video for image sharpness...')
                    print('sharpness recalculating')
                    sharpness_value = sharpness_filter.calculate_sharpness_video_capture(
                        cv_video_capture=self.cv_video_cap,
                        progress_tracker=CVProgressTracker(callback),
                        batch_size=self.params_batch_count)
                    sharpness_filter.save_calculation_file(
                        sharpness_value, self.cv_video_cap)
                progress_changed.emit(1)
                state_changed.emit('Running sharpness acceptance test...')
                if (self.sharpness_filter['params'] != self.sharpness_filter['params_loaded']) or \
                        (self.sharpness_filter['acceptance_loaded'] is None):
                    # Params changed (or no cached acceptance): re-test.
                    sharpness_acceptance = \
                        sharpness_filter.test_sharpness_acceptance(
                            sharpness_calculated=sharpness_value,
                            frame_window_size=self.sharpness_filter['params']['window_size'],
                            z_score=self.params_sharpness['z_score']
                        )
                    self.sharpness_filter['acceptance'] = sharpness_acceptance
                    self.sharpness_filter[
                        'acceptance_loaded'] = sharpness_acceptance
                    sharpness_filter.save_params_file(
                        self.sharpness_filter['params'], self.cv_video_cap)
                    sharpness_filter.save_acceptance_file(
                        sharpness_acceptance, self.cv_video_cap)
                    sharpness_filter_updated = True

                # Status line: frames before/after this stage.
                original_count = np.sum(self.frame_acceptance_np)
                current_count = np.sum(
                    self.sharpness_filter['acceptance_loaded'])
                self.sharpness_filter_status = (
                    "[%d] => [%d] frames (%.2f%% dropped)" %
                    (original_count, current_count,
                     (original_count - current_count) / original_count * 100))
                progress_changed.emit(1)
                state_changed.emit('Sharpness filter done...')
                self.frame_acceptance_np = self.sharpness_filter[
                    'acceptance_loaded']
            else:
                # Disabled stage counts as "updated" so downstream recalculates.
                sharpness_filter_updated = True

            correlation_filter_updated = False
            if sharpness_filter_updated:
                # Upstream changed: correlation results must be recalculated.
                correlation_filter_updated = True

            if self.correlation_filter:
                progress_changed.emit(0)
                state_changed.emit('Running correlation filter...')

                def callback(obj):
                    progress_changed.emit(obj.progress)

                # correlation filter enabled
                correlation_filter = self.correlation_filter[
                    'filter']  # type: CVCorrelation

                if correlation_filter_updated:
                    # Invalidate cached acceptance when upstream changed.
                    self.correlation_filter['acceptance_loaded'] = None
                if (self.correlation_filter['params'] != self.correlation_filter['params_loaded']) or \
                        (self.correlation_filter['acceptance_loaded'] is None):

                    progress_changed.emit(0)
                    state_changed.emit(
                        'Removing still frames using cross correlation...')
                    print('correlation recalculating')
                    # Params changed (or cache invalidated): re-test.
                    correlation_acceptance = \
                        correlation_filter.test_correlation_video_capture(
                            cv_video_capture=self.cv_video_cap,
                            correlation_limit=self.correlation_filter['params']['threshold'],
                            frame_acceptance_np=self.frame_acceptance_np,
                            progress_tracker=CVProgressTracker(callback),
                            batch_size=self.params_batch_count,
                        )
                    self.correlation_filter[
                        'acceptance'] = correlation_acceptance
                    self.correlation_filter[
                        'acceptance_loaded'] = correlation_acceptance
                    correlation_filter.save_params_file(
                        self.correlation_filter['params'], self.cv_video_cap)
                    correlation_filter.save_acceptance_file(
                        correlation_acceptance, self.cv_video_cap)
                    correlation_filter_updated = True

                # Status line: frames before/after this stage.
                original_count = np.sum(self.frame_acceptance_np)
                current_count = np.sum(
                    self.correlation_filter['acceptance_loaded'])
                self.correlation_filter_status = (
                    "[%d] => [%d] frames (%.2f%% dropped)" %
                    (original_count, current_count,
                     (original_count - current_count) / original_count * 100))
                progress_changed.emit(1)
                state_changed.emit('Correlation filter done...')
                self.frame_acceptance_np = self.correlation_filter[
                    'acceptance_loaded']
            else:
                correlation_filter_updated = True

            opticalflow_filter_updated = False
            if correlation_filter_updated:
                # Upstream changed: optical-flow results must be recalculated.
                opticalflow_filter_updated = True

            if self.opticalflow_filter:
                # optical_flow enabled
                progress_changed.emit(1)
                state_changed.emit('Running optical flow filter...')

                def callback(obj):
                    progress_changed.emit(obj.progress)

                opticalflow_filter = self.opticalflow_filter[
                    'filter']  # type: CVOpticalFlow
                if opticalflow_filter_updated:
                    self.opticalflow_filter['acceptance_loaded'] = None
                # Params contain nested dicts, so compare canonical JSON
                # instead of relying on dict equality of loaded copies.
                if (json.dumps(self.opticalflow_filter['params'], sort_keys=True) !=
                        json.dumps(self.opticalflow_filter['params_loaded'], sort_keys=True)) or \
                        (self.opticalflow_filter['acceptance_loaded'] is None):
                    # Params changed (or cache invalidated): re-test.
                    progress_changed.emit(0)
                    state_changed.emit(
                        'Calculating distance between frames using optical flow...'
                    )
                    print('opticalflow recalculating')
                    opticalflow_acceptance = \
                        opticalflow_filter.test_optical_flow_video_capture(
                            cv_video_capture=self.cv_video_cap,
                            distance_limit=self.opticalflow_filter['params']['threshold'],
                            frame_acceptance_np=self.frame_acceptance_np,
                            progress_tracker=CVProgressTracker(callback),
                            batch_size=self.params_batch_count,
                        )
                    self.opticalflow_filter[
                        'acceptance'] = opticalflow_acceptance
                    self.opticalflow_filter[
                        'acceptance_loaded'] = opticalflow_acceptance
                    opticalflow_filter.save_params_file(
                        self.opticalflow_filter['params'], self.cv_video_cap)
                    opticalflow_filter.save_acceptance_file(
                        opticalflow_acceptance, self.cv_video_cap)

                # Status line: frames before/after this stage.
                original_count = np.sum(self.frame_acceptance_np)
                current_count = np.sum(
                    self.opticalflow_filter['acceptance_loaded'])
                self.opticalflow_filter_status = (
                    "[%d] => [%d] frames (%.2f%% dropped)" %
                    (original_count, current_count,
                     (original_count - current_count) / original_count * 100))
                progress_changed.emit(1)
                state_changed.emit('Optical flow filter done...')
                self.frame_acceptance_np = self.opticalflow_filter[
                    'acceptance_loaded']
            else:
                opticalflow_filter_updated = True

            progress_changed.emit(1)
            state_changed.emit('All filters done!')
            print('all filters done')
            # Keep the final message visible briefly before the dialog closes.
            sleep(1)

        self.filter_worker_thread = QThread(self)
        self.filter_worker_thread.start()
        self.filter_worker = ProgressWorker(worker_function)
        self.filter_worker.moveToThread(self.filter_worker_thread)
        self.filter_worker.progress_changed.connect(
            self.update_progressbar_dialog_value)
        self.filter_worker.state_changed.connect(
            self.update_progressbar_dialog_title)
        self.filter_worker.finished.connect(self.destroy_progressbar_dialog)
        self.filter_worker.finished.connect(self.update_filter_status)
        self.filter_worker.start.emit()
Beispiel #3
0
class ProgressDialog:
    '''Displays visual progress bar featuring:
    * current tick label
    * percentage
    * Abort button to cancel the run
    * Exceptions displayed

    Use it to run asynchronous tasks in a modal way
    '''
    def __init__(self, parent):
        '''Creates an instance. To actually show the dialog use context manager.

        Args:
            parent (Qt Widget): parent component for the dialog
        '''
        self._parent = parent
        # Re-entrancy depth: > 0 means a dialog context is already active.
        self._level = 0
        # QProgressDialog instance while a context is open, else None.
        self._ui = None
        # Rate-limits UI updates coming from tick().
        self._throttler = None

    @contextlib.asynccontextmanager
    async def __call__(self, title='Please wait', ticks=100):
        '''Shows the dialog and creates a context for "ticking".

        Args:
            title (str): dialog window title, default "Please wait"
            ticks (int): total number of expected ticks (can be approximate). Establishes
                the "scale" of the progress bar.

        Raises:
            ValueError: if ``ticks`` is not positive.

        Within this context one can use :meth:`tick()` to advance the progress and
        change tick label.
        '''
        if ticks <= 0:
            raise ValueError('ticks should be positive')

        if self._level > 0:
            # Nested call: reuse the already-visible dialog so sub-routines
            # can share the same progress context.
            yield self
            return

        self._ui = QProgressDialog(title, 'Abort', 0, ticks, self._parent)
        self._ui.setWindowModality(Qt.WindowModality.WindowModal)
        # Only pop up if the task runs longer than 500 ms.
        self._ui.setMinimumDuration(500)
        self._throttler = _ThrottlingProgressProxy(self._ui)

        with trio.CancelScope() as cancel_scope:
            try:
                self._level += 1
                # Pressing "Abort" cancels the scope, cancelling the task.
                with connect(self._ui.canceled, cancel_scope.cancel):
                    yield
            except (Exception, trio.Cancelled) as err:
                self._ui.hide()
                message = str(err)
                if sys.exc_info()[0] is not None:
                    import traceback
                    # Append up to 10 stack frames to the error dialog.
                    stack = traceback.format_exc(10)
                    if stack:
                        message += '\n' + stack

                QMessageBox.critical(self._parent, 'Error', message)
            finally:
                self._level -= 1

                # Drive the bar to 100%, then dispose of the dialog.
                self._ui.setValue(self._ui.maximum())
                self._ui.hide()
                self._ui.destroy()
                self._ui = None

    def tick(self, advance=1, label=None):
        '''Communicates that some progress was done with the task.

        Args:
            advance (int): how many tick units were completed (default is 1)
            label (str): optionally set the label of the "current work" to display
                in the ProgressDialog window.

        Raises:
            RuntimeError: if called outside an active progress context.
        '''
        if self._level == 0:
            raise RuntimeError(
                'Progress context is missing. Did you forget to do "async with progress()"?'
            )

        self._throttler.tick(advance, label)