Example #1
    def _get_sync(self, qurl: QUrl, timeout: int = 20000) -> Reply:
        '''
        Synchronous GET request
        '''
        request = QNetworkRequest(qurl)
        ## newer versions of QGIS (3.6+) support synchronous requests
        # if hasattr(self._manager, 'blockingGet'):
        #     reply = self._manager.blockingGet(request, forceRefresh=True)
        ## use blocking event loop for older versions
        # else:
        loop = QEventLoop()
        timer = QTimer()
        timer.setSingleShot(True)
        # the reply or the timeout quits the event loop, whichever comes first
        timer.timeout.connect(loop.quit)
        reply = self._manager.get(request)
        reply.finished.connect(loop.quit)

        timer.start(timeout)

        # start blocking loop
        loop.exec()
        loop.deleteLater()
        if not timer.isActive():
            reply.deleteLater()
            raise ConnectionError('Timeout')

        timer.stop()
        # if reply.error():
        #     self.error.emit(reply.errorString())
        #     raise ConnectionError(reply.errorString())
        res = Reply(reply)
        self.finished.emit(res)
        return res
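Both _get_sync above and _post_sync below rely on the same trick: race the reply's finished signal against a single-shot QTimer and let whichever fires first quit a local QEventLoop. Below is a minimal, standalone sketch of that pattern; the function name blocking_get and the manager argument are illustrative only, and a running QApplication (or QgsApplication) is assumed.

from PyQt5.QtCore import QEventLoop, QTimer, QUrl
from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest


def blocking_get(manager: QNetworkAccessManager, url: str, timeout: int = 20000):
    """Issue a GET and block until the reply finishes or the timeout fires."""
    reply = manager.get(QNetworkRequest(QUrl(url)))

    loop = QEventLoop()
    timer = QTimer()
    timer.setSingleShot(True)
    # whichever fires first (reply finished or timeout) quits the loop
    reply.finished.connect(loop.quit)
    timer.timeout.connect(loop.quit)

    timer.start(timeout)
    loop.exec()  # block here until one of the two signals arrives

    if not timer.isActive():  # the timer fired, so the reply never finished
        reply.abort()
        reply.deleteLater()
        raise ConnectionError('Timeout')
    timer.stop()
    return reply

Because the local event loop keeps processing events while it blocks, GUI painting and other queued signals continue to run, unlike a plain time.sleep wait.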
Example #2
    def _post_sync(self,
                   qurl: QUrl,
                   timeout: int = 20000,
                   data: bytes = b'',
                   content_type=None):
        '''
        Synchronous POST request
        '''
        request = QNetworkRequest(qurl)
        if content_type:
            request.setHeader(QNetworkRequest.ContentTypeHeader, content_type)
        # newer versions of QGIS (3.6+) support synchronous requests
        if hasattr(self._manager, 'blockingPost'):
            reply = self._manager.blockingPost(request,
                                               data,
                                               forceRefresh=True)
        # use blocking event loop for older versions
        else:
            loop = QEventLoop()
            timer = QTimer()
            timer.setSingleShot(True)
            # the reply or the timeout quits the event loop, whichever comes first
            timer.timeout.connect(loop.quit)
            reply = self._manager.post(request, data)
            reply.finished.connect(loop.quit)

            timer.start(timeout)

            # start blocking loop
            loop.exec()
            loop.deleteLater()
            if not timer.isActive():
                reply.deleteLater()
                raise ConnectionError('Timeout')

            timer.stop()
        if reply.error():
            self.error.emit(reply.errorString())
            raise ConnectionError(reply.errorString())
        res = Reply(reply)
        self.finished.emit(res)
        return res
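The hasattr checks above are a simple capability test: per the comments, QGIS 3.6 added blockingGet/blockingPost to the network access manager, and older versions fall back to the event-loop wait. A tiny sketch of that guard in isolation (the helper name is hypothetical):

def supports_blocking_requests(manager) -> bool:
    """True if the network manager exposes the blocking helpers added in QGIS 3.6."""
    return hasattr(manager, 'blockingGet') and hasattr(manager, 'blockingPost')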
Example #3
class ColorsDock(QgsDockWidget):
    """
    Custom dock widget for modification of project colors
    """
    def __init__(self, iface):
        super().__init__()
        self.iface = iface

        stack = QgsPanelWidgetStack()

        self.main_panel = QgsPanelWidget()
        self.main_panel.setDockMode(True)
        layout = QVBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setMargin(0)  # pylint: disable=no-value-for-parameter
        self.main_panel.setLayout(layout)
        self.setWindowTitle(self.tr('Project Colors'))
        self.setObjectName('project_colors_dock')

        scheme = [
            s for s in QgsApplication.colorSchemeRegistry().schemes()
            if isinstance(s, QgsProjectColorScheme)
        ]
        self.color_list = QgsColorSchemeList(None, scheme[0])
        layout.addWidget(self.color_list)

        # defer updates for a short timeout -- prevents flooding with signals
        # when doing lots of color changes, improving app responsiveness
        self.timer = QTimer(self)
        self.timer.setSingleShot(True)
        self.timer.setInterval(100)

        self.block_updates = False

        def apply_colors():
            """
            Applies color changes to canvas and project
            """
            self.block_updates = True
            self.color_list.saveColorsToScheme()
            self.block_updates = False
            self.iface.mapCanvas().refreshAllLayers()

        self.timer.timeout.connect(apply_colors)

        def colors_changed():
            """
            Triggers a deferred update of the project colors
            """
            if self.timer.isActive():
                return
            self.timer.start()

        self.color_list.model().dataChanged.connect(colors_changed)
        stack.setMainPanel(self.main_panel)
        self.setWidget(stack)

        QgsProject.instance().projectColorsChanged.connect(
            self.repopulate_list)

    def repopulate_list(self):
        """
        Rebuild the colors list when project colors are changed
        """
        if self.block_updates:
            return
        scheme = [
            s for s in QgsApplication.colorSchemeRegistry().schemes()
            if isinstance(s, QgsProjectColorScheme)
        ][0]
        self.color_list.setScheme(scheme)
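The dock defers expensive work behind a 100 ms single-shot timer: while the timer is running, further dataChanged notifications are ignored, so a burst of color edits produces at most one apply_colors call per interval. Here is a small generic sketch of that debounce outside the dock; the class and method names are illustrative only.

from PyQt5.QtCore import QObject, QTimer


class Debouncer(QObject):
    """Coalesce bursts of notifications into at most one callback per interval."""

    def __init__(self, callback, interval_ms=100, parent=None):
        super().__init__(parent)
        self._timer = QTimer(self)
        self._timer.setSingleShot(True)
        self._timer.setInterval(interval_ms)
        self._timer.timeout.connect(callback)

    def trigger(self):
        # ignore triggers while the timer is already running, mirroring
        # colors_changed() above
        if not self._timer.isActive():
            self._timer.start()

Usage would mirror the dock: connect the model's change signal to something like lambda *args: debouncer.trigger().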
Example #4
class QtWaitingSpinner(QWidget):
    mColor = QColor(Qt.black)
    mRoundness = 120.0
    mMinimumTrailOpacity = 25.0
    mTrailFadePercentage = 60.0
    mRevolutionsPerSecond = 1.0
    mNumberOfLines = 20
    mLineLength = 20
    mLineWidth = 2
    mInnerRadius = 60
    mCurrentCounter = 0
    mIsSpinning = False

    def __init__(self,
                 parent=None,
                 centerOnParent=True,
                 disableParentWhenSpinning=True,
                 *args,
                 **kwargs):
        QWidget.__init__(self, parent=parent, *args, **kwargs)
        self.mCenterOnParent = centerOnParent
        self.mDisableParentWhenSpinning = disableParentWhenSpinning
        self.initialize()

    def initialize(self):
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.rotate)
        self.updateSize()
        self.updateTimer()
        self.hide()

    @pyqtSlot()
    def rotate(self):
        self.mCurrentCounter += 1
        if self.mCurrentCounter > self.numberOfLines():
            self.mCurrentCounter = 0
        self.update()

    def updateSize(self):
        size = (self.mInnerRadius + self.mLineLength) * 2
        self.setFixedSize(size, size)

    def updateTimer(self):
        # QTimer.setInterval expects an integer number of milliseconds
        self.timer.setInterval(
            int(1000 / (self.mNumberOfLines * self.mRevolutionsPerSecond)))

    def updatePosition(self):
        if self.parentWidget() and self.mCenterOnParent:
            # QWidget.move expects integer coordinates
            self.move(int(self.parentWidget().width() / 2 - self.width() / 2),
                      int(self.parentWidget().height() / 2 - self.height() / 2))

    def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):
        distance = primary - current
        if distance < 0:
            distance += totalNrOfLines
        return distance

    def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc,
                         minOpacity, color):
        if countDistance == 0:
            return color

        minAlphaF = minOpacity / 100.0

        distanceThreshold = ceil((totalNrOfLines - 1) * trailFadePerc / 100.0)
        if countDistance > distanceThreshold:
            color.setAlphaF(minAlphaF)

        else:
            alphaDiff = self.mColor.alphaF() - minAlphaF
            gradient = alphaDiff / (distanceThreshold + 1.0)
            resultAlpha = color.alphaF() - gradient * countDistance
            resultAlpha = min(1.0, max(0.0, resultAlpha))
            color.setAlphaF(resultAlpha)
        return color

    def paintEvent(self, event):
        self.updatePosition()
        painter = QPainter(self)
        painter.fillRect(self.rect(), Qt.transparent)
        painter.setRenderHint(QPainter.Antialiasing, True)
        if self.mCurrentCounter > self.mNumberOfLines:
            self.mCurrentCounter = 0
        painter.setPen(Qt.NoPen)

        for i in range(self.mNumberOfLines):
            painter.save()
            painter.translate(self.mInnerRadius + self.mLineLength,
                              self.mInnerRadius + self.mLineLength)
            rotateAngle = 360.0 * i / self.mNumberOfLines
            painter.rotate(rotateAngle)
            painter.translate(self.mInnerRadius, 0)
            distance = self.lineCountDistanceFromPrimary(
                i, self.mCurrentCounter, self.mNumberOfLines)
            color = self.currentLineColor(distance, self.mNumberOfLines,
                                          self.mTrailFadePercentage,
                                          self.mMinimumTrailOpacity,
                                          self.mColor)
            painter.setBrush(color)
            painter.drawRoundedRect(
                QRect(0, -self.mLineWidth // 2, self.mLineLength,
                      self.mLineWidth), self.mRoundness, Qt.RelativeSize)
            painter.restore()

    def start(self):
        self.updatePosition()
        self.mIsSpinning = True
        self.show()

        if self.parentWidget() and self.mDisableParentWhenSpinning:
            self.parentWidget().setEnabled(False)

        if not self.timer.isActive():
            self.timer.start()
            self.mCurrentCounter = 0

    def stop(self):
        self.mIsSpinning = False
        self.hide()

        if self.parentWidget() and self.mDisableParentWhenSpinning:
            self.parentWidget().setEnabled(True)

        if self.timer.isActive():
            self.timer.stop()
            self.mCurrentCounter = 0

    def setNumberOfLines(self, lines):
        self.mNumberOfLines = lines
        self.updateTimer()

    def setLineLength(self, length):
        self.mLineLength = length
        self.updateSize()

    def setLineWidth(self, width):
        self.mLineWidth = width
        self.updateSize()

    def setInnerRadius(self, radius):
        self.mInnerRadius = radius
        self.updateSize()

    def color(self):
        return self.mColor

    def roundness(self):
        return self.mRoundness

    def minimumTrailOpacity(self):
        return self.mMinimumTrailOpacity

    def trailFadePercentage(self):
        return self.mTrailFadePercentage

    def revolutionsPerSecond(self):
        return self.mRevolutionsPerSecond

    def numberOfLines(self):
        return self.mNumberOfLines

    def lineLength(self):
        return self.mLineLength

    def lineWidth(self):
        return self.mLineWidth

    def innerRadius(self):
        return self.mInnerRadius

    def isSpinning(self):
        return self.mIsSpinning

    def setRoundness(self, roundness):
        self.mRoundness = max(0.0, min(100.0, roundness))

    def setColor(self, color):
        self.mColor = color

    def setRevolutionsPerSecond(self, revolutionsPerSecond):
        self.mRevolutionsPerSecond = revolutionsPerSecond
        self.updateTimer()

    def setTrailFadePercentage(self, trail):
        self.mTrailFadePercentage = trail

    def setMinimumTrailOpacity(self, minimumTrailOpacity):
        self.mMinimumTrailOpacity = minimumTrailOpacity
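A possible way to drive QtWaitingSpinner from application code is sketched below; run_with_spinner, parent_widget and long_task are illustrative names, not part of the widget.

from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor


def run_with_spinner(parent_widget, long_task):
    """Show the spinner (and disable its parent) while `long_task` runs."""
    spinner = QtWaitingSpinner(parent_widget)
    spinner.setColor(QColor(Qt.darkGray))
    spinner.setRevolutionsPerSecond(1.5)
    spinner.start()      # shows the widget and disables the parent
    try:
        long_task()
    finally:
        spinner.stop()   # hides the widget and re-enables the parent

Note that a long_task which blocks the Qt event loop will also freeze the spinner's animation, so in practice the work is usually moved to a QThread or interleaved with processEvents() calls.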
Example #5
class LoadOqEngineOutputsTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # NOTE: recovery modeling is an experimental feature
        cls.initial_experimental_enabled = QSettings().value(
            '/irmt/experimental_enabled',
            DEFAULT_SETTINGS['experimental_enabled'],
            type=bool)
        QSettings().setValue('irmt/experimental_enabled', True)
        cls.irmt = Irmt(iface)
        cls.irmt.initGui()
        cls.hostname = os.environ.get('OQ_ENGINE_HOST',
                                      'http://localhost:8800')
        cls.global_failed_attempts = []
        cls.global_skipped_attempts = []
        cls.global_time_consuming_outputs = []
        cls.loading_completed = {}
        cls.loading_exception = {}
        cls.irmt.drive_oq_engine_server(show=False, hostname=cls.hostname)
        # NOTE: calc_list must be retrieved BEFORE starting any test
        cls.calc_list = cls.irmt.drive_oq_engine_server_dlg.calc_list
        if isinstance(cls.calc_list, Exception):
            raise cls.calc_list
        cls.output_list = {}
        try:
            cls.only_calc_id = int(os.environ.get('ONLY_CALC_ID'))
        except (ValueError, TypeError):
            print('ONLY_CALC_ID was not set or is not an integer'
                  ' value. Running tests for all the available calculations')
            cls.only_calc_id = None
        else:
            print('ONLY_CALC_ID is set.'
                  ' Running tests only for calculation #%s' % cls.only_calc_id)
        if cls.only_calc_id is not None:
            cls.calc_list = [
                calc for calc in cls.calc_list
                if calc['id'] == cls.only_calc_id
            ]
        cls.only_output_type = os.environ.get('ONLY_OUTPUT_TYPE')
        if not cls.only_output_type:
            print('ONLY_OUTPUT_TYPE was not set. Running tests for all'
                  ' the available output types')
        else:
            print('ONLY_OUTPUT_TYPE is set. Running tests only for'
                  ' output type: %s' % cls.only_output_type)

    @classmethod
    def tearDownClass(cls):
        QSettings().setValue('irmt/experimental_enabled',
                             cls.initial_experimental_enabled)
        # print("\n\nGLOBAL SUMMARY OF TESTING OQ-ENGINE OUTPUT LOADERS")
        # print("==================================================\n")
        # if cls.global_skipped_attempts:
        #     print('\nSkipped:')
        #     for skipped_attempt in cls.global_skipped_attempts:
        #         print('\tCalculation %s: %s'
        #               % (skipped_attempt['calc_id'],
        #                  skipped_attempt['calc_description']))
        #         print('\t\tOutput type: %s' % skipped_attempt['output_type'])
        # if not cls.global_failed_attempts:
        #     print("All the outputs were loaded successfully")
        # else:
        #     print('\nFailed attempts:')
        #     for failed_attempt in cls.global_failed_attempts:
        #         print('\tCalculation %s (%s): %s'
        #               % (failed_attempt['calc_id'],
        #                  failed_attempt['calc_mode'],
        #                  failed_attempt['calc_description']))
        #         print('\t\tOutput type: %s' % failed_attempt['output_type'])
        # if cls.global_time_consuming_outputs:
        #     print('\n\nSome loaders took longer than %s seconds:' %
        #           LONG_LOADING_TIME)
        #     for output in sorted(cls.global_time_consuming_outputs,
        #                          key=operator.itemgetter('loading_time'),
        #                          reverse=True):
        #         print('\t%s' % output)
        # stop_app()

    def list_calculations_and_outputs(self):
        print("\n\tList of tested OQ-Engine demo calculations:")
        for calc in self.calc_list:
            print('\tCalculation %s (%s): %s' %
                  (calc['id'], calc['calculation_mode'], calc['description']))
            calc_output_list = \
                self.irmt.drive_oq_engine_server_dlg.get_output_list(
                    calc['id'])
            if isinstance(calc_output_list, Exception):
                raise calc_output_list
            self.output_list[calc['id']] = calc_output_list
            print('\t\tOutput types: %s' %
                  ', '.join([output['type'] for output in calc_output_list]))

    def get_output_list(self):
        for calc in self.calc_list:
            calc_output_list = \
                self.irmt.drive_oq_engine_server_dlg.get_output_list(
                    calc['id'])
            if isinstance(calc_output_list, Exception):
                raise calc_output_list
            self.output_list[calc['id']] = calc_output_list

    def run_calc(self, input_files, job_type='hazard', calc_id=None):
        if hasattr(self, 'timer'):
            self.timer.stop()
            delattr(self, 'timer')
        resp = self.irmt.drive_oq_engine_server_dlg.run_calc(
            calc_id=calc_id, file_names=input_files, use_default_ini=True)
        calc_id = resp['job_id']
        print("Running %s calculation #%s" % (job_type, calc_id))
        self.timer = QTimer()
        self.timer.timeout.connect(lambda: self.refresh_calc_log(calc_id))
        self.timer.start(3000)  # refresh time in milliseconds
        timeout = 240
        start_time = time.time()
        while time.time() - start_time < timeout:
            QGIS_APP.processEvents()
            if not self.timer.isActive():
                self.timer.timeout.disconnect()
                break
            time.sleep(0.1)
        calc_status = self.get_calc_status(calc_id)
        if isinstance(calc_status, Exception):
            print('Removing calculation #%s' % calc_id)
            resp = self.irmt.drive_oq_engine_server_dlg.remove_calc(calc_id)
            print(
                'After reaching the timeout of %s seconds, the %s'
                ' calculation raised the exception "%s", and it was deleted' %
                (timeout, job_type, calc_status))
            raise calc_status
        if not calc_status['status'] == 'complete':
            print('Removing calculation #%s' % calc_id)
            resp = self.irmt.drive_oq_engine_server_dlg.remove_calc(calc_id)
            raise TimeoutError(
                'After reaching the timeout of %s seconds, the %s'
                ' calculation was in the state "%s", and it was deleted' %
                (timeout, job_type, calc_status))
        else:
            print('Calculation #%s was completed' % calc_id)
        return calc_id

    def get_calc_status(self, calc_id):
        calc_status = self.irmt.drive_oq_engine_server_dlg.get_calc_status(
            calc_id)
        return calc_status

    def refresh_calc_log(self, calc_id):
        calc_status = self.get_calc_status(calc_id)
        if isinstance(calc_status, Exception):
            print("An exception occurred: %s" % calc_status)
            print("Trying to continue anyway")
            return
        if calc_status['status'] in ('complete', 'failed'):
            self.timer.stop()
            if calc_status['status'] == 'failed':
                print('Calculation #%s failed' % calc_id)
        calc_log = self.irmt.drive_oq_engine_server_dlg.get_calc_log(calc_id)
        if isinstance(calc_log, Exception):
            print("An exception occurred: %s" % calc_log)
            return
        if calc_log:
            print(calc_log)

    def download_output(self, output_id, outtype):
        dest_folder = tempfile.gettempdir()
        output_download_url = (
            "%s/v1/calc/result/%s?export_type=%s&dload=true" %
            (self.hostname, output_id, outtype))
        print('\t\tGET: %s' % output_download_url, file=sys.stderr)
        # FIXME: enable the user to set verify=True
        resp = requests.get(output_download_url, verify=False)
        if not resp.ok:
            raise Exception(resp.text)
        filename = resp.headers['content-disposition'].split('filename=')[1]
        filepath = os.path.join(dest_folder, filename)
        with open(filepath, "wb") as f:
            f.write(resp.content)
        return filepath

    def _on_loading_ko(self, output_dict):
        ex_type, ex, tb = sys.exc_info()
        failed_attempt = copy.deepcopy(output_dict)
        failed_attempt['traceback'] = tb
        self.failed_attempts.append(failed_attempt)
        self.global_failed_attempts.append(failed_attempt)
        traceback.print_tb(failed_attempt['traceback'])
        print(ex)

    def _on_loading_ok(self, start_time, output_dict):
        loading_time = time.time() - start_time
        print('\t\t(loading time: %.4f sec)' % loading_time)
        if loading_time > LONG_LOADING_TIME:
            output_dict['loading_time'] = loading_time
            self.time_consuming_outputs.append(output_dict)
            self.global_time_consuming_outputs.append(output_dict)
        output_type = output_dict['output_type']
        if output_type in OQ_EXTRACT_TO_LAYER_TYPES:
            loaded_layer = self.irmt.iface.activeLayer()
            if output_type == 'gmf_data':
                if loaded_layer is None:
                    print('\t\tWARNING: no layer was loaded. It should mean '
                          'that no data could be loaded for the chosen eid')
            else:
                self.assertIsNotNone(loaded_layer, 'No layer was loaded')
                num_feats = loaded_layer.featureCount()
                self.assertGreater(
                    num_feats, 0,
                    'The loaded layer does not contain any feature!')

    def load_calc_output(self,
                         calc,
                         selected_output_type,
                         taxonomy_idx=None,
                         aggregate_by_site=None,
                         approach=None,
                         n_simulations=None):
        calc_id = calc['id']
        for output in self.output_list[calc_id]:
            if (output['type'] != selected_output_type
                    and "%s_aggr" % output['type'] != selected_output_type):
                continue
            output_dict = {
                'calc_id': calc_id,
                'calc_mode': calc['calculation_mode'],
                'calc_description': calc['description'],
                'output_type': selected_output_type
            }
            start_time = time.time()
            print('\n\tCalculation %s (%s): %s' %
                  (calc['id'], calc['calculation_mode'], calc['description']))
            # NOTE: aggregated outputs use an existing OQ-Engine output and
            #       virtually transform it by postfixing its type with '_aggr'
            output_copy = copy.deepcopy(output)
            output_copy['type'] = selected_output_type
            try:
                loading_resp = self.load_output(
                    calc,
                    output_copy,
                    taxonomy_idx=taxonomy_idx,
                    aggregate_by_site=aggregate_by_site,
                    approach=approach,
                    n_simulations=n_simulations)
            except Exception:
                self._on_loading_ko(output_dict)
            else:
                if loading_resp != 'skipped':
                    self._on_loading_ok(start_time, output_dict)

    def on_init_done(self,
                     dlg,
                     taxonomy_idx=None,
                     aggregate_by_site=None,
                     approach=None,
                     n_simulations=None):
        if taxonomy_idx is not None:
            print("\t\tTaxonomy: %s" % dlg.taxonomy_cbx.itemText(taxonomy_idx))
            dlg.taxonomy_cbx.setCurrentIndex(taxonomy_idx)
        if aggregate_by_site is not None:
            print("\t\taggregate_by_site: %s" % aggregate_by_site)
            dlg.aggregate_by_site_ckb.setChecked(aggregate_by_site)
        # NOTE: approach and n_simulations have to be set in the viewer_dock
        if approach is not None:
            print("\t\tRecovery modeling with parameters:")
            print("\t\t\tApproach: %s" % approach)
        if n_simulations is not None:
            print("\t\t\tn_simulations: %s" % n_simulations)
        # set dialog options and accept
        if dlg.output_type == 'uhs':
            dlg.load_selected_only_ckb.setChecked(True)
            assert_and_emit(dlg, dlg.loading_exception, self.assertGreater,
                            dlg.poe_cbx.count(), 0, 'No PoE was found')
            dlg.poe_cbx.setCurrentIndex(0)
        elif dlg.output_type == 'avg_losses-rlzs':
            dlg.load_selected_only_ckb.setChecked(True)
            # Taxonomies should be at least 'All' and a single one
            assert_and_emit(dlg, dlg.loading_exception, self.assertGreater,
                            dlg.taxonomy_cbx.count(), 1,
                            'No taxonomy was found')
            # 'All' (inserted on top)
            taxonomy_all_idx = dlg.taxonomy_cbx.findText('All')
            assert_and_emit(dlg, dlg.loading_exception, self.assertEqual,
                            taxonomy_all_idx, 0,
                            "Taxonomy All was not the first in selector")
            dlg.taxonomy_cbx.setCurrentIndex(taxonomy_idx)
            assert_and_emit(dlg, dlg.loading_exception, self.assertGreater,
                            dlg.loss_type_cbx.count(), 0,
                            'No loss type was found')
            dlg.loss_type_cbx.setCurrentIndex(0)
            # FIXME: we need to do dlg.accept() also for the case
            #        performing the aggregation by zone
        elif dlg.output_type == 'damages-rlzs' and aggregate_by_site:
            # FIXME: testing only for selected taxonomy
            dlg.load_selected_only_ckb.setChecked(True)
            assert_and_emit(dlg, dlg.loading_exception, self.assertGreater,
                            dlg.taxonomy_cbx.count(), 0,
                            'No taxonomy was found')
            dlg.taxonomy_cbx.setCurrentIndex(0)
            taxonomy_all_idx = dlg.taxonomy_cbx.findText('All')
            assert_and_emit(dlg, dlg.loading_exception, self.assertEqual,
                            taxonomy_all_idx, 0,
                            "Taxonomy All was not the first in selector")
            dlg.taxonomy_cbx.setCurrentIndex(taxonomy_idx)
            assert_and_emit(dlg, dlg.loading_exception, self.assertGreater,
                            dlg.loss_type_cbx.count(), 0,
                            'No loss_type was found')
            dlg.loss_type_cbx.setCurrentIndex(0)
            assert_and_emit(dlg, dlg.loading_exception, self.assertGreater,
                            dlg.dmg_state_cbx.count(), 0,
                            'No damage state was found')
            dlg.dmg_state_cbx.setCurrentIndex(0)
            # FIXME: we need to do dlg.accept() also for the case
            #        performing the aggregation by zone
        elif dlg.output_type == 'asset_risk':
            num_selected_taxonomies = len(
                list(dlg.taxonomies_multisel.get_selected_items()))
            num_unselected_taxonomies = len(
                list(dlg.taxonomies_multisel.get_unselected_items()))
            num_taxonomies = (num_selected_taxonomies +
                              num_unselected_taxonomies)
            assert_and_emit(dlg, dlg.loading_exception, self.assertGreater,
                            num_taxonomies, 0, 'No taxonomy was found')
            assert_and_emit(dlg, dlg.loading_exception, self.assertGreater,
                            dlg.category_cbx.count(), 0,
                            'No category was found')
            dlg.category_cbx.setCurrentIndex(0)
        if dlg.ok_button.isEnabled():
            dlg.accept()
            if dlg.output_type == 'asset_risk':
                # NOTE: avoiding to emit loading_completed for asset_risk,
                # because in this case there's a second asynchronous call to
                # the extract api, and the signal is emitted by the callback
                return
        else:
            raise RuntimeError('The ok button is disabled')
        if dlg.output_type == 'hcurves':
            self.load_hcurves()
        elif dlg.output_type == 'uhs':
            self.load_uhs()
        elif dlg.output_type == 'damages-rlzs' and not aggregate_by_site:
            self.load_recovery_curves(dlg, approach, n_simulations)
            return
        dlg.loading_completed.emit(dlg)

    def _store_skipped_attempt(self, id, calculation_mode, description, type):
        skipped_attempt = {
            'calc_id': id,
            'calc_mode': calculation_mode,
            'calc_description': description,
            'output_type': type
        }
        self.skipped_attempts.append(skipped_attempt)
        self.global_skipped_attempts.append(skipped_attempt)

    def load_output(self,
                    calc,
                    output,
                    taxonomy_idx=None,
                    aggregate_by_site=None,
                    approach=None,
                    n_simulations=None):
        # NOTE: it is better to avoid resetting the project here, because some
        # outputs might be skipped, therefore it would not be needed
        calc_id = calc['id']
        output_type = output['type']
        calculation_mode = calc['calculation_mode']
        # NOTE: loading zipped input files only for multi_risk
        if output_type == 'input' and calculation_mode != 'multi_risk':
            self._store_skipped_attempt(calc_id, calculation_mode,
                                        calc['description'], output_type)
            print('\t\tSKIPPED (loading zipped input files only for'
                  ' multi_risk)')
            return 'skipped'
        if output_type in (OQ_CSV_TO_LAYER_TYPES | OQ_RST_TYPES
                           | OQ_ZIPPED_TYPES):
            if output_type in OQ_CSV_TO_LAYER_TYPES:
                filetype = 'csv'
            elif output_type in OQ_RST_TYPES:
                filetype = 'rst'
            else:  # OQ_ZIPPED_TYPES
                filetype = 'zip'
            # TODO: we should test the actual downloader, asynchronously
            filepath = self.download_output(output['id'], filetype)
            assert filepath is not None
            self.irmt.iface.newProject()
            if output_type == 'fullreport':
                dlg = ShowFullReportDialog(filepath)
                dlg.accept()
                print('\t\tok')
                return 'ok'
            if output_type in OQ_ZIPPED_TYPES:
                dlg = LoadInputsDialog(self.irmt.drive_oq_engine_server_dlg,
                                       filepath,
                                       self.irmt.iface,
                                       mode='testing')
                dlg.accept()
                print('\t\tok')
                return 'ok'
            dlg = OUTPUT_TYPE_LOADERS[output_type](
                self.irmt.drive_oq_engine_server_dlg,
                self.irmt.iface,
                self.irmt.viewer_dock,
                self.irmt.drive_oq_engine_server_dlg.session,
                self.hostname,
                calc_id,
                output_type,
                filepath,
                calculation_mode=calculation_mode,
                mode='testing')
            if dlg.ok_button.isEnabled():
                dlg.accept()
                print('\t\tok')
                return 'ok'
            else:
                raise RuntimeError('The ok button is disabled')
        elif output_type == 'ruptures':
            dlg = OUTPUT_TYPE_LOADERS[output_type](
                self.irmt.drive_oq_engine_server_dlg,
                self.irmt.iface,
                self.irmt.viewer_dock,
                self.irmt.drive_oq_engine_server_dlg.session,
                self.hostname,
                calc_id,
                output_type,
                min_mag=6.5,
                calculation_mode=calculation_mode,
                mode='testing')
            self.loading_completed[dlg] = False
            self.loading_exception[dlg] = None
            dlg.loading_completed.connect(
                lambda dlg: self.on_loading_completed(dlg))
            dlg.loading_exception.connect(lambda dlg, exception: self.
                                          on_loading_exception(dlg, exception))
            timeout = 10
            start_time = time.time()
            dlg.accept()
            while time.time() - start_time < timeout:
                QGIS_APP.processEvents()
                if self.loading_completed[dlg]:
                    print('\t\tok')
                    return 'ok'
                if self.loading_exception[dlg]:
                    raise self.loading_exception[dlg]
                time.sleep(0.1)
            raise TimeoutError('Loading time exceeded %s seconds' % timeout)
        elif output_type in OQ_EXTRACT_TO_LAYER_TYPES:
            self.irmt.iface.newProject()
            dlg = OUTPUT_TYPE_LOADERS[output_type](
                self.irmt.drive_oq_engine_server_dlg,
                self.irmt.iface,
                self.irmt.viewer_dock,
                self.irmt.drive_oq_engine_server_dlg.session,
                self.hostname,
                calc_id,
                output_type,
                calculation_mode=calculation_mode,
                mode='testing')
            self.loading_completed[dlg] = False
            self.loading_exception[dlg] = None
            dlg.loading_completed.connect(
                lambda dlg: self.on_loading_completed(dlg))
            dlg.loading_exception.connect(lambda dlg, exception: self.
                                          on_loading_exception(dlg, exception))
            dlg.init_done.connect(lambda dlg: self.on_init_done(
                dlg,
                taxonomy_idx=taxonomy_idx,
                aggregate_by_site=aggregate_by_site,
                approach=approach,
                n_simulations=n_simulations))
            timeout = 10
            start_time = time.time()
            while time.time() - start_time < timeout:
                QGIS_APP.processEvents()
                if self.loading_completed[dlg]:
                    print('\t\tok')
                    return 'ok'
                if self.loading_exception[dlg]:
                    raise self.loading_exception[dlg]
                time.sleep(0.1)
            raise TimeoutError('Loading time exceeded %s seconds' % timeout)
        elif output_type in OQ_EXTRACT_TO_VIEW_TYPES:
            self.irmt.iface.newProject()
            self.irmt.viewer_dock.load_no_map_output(
                calc_id, self.irmt.drive_oq_engine_server_dlg.session,
                self.hostname, output_type,
                self.irmt.drive_oq_engine_server_dlg.engine_version)
            tmpfile_handler, tmpfile_name = tempfile.mkstemp()
            self.irmt.viewer_dock.write_export_file(tmpfile_name)
            os.close(tmpfile_handler)
            print('\t\tok')
            return 'ok'

    def on_loading_completed(self, dlg):
        self.loading_completed[dlg] = True

    def on_loading_exception(self, dlg, exception):
        self.loading_exception[dlg] = exception

    def load_output_type(self, selected_output_type):
        self.get_output_list()
        if (self.only_output_type
                and self.only_output_type != selected_output_type):
            print('\nSkipped output type: %s' % selected_output_type)
            return
        self.failed_attempts = []
        self.skipped_attempts = []
        self.time_consuming_outputs = []
        for calc in self.calc_list:
            if selected_output_type in ['avg_losses-rlzs', 'damages-rlzs']:
                # TODO: keep track of taxonomy in test summary
                aggregate_by_site = (None if selected_output_type
                                     == 'avg_losses-rlzs' else True)
                for taxonomy_idx in [0, 1]:
                    self.load_calc_output(calc,
                                          selected_output_type,
                                          taxonomy_idx=taxonomy_idx,
                                          aggregate_by_site=aggregate_by_site)
                # for damages-rlzs also test recovery modeling
                if selected_output_type == 'damages-rlzs':
                    for approach in ['Disaggregate', 'Aggregate']:
                        self.load_calc_output(calc,
                                              selected_output_type,
                                              aggregate_by_site=False,
                                              approach=approach,
                                              n_simulations=2)
            else:
                self.load_calc_output(calc, selected_output_type)
        if self.skipped_attempts:
            print('\nSkipped:')
            for skipped_attempt in self.skipped_attempts:
                print('\tCalculation %s: %s' %
                      (skipped_attempt['calc_id'],
                       skipped_attempt['calc_description']))
                print('\t\tOutput type: %s' % skipped_attempt['output_type'])
        if not self.failed_attempts:
            print('\n%s successfully loaded for all calculations' %
                  selected_output_type)
        else:
            failing_summary = ''
            for failed_attempt in self.failed_attempts:
                # NOTE: we avoid printing the error also at the end, because:
                #       1) it would be a duplicate
                #       2) it would not contain the traceback from the engine
                failing_summary += ('\n\tCalculation %s (%s): %s'
                                    '\n\t\t(please check traceback ahead)') % (
                                        failed_attempt['calc_id'],
                                        failed_attempt['calc_mode'],
                                        failed_attempt['calc_description'])
            raise FailedAttempts(failing_summary)
        if self.time_consuming_outputs:
            print('\n\nSome loaders took longer than %s seconds:' %
                  LONG_LOADING_TIME)
            for output in sorted(self.time_consuming_outputs,
                                 key=operator.itemgetter('loading_time'),
                                 reverse=True):
                print('\t%s' % output)

    def load_recovery_curves(self, dlg, approach, n_simulations):
        self._set_output_type('Recovery Curves')
        self.irmt.viewer_dock.approach_cbx.setCurrentIndex(
            self.irmt.viewer_dock.approach_cbx.findText(approach))
        self.irmt.viewer_dock.n_simulations_sbx.setValue(n_simulations)
        self._change_selection()
        self._test_export()
        dlg.loading_completed.emit(dlg)

    def load_uhs(self):
        self._set_output_type('Uniform Hazard Spectra')
        self._change_selection()
        self._test_export()

    def load_hcurves(self):
        self._set_output_type('Hazard Curves')
        self.assertGreater(self.irmt.viewer_dock.imt_cbx.count(), 0,
                           'No IMT was found!')
        self.irmt.viewer_dock.imt_cbx.setCurrentIndex(0)
        self._change_selection()
        self._test_export()

    def _test_export(self):
        _, exported_file_path = tempfile.mkstemp(suffix=".csv")
        self.irmt.viewer_dock.write_export_file(exported_file_path)
        # NOTE: we are only checking that the exported CSV has at least 2 rows
        # and 3 columns per row. We are avoiding more precise checks, because
        # CSV tests are very fragile. On different platforms the numbers could
        # be slightly different. With different versions of
        # shapely/libgeos/numpy/etc the numbers could be slightly different.
        # The parameters of the demos could change in the future and the
        # numbers (even the number of rows and columns) could change.
        with open(exported_file_path, 'r', newline='') as got:
            got_reader = csv.reader(got)
            n_rows = 0
            for got_line in got_reader:
                if got_line[0].startswith('#'):
                    continue
                n_rows += 1
                n_cols = 0
                for got_element in got_line:
                    n_cols += 1
                self.assertGreaterEqual(
                    n_cols, 3, "The following line of the exported file %s has"
                    " only %s columns:\n%s" %
                    (exported_file_path, n_cols, got_line))
            self.assertGreaterEqual(
                n_rows, 2, "The exported file %s has only %s rows" %
                (exported_file_path, n_rows))

    def _set_output_type(self, output_type):
        self.irmt.viewer_dock.output_type_cbx.setCurrentIndex(-1)
        idx = self.irmt.viewer_dock.output_type_cbx.findText(output_type)
        self.assertNotEqual(idx, -1, 'Output type %s not found' % output_type)
        self.irmt.viewer_dock.output_type_cbx.setCurrentIndex(idx)

    def _change_selection(self):
        layer = self.irmt.iface.activeLayer()
        # the behavior should be slightly different (pluralizing labels, etc)
        # depending on the amount of features selected
        num_feats = layer.featureCount()
        self.assertGreater(num_feats, 0,
                           'The loaded layer does not contain any feature!')
        # select first feature only
        layer.select(1)
        layer.removeSelection()
        # select first and last features (just one if there is only one)
        layer.select([1, num_feats])
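Several of the test methods above wait for asynchronous loading by polling a flag while pumping the Qt event loop (QGIS_APP.processEvents()) until a deadline passes. A stripped-down sketch of that wait loop follows; wait_until and app are hypothetical names.

import time


def wait_until(app, condition, timeout=10.0, poll=0.1):
    """Process Qt events until condition() is truthy or `timeout` seconds pass."""
    start = time.time()
    while time.time() - start < timeout:
        app.processEvents()  # lets queued signals such as loading_completed run
        if condition():
            return
        time.sleep(poll)
    raise TimeoutError('Condition not met within %s seconds' % timeout)

In the tests above the condition would be something like lambda: self.loading_completed[dlg], with QGIS_APP passed as app.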
Example #6
class GeoGigLiveLayerRefresher(object):

    nProgressBarsOpen = 0
    nfeaturesRead = 0
    # this might not be necessary - I think this will always be happening on
    # the same UI thread
    lock = threading.RLock()

    def __init__(self,
                 connector,
                 geogiglayer,
                 fullDetail=False,
                 sm_factor=1.0,
                 sm_type="WithBBOX"):
        self.connector = connector
        self.geogiglayer = geogiglayer

        self.queryThread = QueryThread(self.connector)
        self.queryThread.started.connect(self.datasetStart)
        self.queryThread.finished.connect(self.datasetReceived)
        self.queryThread.progress_occurred.connect(self.featuresRead)

        self.refreshTimer = QTimer()
        self.refreshTimer.setSingleShot(True)
        self.refreshTimer.timeout.connect(self.makeQuery)

        self.lastExtent = None
        self.sm_factor = sm_factor
        self.sm_type = sm_type

        root = QgsProject.instance().layerTreeRoot()
        root.visibilityChanged.connect(
            self.visibilityChanged)  # track all layer visibility changes
        self.fullDetail = fullDetail
        #root.addedChildren.connect(self.layerTreeAddedTo) # track when layer is added to tree

    # called when layer is removed
    def cleanup(self):
        # don't track this anymore (it causes a problem because the c++ object is
        # deleted, but the python object isn't)
        root = QgsProject.instance().layerTreeRoot()
        root.visibilityChanged.disconnect(self.visibilityChanged)

    def isLayerVisible(self):
        if self.geogiglayer.layer is None:
            return None
        layerId = self.geogiglayer.layer.id()
        if self.geogiglayer.canvas is None:
            treelayer = QgsProject.instance().layerTreeRoot().findLayer(
                layerId)  # QgsLayerTreeLayer
            if treelayer is None:
                return False
            if not treelayer.isVisible():
                return False  # definitely not visible
        # likely visible, do a simple scale-range check
        return self.geogiglayer.layer.isInScaleRange(
            self.geogiglayer._canvas().scale())

    def visibilityChanged(self, qgsLayerTreeNode):
        if self.isLayerVisible():
            self.refresh(forceRefresh=False, tryToRepopulate=True)

    def openProgress(self):
        with self.lock:
            if self.nProgressBarsOpen == 0:
                self.nfeaturesRead = 0
                qgiscommons2.gui.startProgressBar(
                    "Transferring data from GeoGig", 0,
                    currentWindow().messageBar())
            self.nProgressBarsOpen += 1

    def closeProgress(self):
        with self.lock:
            self.nProgressBarsOpen -= 1
            if self.nProgressBarsOpen == 0:
                qgiscommons2.gui.closeProgressBar()

    # sometimes the progress bar can be closed by another thread/function
    #  this will re-open it if that happens.
    # ex. when you have a layers being populated() during a refresh()
    #     which can occur on project load
    def ensureProgressOpen(self):
        _progressActive = qgiscommons2.gui._progressActive
        if _progressActive:
            return  # nothing to do
        qgiscommons2.gui.startProgressBar("Transferring data from GeoGig", 0,
                                          currentWindow().messageBar())

    # called by backgrounding feature loader (self.queryThread)
    # this is for progress indication
    def featuresRead(self, nfeatsBatch):
        with self.lock:
            self.ensureProgressOpen()
            self.nfeaturesRead += nfeatsBatch
            try:
                qgiscommons2.gui.setProgressText(
                    "Read " + "{:,}".format(self.nfeaturesRead) +
                    " features...")
            except:
                pass  # could be a problem...

    # occurs when extents change, call this from geogiglayer
    def refresh(self, forceRefresh=True, tryToRepopulate=False):
        if tryToRepopulate and not self.geogiglayer.valid:
            try:
                self.geogiglayer.populate()
            except:
                item = QgsProject.instance().layerTreeRoot().findLayer(
                    self.geogiglayer.layer.id())
                item.setItemVisibilityCheckedRecursive(False)
                return
        if not forceRefresh:
            extentRect = self.geogiglayer.extentToLayerCrs(
                self.geogiglayer._canvas().extent())
            extent = [
                extentRect.xMinimum(),
                extentRect.yMinimum(),
                extentRect.xMaximum(),
                extentRect.yMaximum()
            ]
            if self.lastExtent == extent:
                return
        # start the timer -- it will fire after 100 ms and call makeQuery
        if self.refreshTimer.isActive():
            self.refreshTimer.setInterval(100)  # restart
        else:
            self.refreshTimer.start(100)

    # downloads the current extent at full detail; called when entering or exiting editing mode
    def setFullDetail(self, fullDetail, refresh=True):
        self.fullDetail = fullDetail
        if refresh:
            self.makeQuery()

    def getFullDetail(self):
        return self.fullDetail

    # thread has started to do work
    def datasetStart(self, url, query):
        self.timeStart = time.perf_counter()
        QgsMessageLog.logMessage("loading dataset url={}, query={}".format(
            url, str(query)))

    # return true if you shouldn't draw this layer
    #   if it's rule-based, and all the rules depend on scale, and all the rules are "out-of-scale"
    def doNotDrawScale(self, r, scale):
        if not isinstance(r, QgsRuleBasedRenderer):
            return False
        # if any rule is NOT scale dependent, then we need to draw
        if any(not rule.dependsOnScale() for rule in r.rootRule().children()):
            return False
        return not any(rule.isScaleOK(scale) for rule in r.rootRule().children())

    def ecqlFromLayerStyle(self):
        canvas = self.geogiglayer._canvas()
        ms = canvas.mapSettings()
        ctx = QgsRenderContext.fromMapSettings(ms)

        r = self.geogiglayer.layer.renderer().clone()
        try:
            r.startRender(ctx, self.geogiglayer.layer.fields())
            if self.doNotDrawScale(r, canvas.scale()):
                return "EXCLUDE"
            expression = r.filter()
            if expression == "" or expression == "TRUE":
                return None
            converter = ExpressionConverter(expression)
            return converter.asECQL()
        except:
            return None
        finally:
            r.stopRender(ctx)

    def makeQuery(self):
        self.queryThread.abort()
        self.queryThread.wait()  # wait for it to abort
        if not self.isLayerVisible():
            return  # don't do anything if the layer is invisible (NOTE: the layer likely already has data in it)
        self.openProgress()
        extent = self.geogiglayer.extentToLayerCrs(
            self.geogiglayer._canvas().extent())
        self.lastExtent = [
            extent.xMinimum(),
            extent.yMinimum(),
            extent.xMaximum(),
            extent.yMaximum()
        ]
        self.queryThread.createURL(self.geogiglayer.user,
                                   self.geogiglayer.repo,
                                   self.geogiglayer.layername)
        if self.fullDetail:
            self.queryThread.createQuery(self.geogiglayer.commitid,
                                         self.lastExtent,
                                         simplifyGeom=False,
                                         ecqlFilter=self.ecqlFromLayerStyle())
        else:
            self.queryThread.createQuery(self.geogiglayer.commitid,
                                         self.lastExtent,
                                         self.geogiglayer._canvas().width(),
                                         self.geogiglayer._canvas().height(),
                                         screenMap_factor=self.sm_factor,
                                         screenMap_type=self.sm_type,
                                         ecqlFilter=self.ecqlFromLayerStyle())
        self.queryThread.start()

    # called by the background feature loader (self.queryThread)
    # after the dataset has finished loading
    # None -> aborted
    def datasetReceived(self, memorydataset):
        if memorydataset is not None:
            end_time = time.perf_counter()
            QgsMessageLog.logMessage(
                "Dataset received ({}) - {:,} features in {}s".format(
                    self.geogiglayer.layername, len(memorydataset),
                    end_time - self.timeStart))
        self.closeProgress()
        if memorydataset is None:
            return
        try:
            self.geogiglayer.newDatasetReceived(memorydataset)
        except Exception as e:
            QgsMessageLog.logMessage("error - " + str(e))