Example No. 1
    def test_invalid_json(self):
        invalid_json_str = '''
            {
                "taskType": "CHIP_CLASSIFICATION
            }
        '''
        str_to_file(invalid_json_str, self.file_path)

        with self.assertRaises(ProtobufParseException):
            load_json_config(self.file_path, TaskConfigMsg())
Example No. 2
    def test_bogus_field(self):
        config = {
            'taskType': 'CHIP_CLASSIFICATION',
            'chipClassificationConfig': {
                'classItems': [{
                    'id': 1,
                    'name': 'car'
                }]
            },
            'bogus_field': 0
        }

        self.write_config_file(config)
        with self.assertRaises(ProtobufParseException):
            load_json_config(self.file_path, TaskConfigMsg())
Example No. 3
    def test_bundle_od_command(self):
        def get_task(tmp_dir):
            predict_package_uri = os.path.join(tmp_dir, 'predict_package.zip')
            t = rv.TaskConfig.builder(rv.OBJECT_DETECTION) \
                             .with_predict_package_uri(predict_package_uri) \
                             .with_classes(['class1']) \
                             .build()
            return t

        def get_backend(task, tmp_dir):
            model_uri = os.path.join(tmp_dir, 'model')
            template_uri = data_file_path(
                'tf_object_detection/embedded_ssd_mobilenet_v1_coco.config')
            with open(model_uri, 'w') as f:
                f.write('DUMMY')
            b = rv.BackendConfig.builder(rv.TF_OBJECT_DETECTION) \
                                .with_task(task) \
                                .with_template(template_uri) \
                                .with_model_uri(model_uri) \
                                .build()
            return b

        with RVConfig.get_tmp_dir() as tmp_dir:
            task = get_task(tmp_dir)
            backend = get_backend(task, tmp_dir)
            analyzer = self.get_analyzer(tmp_dir)
            scene = self.get_scene(tmp_dir)
            cmd = rv.CommandConfig.builder(rv.BUNDLE) \
                                  .with_task(task) \
                                  .with_root_uri(tmp_dir) \
                                  .with_backend(backend) \
                                  .with_analyzers([analyzer]) \
                                  .with_scene(scene) \
                                  .build() \
                                  .create_command()

            cmd.run(tmp_dir)

            package_dir = os.path.join(tmp_dir, 'package')
            make_dir(package_dir)
            with zipfile.ZipFile(task.predict_package_uri, 'r') as package_zip:
                package_zip.extractall(path=package_dir)

            bundle_config_path = os.path.join(package_dir,
                                              'bundle_config.json')
            bundle_config = load_json_config(bundle_config_path,
                                             CommandConfigMsg())

            self.assertEqual(bundle_config.command_type, rv.BUNDLE)

            actual = set(os.listdir(package_dir))
            expected = set(['stats.json', 'model', 'bundle_config.json'])

            self.assertEqual(actual, expected)
Example No. 4
    def test_bundle_cc_command(self):
        def get_task(tmp_dir):
            predict_package_uri = os.path.join(tmp_dir, 'predict_package.zip')
            t = rv.TaskConfig.builder(rv.CHIP_CLASSIFICATION) \
                             .with_predict_package_uri(predict_package_uri) \
                             .with_classes(['class1']) \
                             .build()
            return t

        def get_backend(task, tmp_dir):
            model_uri = os.path.join(tmp_dir, 'model')
            with open(model_uri, 'w') as f:
                f.write('DUMMY')
            b = rv.BackendConfig.builder(rv.KERAS_CLASSIFICATION) \
                                .with_task(task) \
                                .with_model_defaults(rv.RESNET50_IMAGENET) \
                                .with_model_uri(model_uri) \
                                .build()
            return b

        with RVConfig.get_tmp_dir() as tmp_dir:
            task = get_task(tmp_dir)
            backend = get_backend(task, tmp_dir)
            analyzer = self.get_analyzer(tmp_dir)
            scene = self.get_scene(tmp_dir)
            cmd = rv.CommandConfig.builder(rv.BUNDLE) \
                                  .with_task(task) \
                                  .with_root_uri(tmp_dir) \
                                  .with_backend(backend) \
                                  .with_analyzers([analyzer]) \
                                  .with_scene(scene) \
                                  .build() \
                                  .create_command(tmp_dir)

            cmd.run(tmp_dir)

            package_dir = os.path.join(tmp_dir, 'package')
            make_dir(package_dir)
            with zipfile.ZipFile(task.predict_package_uri, 'r') as package_zip:
                package_zip.extractall(path=package_dir)

            bundle_config_path = os.path.join(package_dir,
                                              'bundle_config.json')
            bundle_config = load_json_config(bundle_config_path,
                                             CommandConfigMsg())

            self.assertEqual(bundle_config.command_type, rv.BUNDLE)

            actual = set(os.listdir(package_dir))
            expected = set(['stats.json', 'model', 'bundle_config.json'])

            self.assertEqual(actual, expected)
Example No. 5
    def load_experiment_clicked(self):
        experiment_uri = self.experiment_uri_line_edit.text()
        Log.log_info("Loading experiment at {}".format(experiment_uri))
        QtWidgets.QApplication.setOverrideCursor(Qt.WaitCursor)
        try:
            msg = load_json_config(experiment_uri, ExperimentConfigMsg())
        except (NotReadableError, ProtobufParseException) as e:
            reply = QMessageBox.warning(
                self.iface.mainWindow(), 'Error',
                'Unable to read experiment file. See log for more details.',
                QMessageBox.Ok)
            Log.log_warning(
                'Unable to read experiment file: {}'.format(experiment_uri))
            Log.log_exception(e)
            return
        finally:
            QtWidgets.QApplication.restoreOverrideCursor()

        experiment = rv.ExperimentConfig.from_proto(msg)
        self.experiment = experiment
        ds = experiment.dataset

        self.train_scene_list.clear()
        for scene in ds.train_scenes:
            item = QtWidgets.QListWidgetItem(scene.id, self.train_scene_list)
            item.setFlags(QtCore.Qt.ItemIsUserCheckable
                          | QtCore.Qt.ItemIsEnabled)
            item.setCheckState(QtCore.Qt.Checked)
            self.train_scene_list.addItem(item)

        self.validation_scene_list.clear()
        for scene in ds.validation_scenes:
            item = QtWidgets.QListWidgetItem(scene.id,
                                             self.validation_scene_list)
            item.setFlags(QtCore.Qt.ItemIsUserCheckable
                          | QtCore.Qt.ItemIsEnabled)
            item.setCheckState(QtCore.Qt.Checked)
            self.validation_scene_list.addItem(item)

        self.test_scene_list.clear()
        for scene in ds.test_scenes:
            item = QtWidgets.QListWidgetItem(scene.id, self.test_scene_list)
            item.setFlags(QtCore.Qt.ItemIsUserCheckable
                          | QtCore.Qt.ItemIsEnabled)
            item.setCheckState(QtCore.Qt.Checked)
            self.test_scene_list.addItem(item)
Example No. 6
    def test_valid(self):
        config = {
            'taskType': 'CHIP_CLASSIFICATION',
            'chipClassificationConfig': {
                'classItems': [{
                    'id': 1,
                    'name': 'car'
                }]
            }
        }
        self.write_config_file(config)
        task = load_json_config(self.file_path, TaskConfigMsg())
        self.assertEqual(task.task_type, rv.CHIP_CLASSIFICATION)
        self.assertEqual(task.chip_classification_config.class_items[0].id, 1)
        self.assertEqual(task.chip_classification_config.class_items[0].name,
                         'car')
        self.assertEqual(len(task.chip_classification_config.class_items), 1)
Example No. 7
    def run(command_config_uri):
        msg = load_json_config(command_config_uri, CommandConfigMsg())
        CommandRunner.run_from_proto(msg)
Example No. 8
    def test_bogus_value(self):
        config = {'task': 'bogus_value'}
        self.write_config_file(config)
        with self.assertRaises(ProtobufParseException):
            load_json_config(self.file_path, TaskConfigMsg())
Example No. 9
    def __init__(self,
                 prediction_package_uri,
                 tmp_dir,
                 update_stats=False,
                 channel_order=None):
        """Creates a new Predictor.

        Args:
            prediction_package_uri: The URI of the prediction package to use.
                Can be any type of URI that Raster Vision can read.
            tmp_dir: Temporary directory in which to store files that are used
                by the Predictor. This directory is not cleaned up by this
                class.
            update_stats: Option indicating whether any Analyzers should be
                run on the image being predicted on. Otherwise, the Predictor
                will use the output of Analyzers that are bundled with the
                predict package. This is useful, for instance, if you are
                predicting against imagery that needs to be normalized with a
                StatsAnalyzer, and the color profile of the new imagery is
                significantly different than the imagery the model was
                trained on.
            channel_order: Option for a new channel order to use for the
                imagery being predicted against. If not present, the
                channel_order from the original configuration in the predict
                package will be used.
        """
        self.tmp_dir = tmp_dir
        self.update_stats = update_stats
        self.model_loaded = False

        package_zip_path = download_if_needed(prediction_package_uri, tmp_dir)
        package_dir = os.path.join(tmp_dir, 'package')
        make_dir(package_dir)
        with zipfile.ZipFile(package_zip_path, 'r') as package_zip:
            package_zip.extractall(path=package_dir)

        # Read bundle command config
        bundle_config_path = os.path.join(package_dir, 'bundle_config.json')
        msg = load_json_config(bundle_config_path, CommandConfigMsg())
        bundle_config = msg.bundle_config

        self.task_config = rv.TaskConfig.from_proto(bundle_config.task) \
                                        .load_bundle_files(package_dir)

        self.backend_config = rv.BackendConfig.from_proto(bundle_config.backend) \
                                              .load_bundle_files(package_dir)

        scene_config = rv.SceneConfig.from_proto(bundle_config.scene)
        scene_builder = scene_config.load_bundle_files(package_dir) \
                                    .to_builder() \
                                    .clear_label_source() \
                                    .clear_aois() \
                                    .with_id('PREDICTOR')

        # If the scene does not have a label store, generate a default one.
        if not scene_config.label_store:
            scene_builder = scene_builder.with_task(self.task_config) \
                                         .with_label_store()

        if channel_order:
            raster_source = scene_builder.config['raster_source'] \
                                         .to_builder() \
                                         .with_channel_order(channel_order) \
                                         .build()
            scene_builder = scene_builder.with_raster_source(raster_source)

        self.scene_config = scene_builder.build()

        self.analyzer_configs = []
        if update_stats:
            for analyzer in bundle_config.analyzers:
                a = rv.AnalyzerConfig.from_proto(analyzer) \
                                     .load_bundle_files(package_dir)
                self.analyzer_configs.append(a)

        self.bundle_config = rv.command.CommandConfig \
                                       .from_proto(msg) \
                                       .to_builder() \
                                       .with_task(self.task_config) \
                                       .with_backend(self.backend_config) \
                                       .with_scene(self.scene_config) \
                                       .with_analyzers(self.analyzer_configs) \
                                       .build()
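
The constructor above reads the predict package produced by the BUNDLE command tests earlier: it unzips the package, loads bundle_config.json with load_json_config, and rebuilds the task, backend, scene, and analyzer configs. As a minimal usage sketch (not one of the original examples): the package URI and image/label paths are hypothetical placeholders, the import path is assumed for Raster Vision 0.x, and Predictor.predict is called with the same three arguments as in the plugin example that follows.

import os
import tempfile

# Import path assumed for Raster Vision 0.x; adjust for your installed version.
from rastervision.predictor import Predictor

# Hypothetical inputs: substitute a real predict package and imagery.
predict_package_uri = 's3://my-bucket/predict_package.zip'
image_uri = '/data/scene.tif'
label_uri = '/data/scene-predictions.json'

with tempfile.TemporaryDirectory() as tmp_dir:
    # The constructor unzips the package into tmp_dir and reads
    # bundle_config.json with load_json_config, as shown above.
    predictor = Predictor(predict_package_uri, tmp_dir, update_stats=False)
    # Same three-argument call as in the plugin example below; the last
    # argument is the path where the bundle config is written back out.
    config_out = os.path.join(tmp_dir, 'bundle_config.json')
    predictor.predict(image_uri, label_uri, config_out)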
Example No. 10
    def run(self):
        self.dlg.show()

        settings = Settings()

        self.dlg.predict_package_edit.setText(settings.get_predict_package())

        self.dlg.style_profile_combobox.clear()
        profiles = settings.get_style_profiles()
        profiles.insert(0, StyleProfile.EMPTY())
        profile_names = list(map(lambda p: p.name, profiles))
        self.dlg.style_profile_combobox.addItems(profile_names)
        settings_profile = settings.get_predict_profile()
        if settings_profile in profile_names:
            self.dlg.style_profile_combobox.setCurrentIndex(
                profile_names.index(settings_profile))
        else:
            self.dlg.style_profile_combobox.setCurrentIndex(0)

        self.dlg.label_uri_edit.setText(settings.get_label_store_uri())

        self.dlg.update_stats_checkbox.setChecked(settings.get_update_stats())

        self.dlg.use_docker_checkbox.setChecked(settings.get_use_docker())
        self.dlg.docker_image_edit.setText(settings.get_docker_image())

        # Load all raster layers
        self.dlg.input_layer_combobox.clear()
        raster_layers = get_raster_layers()
        layer_names = []
        for name in raster_layers:
            layer_names.append(name)
            self.dlg.input_layer_combobox.addItem(name)

        result = self.dlg.exec_()

        if result:
            predict_package = self.dlg.predict_package_edit.text()
            settings.set_predict_package(predict_package)

            layer_name_idx = self.dlg.input_layer_combobox.currentIndex()
            layer_name = layer_names[layer_name_idx]
            layer = raster_layers[layer_name]

            style_profile_index = self.dlg.style_profile_combobox.currentIndex(
            )
            settings.set_predict_profile(profile_names[style_profile_index])

            style_profile = None
            if not style_profile_index == 0:
                style_profile = profiles[style_profile_index]

            label_store_uri = self.dlg.label_uri_edit.text()
            settings.set_label_store_uri(label_store_uri)

            update_stats = self.dlg.update_stats_checkbox.checkState()
            settings.set_update_stats(update_stats)

            use_docker = self.dlg.use_docker_checkbox.checkState()
            settings.set_use_docker(use_docker)
            docker_image = self.dlg.docker_image_edit.text()
            settings.set_docker_image(docker_image)

            prediction_layer_name = '{}-predictions'.format(layer_name)

            with TemporaryDirectory(dir=settings.get_working_dir()) as tmp_dir:
                path = os.path.join(tmp_dir, "{}.tif".format(layer_name))
                export_raster_layer(layer, path)
                if not os.path.exists(path):
                    raise Exception("Writing raster to {} failed".format(path))
                bundle_config_base = 'bundle_config.json'
                bundle_config_path = os.path.join(tmp_dir, bundle_config_base)

                # Grab the predict package locally
                pp_local = download_if_needed(predict_package, tmp_dir)

                if use_docker:
                    pp_dir = os.path.dirname(pp_local)
                    pp_base = os.path.basename(pp_local)
                    gt_base = os.path.basename(path)
                    lb_dir = os.path.dirname(label_store_uri)
                    lb_base = os.path.basename(label_store_uri)

                    cmd = [
                        'docker', 'run', '--rm', '-v',
                        '{}/.rastervision:/root/.rastervision'.format(
                            os.environ['HOME']), '-v',
                        '{}:{}'.format(tmp_dir, '/opt/source'), '-v',
                        '{}:{}'.format(lb_dir, '/opt/output'), '-v',
                        '{}:{}'.format(pp_dir, '/opt/predict_package'),
                        docker_image, 'rastervision', 'predict',
                        '/opt/predict_package/{}'.format(pp_base),
                        '/opt/source/{}'.format(gt_base),
                        '/opt/output/{}'.format(lb_base), '--export-config',
                        '/opt/source/{}'.format(bundle_config_base)
                    ]

                    if update_stats:
                        cmd.append('--update-stats')

                    Log.log_info('Running command: {}'.format(' '.join(cmd)))
                    try:
                        output = check_output(cmd)
                        Log.log_info("[PREDICT OUTPUT]: {}".format(output))
                    except CalledProcessError as e:
                        Log.log_error("Error running {}: {}".format(
                            ' '.join(cmd), e.output))
                        raise e
                else:
                    predictor = Predictor(pp_local,
                                          tmp_dir,
                                          update_stats=update_stats)
                    predictor.predict(path, label_store_uri,
                                      bundle_config_path)

                msg = load_json_config(bundle_config_path, CommandConfigMsg())
                bundle_config = msg.bundle_config
                task_config = rv.TaskConfig.from_proto(bundle_config.task)

                scene_config = rv.SceneConfig.from_proto(bundle_config.scene)
                label_store_config = scene_config.label_store.for_prediction(
                    label_store_uri)

            # Load prediction
            config = label_store_config
            loader = RegistryInstance.get().get_label_store_loader(
                config.store_type)
            ctx = LoadContext(task_config, self.iface, style_profile, None)
            style_file = None
            if ctx.style_profile:
                style_file = ctx.style_profile.prediction_style_file
            loader.load(config, prediction_layer_name, ctx, style_file)