Example #1
 def _construct_samples_dct(self):
     logger.info('Will collect samples (img/ann pairs).')
     self.name_to_tag = self.config['dataset_tags']
     project_fs = ProjectFS.from_disk_dir_project(self.helper.paths.project_dir)
     logger.info('Project structure has been read. Samples: {}.'.format(project_fs.pr_structure.image_cnt))
     self.samples_dct = nn_data.samples_by_tags(tags=list(self.name_to_tag.values()), project_fs=project_fs,
                                                project_meta=self.helper.in_project_meta)
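For orientation: nn_data.samples_by_tags here groups the project's (image, annotation) pairs into per-tag buckets keyed by the dataset tags listed in the config. A rough stand-in on plain data (the real nn_data API and its sample objects may differ; this is only a sketch of the grouping idea):

from collections import defaultdict

def samples_by_tags(tags, samples):
    # samples: iterable of (sample, sample_tags) pairs.
    # A sample lands in every requested bucket whose tag it carries.
    buckets = defaultdict(list)
    for sample, sample_tags in samples:
        for tag in set(sample_tags) & set(tags):
            buckets[tag].append(sample)
    return buckets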
Example #2
    def _load_train_config(self):
        train_config_rw = JsonConfigRW(self.paths.model_config_fpath)
        if not train_config_rw.config_exists:
            raise RuntimeError(
                'Unable to run inference, config from training wasn\'t found.')
        self.train_config = train_config_rw.load()

        self.class_title_to_idx = self.train_config[
            self.class_title_to_idx_key]
        self.train_classes = FigClasses(
            self.train_config[self.train_classes_key])
        logger.info('Read model internal class mapping',
                    extra={'class_mapping': self.class_title_to_idx})
        logger.info('Read model out classes',
                    extra={'classes': self.train_classes.py_container})

        # Make a separate [class title] --> [index] map that excludes the 'special' classes that should not be in the
        # final output.
        train_class_titles = set(train_class['title']
                                 for train_class in self.train_classes)
        self.out_class_mapping = {
            title: idx
            for title, idx in self.class_title_to_idx.items()
            if title in train_class_titles
        }
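The final dict comprehension keeps only the classes the model actually outputs, dropping the 'special' entries mentioned in the comment. On plain data (the values below are hypothetical, for illustration only) it behaves like this:

class_title_to_idx = {'person': 0, 'car': 1, 'neutral': 2}  # 'neutral' is a 'special' class
train_classes = [{'title': 'person'}, {'title': 'car'}]

train_class_titles = set(c['title'] for c in train_classes)
out_class_mapping = {title: idx
                     for title, idx in class_title_to_idx.items()
                     if title in train_class_titles}
assert out_class_mapping == {'person': 0, 'car': 1}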
Example #3
 def load_settings(self):
     self.settings = deepcopy(ModelDeploy.settings)
     new_settings = json_utils.json_load(
         task_paths.TaskPaths(determine_in_project=False).settings_path)
     logger.info('Input settings', extra={'settings': new_settings})
     config_readers.update_recursively(self.settings, new_settings)
     logger.info('Full settings', extra={'settings': self.settings})
Example #4
 def _determine_settings(self):
     input_config = self.helper.task_settings
     logger.info('Input config', extra={'config': input_config})
     config = deepcopy(self.default_settings)
     config_readers.update_recursively(config, input_config)
     logger.info('Full config', extra={'config': config})
     SettingsValidator.validate_train_cfg(config)
     self.config = config
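Examples #3 and #4 follow the same pattern: copy the defaults, then merge the user's input on top so explicit settings win. A minimal sketch of what a recursive merge like config_readers.update_recursively does on plain dicts (assumed semantics, not the library source):

def update_recursively(base, new):
    # Merge `new` into `base` in place: nested dicts are merged key by key,
    # any other value in `new` overwrites the corresponding value in `base`.
    for key, value in new.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            update_recursively(base[key], value)
        else:
            base[key] = value
    return base

defaults = {'lr': 0.001, 'input_size': {'width': 256, 'height': 256}}
update_recursively(defaults, {'input_size': {'width': 512}})
# defaults is now {'lr': 0.001, 'input_size': {'width': 512, 'height': 256}}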
Example #5
    def __init__(self, default_settings=None):
        logger.info('Will init all required for evaluation.')
        self.helper = task_helpers.TaskHelperMetrics()

        self.default_settings = deepcopy(default_settings or {})
        self._determine_settings()
        self._determine_input_data()
        self._check_project_meta()
Example #6
 def __init__(self, settings=None):
     logger.info('Starting base single image inference applier init.')
     settings = settings or {}
     self.settings = config_readers.update_recursively(
         deepcopy(self.get_default_settings()), settings)
     self.paths = TaskPaths(determine_in_project=False)
     self._load_train_config()
     self._construct_and_fill_model()
     logger.info('Base single image inference applier init done.')
Example #7
 def _determine_model_input_size(self):
     src_size = self.train_config['settings']['input_size']
     self.input_size_wh = (src_size['width'], src_size['height'])
     logger.info('Model input size is read (for auto-rescale).',
                 extra={
                     'input_size': {
                         'width': self.input_size_wh[0],
                         'height': self.input_size_wh[1]
                     }
                 })
Example #8
def main():
    task_helpers.task_verification(check_in_graph)

    logger.info('DTL started')
    helper = DtlHelper()

    try:
        net = Net(helper.graph, helper.in_project_metas,
                  helper.paths.results_dir)
        helper.save_res_meta(net.get_result_project_meta())
        datasets_conflict_map = calculate_datasets_conflict_map(helper)
    except Exception:
        logger.error('Error occurred on DTL-graph initialization step!')
        raise

    # is_archive = net.is_archive()
    results_counter = 0
    for pr_name, pr_dir in helper.in_project_dirs.items():
        project = sly.Project(directory=pr_dir, mode=sly.OpenMode.READ)
        progress = progress_counter.progress_counter_dtl(
            pr_name, project.total_items)
        for dataset in project:
            for item_name in dataset:
                try:
                    img_desc = ImageDescriptor(
                        make_legacy_project_item(project, dataset, item_name),
                        datasets_conflict_map[project.name][dataset.name])
                    ann = json_utils.json_load(dataset.get_ann_path(item_name))
                    data_el = (img_desc, ann)
                    export_output_generator = net.start(data_el)
                    for res_export in export_output_generator:
                        logger.trace("image processed",
                                     extra={
                                         'img_name':
                                         res_export[0][0].get_img_name()
                                     })
                        results_counter += 1
                except Exception as e:
                    extra = {
                        'project_name': project.name,
                        'ds_name': dataset.name,
                        'image_name': item_name,
                        'exc_str': str(e),
                    }
                    logger.warning(
                        'Image was skipped because some error occurred',
                        exc_info=True,
                        extra=extra)
                progress.iter_done_report()

    logger.info('DTL finished',
                extra={
                    'event_type': EventType.DTL_APPLIED,
                    'new_proj_size': results_counter
                })
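The inner try/except implements a skip-and-continue policy: a corrupt image or annotation is logged with its project/dataset/image context and the loop moves on, so one bad item cannot abort the whole DTL run, while progress is still reported for skipped items. Stripped of the Supervisely specifics, the shape of that loop is (a generic sketch, not library code):

import logging

logger = logging.getLogger(__name__)

def process_all(items, process_one):
    # Skip-and-continue: a failing item is logged and skipped,
    # the remaining items are still processed.
    done = 0
    for name in items:
        try:
            process_one(name)  # may raise on a corrupt image/annotation
            done += 1
        except Exception as e:
            logger.warning('Item was skipped because some error occurred',
                           exc_info=True, extra={'item_name': name, 'exc_str': str(e)})
    return done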
Example #9
 def _determine_model_classes_detection(self):
     in_project_classes = self.helper.in_project_meta.classes
     self.out_classes = nn_data.make_out_classes(in_project_classes, shape='rectangle')
     logger.info('Determined model out classes', extra={'classes': self.out_classes.py_container})
     in_project_class_to_idx = nn_data.make_new_class_to_idx_map(in_project_classes,
                                                                 start_class_id=self.get_start_class_id())
     self.class_title_to_idx = nn_data.infer_training_class_to_idx_map(
         self.config['weights_init_type'],
         in_project_class_to_idx,
         self.helper.paths.model_config_fpath,
         class_to_idx_config_key=self.class_title_to_idx_key)
     logger.info('Determined class mapping.', extra={'class_mapping': self.class_title_to_idx})
Example #10
    def _determine_settings(self):
        input_config = self.helper.task_settings
        logger.info('Input config', extra={'config': input_config})

        config = deepcopy(self.default_settings)

        config_readers.update_recursively(config, input_config)
        logger.info('Full config', extra={'config': config})
        if len(config['classes_mapping']) < 1:
            raise RuntimeError('At least one class pair must be defined.')
        self.classes_mapping = config['classes_mapping']
        self.config = config
Example #11
    def __init__(self, default_settings):
        logger.info('Will init all required for training.')
        self.helper = task_helpers.TaskHelperTrain()

        self.default_settings = deepcopy(default_settings)
        self._determine_settings()
        self._determine_model_classes()
        self._determine_out_config()
        self._construct_samples_dct()
        self._construct_data_loaders()
        self._construct_and_fill_model()
        self._construct_loss()

        self.epoch_flt = 0
Example #12
def main():
    task_helpers.task_verification(check_in_graph)

    logger.info('DTL started')
    helper = DtlHelper()
    net = Net(helper.graph, helper.in_project_metas, helper.paths.results_dir)
    helper.save_res_meta(net.get_result_project_meta())
    datasets_conflict_map = calculate_datasets_conflict_map(helper)

    # is_archive = net.is_archive()
    results_counter = 0
    for pr_name, pr_dir in helper.in_project_dirs.items():

        root_path, project_name = ProjectFS.split_dir_project(pr_dir)
        project_fs = ProjectFS.from_disk(root_path,
                                         project_name,
                                         by_annotations=True)
        progress = progress_counter.progress_counter_dtl(
            pr_name, project_fs.image_cnt)

        for sample in project_fs:
            try:
                img_desc = ImageDescriptor(
                    sample, datasets_conflict_map[pr_name][sample.ds_name])
                ann = json_utils.json_load(sample.ann_path)
                data_el = (img_desc, ann)
                export_output_generator = net.start(data_el)
                for res_export in export_output_generator:
                    logger.trace(
                        "image processed",
                        extra={'img_name': res_export[0][0].get_img_name()})
                    results_counter += 1
            except Exception as e:
                extra = {
                    'project_name': sample.project_name,
                    'ds_name': sample.ds_name,
                    'image_name': sample.image_name,
                    'exc_str': str(e),
                }
                logger.warning('Image was skipped because some error occurred',
                               exc_info=True,
                               extra=extra)
            progress.iter_done_report()

    logger.info('DTL finished',
                extra={
                    'event_type': EventType.DTL_APPLIED,
                    'new_proj_size': results_counter
                })
Example #13
    def __init__(self,
                 single_image_inference: SingleImageInferenceApplier,
                 default_mode_settings,
                 default_settings_for_modes=None,
                 settings_validator_cls=AlwaysPassingValidator):
        self.single_image_inference = single_image_inference
        self.default_settings = config_readers.update_recursively(
            deepcopy(default_mode_settings),
            single_image_inference.get_default_settings())
        self.default_settings_for_modes = deepcopy(
            default_settings_for_modes or get_default_settings_for_modes())
        self.settings_validator_cls = settings_validator_cls

        self.helper = task_helpers.TaskHelperInference()
        self._determine_settings()
        self._determine_input_data()
        logger.info('Dataset inference preparation done.')
Example #14
    def _determine_settings(self):
        input_config = self.helper.task_settings
        logger.info('Input config', extra={'config': input_config})

        self.config = deepcopy(self.default_settings)
        if 'mode' in input_config and 'source' in input_config['mode']:
            mode_name = input_config['mode']['source']
            default_mode_settings = self.default_settings_for_modes.get(
                mode_name, None)
            if default_mode_settings is not None:
                config_readers.update_recursively(
                    self.config['mode'], deepcopy(default_mode_settings))

        config_readers.update_recursively(self.config, input_config)
        logger.info('Full config', extra={'config': self.config})
        self.settings_validator_cls.validate_inference_cfg(self.config)

        self.debug_copy_images = os.getenv('DEBUG_COPY_IMAGES') is not None
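Note the merge order: once the requested mode is known, that mode's defaults are merged into the 'mode' sub-config first, and the raw input config is applied last so user-supplied values always win. Reusing the update_recursively sketch from Example #4, the layering works like this (the keys are hypothetical):

config = {'mode': {'source': 'full_image'}}            # copied base defaults
mode_defaults = {'window': {'width': 256, 'height': 256}}
input_config = {'mode': {'source': 'sliding_window',
                         'window': {'width': 512}}}

update_recursively(config['mode'], mode_defaults)  # layer 1: defaults for the mode
update_recursively(config, input_config)           # layer 2: user input wins
# config['mode'] is now {'source': 'sliding_window',
#                        'window': {'width': 512, 'height': 256}}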
Example #15
    def _determine_input_data(self):
        project_1 = self.config['project_1']
        project_2 = self.config.get('project_2')
        if project_2 is None:
            project_2 = project_1

        data_dir = os.path.join(self.helper.paths.task_dir, 'data')
        for pr in [project_1, project_2]:
            if not os.path.exists(os.path.join(data_dir, pr)):
                raise RuntimeError('Project {} does not exist.'.format(pr))

        self.in_project_fs_1 = ProjectFS.from_disk(data_dir, project_1)
        self.in_project_fs_2 = ProjectFS.from_disk(data_dir, project_2)
        if self.in_project_fs_1.image_cnt != self.in_project_fs_2.image_cnt:
            raise RuntimeError(
                'Projects must contain the same number of samples.')
        logger.info('Project structures have been read. Samples: {}.'.format(
            self.in_project_fs_1.image_cnt))

        self.in_pr_meta_1 = ProjectMeta.from_dir(
            os.path.join(data_dir, project_1))
        self.in_pr_meta_2 = ProjectMeta.from_dir(
            os.path.join(data_dir, project_2))
Example #16
 def _determine_input_data(self):
     project_fs = ProjectFS.from_disk_dir_project(
         self.helper.paths.project_dir)
     logger.info('Project structure has been read. Samples: {}.'.format(
         project_fs.pr_structure.image_cnt))
     self.in_project_fs = project_fs