def automatic_model_search(self, network_info):
        """Locate the model (and weights) files described by ``network_info``.

        If ``network_info['model']`` points at a directory, it is searched for
        a unique candidate matching ``self.default_model_suffix`` — preferring
        IR ``.xml`` files, falling back to ``.blob`` — honoring the
        ``_model_is_blob`` hint when present.

        Returns:
            tuple: ``(model_path, weights_path)``. ``weights_path`` is ``None``
            for ``.blob`` models, which embed their weights.

        Raises:
            ConfigError: when no suitable model, or several candidates, are found.
        """
        model = Path(network_info['model'])
        if model.is_dir():
            is_blob = network_info.get('_model_is_blob')
            if is_blob:
                model_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
                if not model_list:
                    model_list = list(model.glob('*.blob'))
            else:
                model_list = list(model.glob('*{}.xml'.format(self.default_model_suffix)))
                blob_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
                if not model_list and not blob_list:
                    # Fall back to any IR/blob in the directory; the previous
                    # no-op str.format calls on these literal patterns were removed.
                    model_list = list(model.glob('*.xml'))
                    blob_list = list(model.glob('*.blob'))
                    if not model_list:
                        model_list = blob_list
            if not model_list:
                raise ConfigError('Suitable model for {} not found'.format(self.default_model_suffix))
            if len(model_list) > 1:
                raise ConfigError('Several suitable models for {} found'.format(self.default_model_suffix))
            model = model_list[0]
        if model.suffix == '.blob':
            return model, None
        # NOTE(review): replaces the first 'xml' occurrence anywhere in the
        # file name, not only the extension — kept to match sibling helpers.
        weights = network_info.get('weights', model.parent / model.name.replace('xml', 'bin'))

        return model, weights
 def auto_model_search(self, network_info):
     """Resolve the model description (.xml) and weights (.bin) paths.

     When the configured model path is a directory, a single ``.xml`` file is
     searched for (preferring ``<default_model_name>.xml``); weights are then
     resolved next to the model unless supplied explicitly.

     Raises:
         ConfigError: when zero or several model/weights candidates match.
     """
     model = Path(network_info.get('model', ''))
     weights = network_info.get('weights')
     if not model.is_dir():
         return model, weights

     # Prefer the conventionally named model, then accept any .xml file.
     candidates = list(model.glob('{}.xml'.format(self.default_model_name)))
     if not candidates:
         candidates = list(model.glob('*.xml'))
     if not candidates:
         raise ConfigError('Suitable model description is not detected')
     if len(candidates) != 1:
         raise ConfigError(
             'Several suitable models found, please specify required model'
         )
     model = candidates[0]

     if weights is None or Path(weights).is_dir():
         search_dir = weights or model.parent
         weights = Path(search_dir) / model.name.replace('xml', 'bin')
         if not weights.exists():
             found = list(search_dir.glob('*.bin'))
             if not found:
                 raise ConfigError('Suitable weights is not detected')
             if len(found) != 1:
                 raise ConfigError(
                     'Several suitable weights found, please specify required explicitly'
                 )
             weights = found[0]
     weights = Path(weights)
     return model, weights
def build_stages(models_info, preprocessors_config, launcher, model_args):
    """Construct the ordered list of MTCNN pipeline stages.

    Args:
        models_info: per-stage model configuration keyed by 'pnet'/'rnet'/'onet'.
        preprocessors_config: preprocessing steps shared by every stage.
        launcher: launcher whose 'framework' selects the stage implementation.
        model_args: positional model paths used when a stage config lacks one.

    Returns:
        list: instantiated stage objects in pipeline order.

    Raises:
        ConfigError: for a missing required stage, an unsupported framework,
            or when no stage at all is configured.
    """
    def merge_preprocessing(model_specific, common_preprocessing):
        # NOTE: extends the model-specific list in place, so the caller's
        # config object sees the merged result.
        if model_specific:
            model_specific.extend(common_preprocessing)
            return model_specific
        return common_preprocessing

    required_stages = ['pnet']
    stages_mapping = OrderedDict([('pnet', {
        'caffe': CaffeProposalStage,
        'dlsdk': DLSDKProposalStage,
        'dummy': DummyProposalStage
    }), ('rnet', {
        'caffe': CaffeRefineStage,
        'dlsdk': DLSDKRefineStage
    }), ('onet', {
        'caffe': CaffeOutputStage,
        'dlsdk': DLSDKOutputStage
    })])
    framework = launcher.config['framework']
    stages = []
    for stage_name, stage_classes in stages_mapping.items():
        if stage_name not in models_info:
            if stage_name not in required_stages:
                continue
            raise ConfigError('{} required for evaluation'.format(stage_name))
        model_config = models_info[stage_name]
        # Stored predictions (without re-storing) mean the stage is replayed
        # by a dummy implementation instead of running real inference.
        if 'predictions' in model_config and not model_config.get('store_predictions', False):
            stage_framework = 'dummy'
        else:
            stage_framework = framework
        if not contains_any(model_config, ['model', 'caffe_model']) and stage_framework != 'dummy':
            if model_args:
                model_config['model'] = model_args[len(stages) if len(model_args) > 1 else 0]
        stage = stage_classes.get(stage_framework)
        # BUG FIX: previously tested `stage_classes` (a non-empty dict, always
        # truthy) instead of the looked-up class, so an unsupported framework
        # crashed later with "'NoneType' is not callable" instead of raising
        # a clear configuration error here.
        if not stage:
            raise ConfigError('{} stage does not support {} framework'.format(
                stage_name, stage_framework))
        stage_preprocess = merge_preprocessing(
            model_config.get('preprocessing', []), preprocessors_config)
        preprocessor = PreprocessingExecutor(stage_preprocess)
        stages.append(stage(model_config, preprocessor, launcher))

    if not stages:
        raise ConfigError(
            'please provide information about MTCNN pipeline stages')

    return stages
# --- Example #4 (scraped-snippet separator; not executable text) ---
    def from_configs(cls, config):
        """Build the MTCNN evaluator from a parsed configuration dictionary.

        Wires together the dataset, data reader, pipeline stages, metrics and
        postprocessing from the first 'datasets'/'launchers' entries.

        Raises:
            ConfigError: if the reader configuration is neither str nor dict.
        """
        dataset_config = config['datasets'][0]
        dataset = Dataset(dataset_config)
        data_reader_config = dataset_config.get('reader', 'opencv_imread')
        data_source = dataset_config['data_source']
        if isinstance(data_reader_config, str):
            data_reader_type = data_reader_config
            data_reader_config = None
        elif isinstance(data_reader_config, dict):
            data_reader_type = data_reader_config['type']
        else:
            raise ConfigError('reader should be dict or string')
        if data_reader_type in REQUIRES_ANNOTATIONS:
            # Annotation-backed readers take their data from the annotation itself.
            data_source = dataset.annotation
        data_reader = BaseReader.provide(data_reader_type, data_source,
                                         data_reader_config)
        models_info = config['network_info']
        launcher_config = config['launchers'][0]
        launcher = create_launcher(launcher_config, delayed_model_loading=True)
        preprocessors_config = dataset_config.get('preprocessing', [])
        # BUG FIX: build_stages requires the command-line model arguments as
        # its fourth parameter; the call previously omitted them (TypeError).
        stages = build_stages(models_info, preprocessors_config, launcher,
                              config.get('_models', []))
        metrics_executor = MetricsExecutor(dataset_config['metrics'], dataset)
        postprocessing = PostprocessingExecutor(
            dataset_config['postprocessing'])

        return cls(dataset, data_reader, stages, postprocessing,
                   metrics_executor)
# --- Example #5 (scraped-snippet separator; not executable text) ---
 def __init__(self, model_info, preprocessor, *args, **kwargs):
     """Stage that replays previously stored predictions instead of inferring.

     Args:
         model_info: stage configuration; must contain a 'predictions' entry
             pointing at a pickle file of stored outputs.
         preprocessor: preprocessing executor forwarded to the base stage.
         *args, **kwargs: accepted for signature compatibility with the real
             stage implementations; unused here.

     Raises:
         ConfigError: if 'predictions' is missing from the configuration.
     """
     super().__init__(model_info, preprocessor)
     self._index = 0
     if 'predictions' not in self.model_info:
         raise ConfigError('predictions_file is not found')
     # Whole prediction sequence is loaded up front and replayed in order.
     self._predictions = read_pickle(self.model_info['predictions'])
     self.iterator = 0
 def __init__(self, network_info, launcher, models_args, is_blob=None):
     """Create the detector plus recognizer encoder/decoder pipeline.

     Sub-network configs missing a 'model' entry are filled from the
     positional ``models_args`` (detector, encoder, decoder order; falls
     back to the first argument when not enough paths were given).

     Raises:
         ConfigError: when a required sub-network section is absent.
     """
     # Fill in missing 'model' entries from the positional model arguments.
     for position, part_name in enumerate(
             ('detector', 'recognizer_encoder', 'recognizer_decoder')):
         part = network_info.get(part_name, {})
         if 'model' not in part:
             part['model'] = models_args[position if len(models_args) > position else 0]
             part['_model_is_blob'] = is_blob
         network_info[part_name] = part
     if not contains_all(network_info, ['detector', 'recognizer_encoder', 'recognizer_decoder']):
         raise ConfigError('network_info should contains detector, encoder and decoder fields')
     self.detector = create_detector(network_info['detector'], launcher)
     self.recognizer_encoder = create_recognizer(network_info['recognizer_encoder'], launcher, 'encoder')
     self.recognizer_decoder = create_recognizer(network_info['recognizer_decoder'], launcher, 'decoder')
     self.recognizer_decoder_inputs = network_info['recognizer_decoder_inputs']
     self.recognizer_decoder_outputs = network_info['recognizer_decoder_outputs']
     self.max_seq_len = int(network_info['max_seq_len'])
     self.adapter = create_adapter(network_info['adapter'])
     self.alphabet = network_info['alphabet']
     self.sos_index = int(network_info['sos_index'])
     self.eos_index = int(network_info['eos_index'])
# --- Example #7 (scraped-snippet separator; not executable text) ---
    def auto_model_search(network_info):
        """Pick model (and weights) paths from config, searching directories.

        Returns ``(model, None)`` for ``.blob`` models (weights embedded);
        otherwise ``(model, weights)`` with weights defaulting to the ``.bin``
        file next to the ``.xml`` model.

        Raises:
            ConfigError: when a directory search yields zero or several models.
        """
        model = Path(network_info['model'])
        blob_requested = network_info.get('_model_is_blob')
        if model.is_dir():
            if blob_requested:
                candidates = list(model.glob('*.blob'))
            else:
                candidates = list(model.glob('*.xml'))
                # When the blob preference is unspecified, accept a blob too.
                if not candidates and blob_requested is None:
                    candidates = list(model.glob('*.blob'))
            if not candidates:
                raise ConfigError('Suitable model not found')
            if len(candidates) > 1:
                raise ConfigError('Several suitable models found')
            model = candidates[0]
        if model.suffix == '.blob':
            return model, None
        default_weights = model.parent / model.name.replace('xml', 'bin')
        weights = network_info.get('weights', default_weights)

        return model, weights
# --- Example #8 (scraped-snippet separator; not executable text) ---
    def from_configs(cls, config):
        """Assemble the colorization evaluation pipeline from ``config``.

        Missing model paths for both sub-networks are filled from the
        positional '_models' arguments before the test and check models
        are instantiated.

        Raises:
            ConfigError: for an invalid reader config, an unsupported
                launcher framework, or missing sub-network sections.
        """
        dataset_config = config['datasets'][0]
        dataset = Dataset(dataset_config)
        reader_config = dataset_config.get('reader', 'opencv_imread')
        data_source = dataset_config['data_source']
        if isinstance(reader_config, str):
            reader = BaseReader.provide(reader_config, data_source)
        elif isinstance(reader_config, dict):
            reader = BaseReader.provide(reader_config['type'], data_source, reader_config)
        else:
            raise ConfigError('reader should be dict or string')
        preprocessing = PreprocessingExecutor(dataset_config.get('preprocessing', []), dataset.name)
        metrics_executor = MetricsExecutor(dataset_config['metrics'], dataset)
        launcher_settings = config['launchers'][0]
        # Only the dlsdk launcher is supported by this evaluator.
        if launcher_settings['framework'] not in ['dlsdk']:
            raise ConfigError('{} framework not supported'.format(launcher_settings['framework']))
        launcher = create_launcher(launcher_settings, delayed_model_loading=True)
        network_info = config.get('network_info', {})
        model_args = config.get('_models', [])
        models_is_blob = config.get('_model_is_blob')
        colorization_network = network_info.get('colorization_network', {})
        verification_network = network_info.get('verification_network', {})
        if model_args and 'model' not in colorization_network:
            colorization_network['model'] = model_args[0]
            colorization_network['_model_is_blob'] = models_is_blob
        if model_args and 'model' not in verification_network:
            verification_network['model'] = model_args[1 if len(model_args) > 1 else 0]
            verification_network['_model_is_blob'] = models_is_blob
        network_info['colorization_network'] = colorization_network
        network_info['verification_network'] = verification_network
        if not contains_all(network_info, ['colorization_network', 'verification_network']):
            raise ConfigError('configuration for colorization_network/verification_network does not exist')

        test_model = ColorizationTestModel(network_info['colorization_network'], launcher)
        check_model = ColorizationCheckModel(network_info['verification_network'], launcher)
        return cls(dataset, reader, preprocessing, metrics_executor, launcher, test_model, check_model)
# --- Example #9 (scraped-snippet separator; not executable text) ---
 def __init__(self, network_info, launcher):
     """Build the encoder/decoder pair for sequential (frame-buffered) evaluation.

     Raises:
         ConfigError: when the 'encoder' or 'decoder' section is absent.
     """
     super().__init__(network_info, launcher)
     if not contains_all(network_info, ['encoder', 'decoder']):
         raise ConfigError(
             'network_info should contains encoder and decoder fields')
     encoder_info = network_info['encoder']
     decoder_info = network_info['decoder']
     # Decoder consumes frames in fixed-size chunks (default 16).
     self.num_processing_frames = decoder_info.get('num_processing_frames', 16)
     self.processing_frames_buffer = []
     self.encoder = create_encoder(encoder_info, launcher)
     self.decoder = create_decoder(decoder_info, launcher)
     self.store_encoder_predictions = encoder_info.get('store_predictions', False)
     # Accumulates encoder outputs only when storing was requested.
     self._encoder_predictions = [] if self.store_encoder_predictions else None
 def from_configs(cls, config):
     """Create the evaluator: dataset, reader, preprocessing, metrics, model."""
     dataset_config = config['datasets'][0]
     dataset = Dataset(dataset_config)
     reader_config = dataset_config.get('reader', 'opencv_imread')
     data_source = dataset_config['data_source']
     if isinstance(reader_config, dict):
         reader = BaseReader.provide(reader_config['type'], data_source, reader_config)
     elif isinstance(reader_config, str):
         reader = BaseReader.provide(reader_config, data_source)
     else:
         raise ConfigError('reader should be dict or string')
     preprocessing = PreprocessingExecutor(dataset_config.get('preprocessing', []), dataset.name)
     metrics_executor = MetricsExecutor(dataset_config['metrics'], dataset)
     # Model files are resolved by the model wrapper, hence delayed loading.
     launcher = create_launcher(config['launchers'][0], delayed_model_loading=True)
     model = SequentialModel(
         config.get('network_info', {}), launcher, config.get('_models', []), config.get('_model_is_blob')
     )
     return cls(dataset, reader, preprocessing, metrics_executor, launcher, model)
# --- Example #11 (scraped-snippet separator; not executable text) ---
 def __init__(self, network_info, launcher):
     """Instantiate the detector plus recognizer encoder/decoder sub-networks.

     Raises:
         ConfigError: when any of the three sub-network sections is missing.
     """
     super().__init__(network_info, launcher)
     required_parts = ['detector', 'recognizer_encoder', 'recognizer_decoder']
     if not contains_all(network_info, required_parts):
         raise ConfigError(
             'network_info should contains detector, encoder and decoder fields'
         )
     self.detector = create_detector(network_info['detector'], launcher)
     self.recognizer_encoder = create_recognizer(network_info['recognizer_encoder'], launcher)
     self.recognizer_decoder = create_recognizer(network_info['recognizer_decoder'], launcher)
     self.recognizer_decoder_inputs = network_info['recognizer_decoder_inputs']
     self.recognizer_decoder_outputs = network_info['recognizer_decoder_outputs']
     self.max_seq_len = int(network_info['max_seq_len'])
     self.adapter = create_adapter(network_info['adapter'])
     self.alphabet = network_info['alphabet']
     self.sos_index = int(network_info['sos_index'])
     self.eos_index = int(network_info['eos_index'])
# --- Example #12 (scraped-snippet separator; not executable text) ---
 def __init__(self, network_info, launcher):
     """Prediction-replay network: loads stored outputs from a pickle file.

     Args:
         network_info: network configuration; must contain a 'predictions'
             entry pointing at the stored-predictions pickle file.
         launcher: launcher forwarded to the base class.

     Raises:
         ConfigError: if 'predictions' is missing from ``network_info``.
     """
     super().__init__(network_info, launcher)
     if 'predictions' not in network_info:
         raise ConfigError('predictions_file is not found')
     # Stored predictions are replayed sequentially, tracked by `iterator`.
     self._predictions = read_pickle(network_info['predictions'])
     self.iterator = 0