def save(self, save_path, model_name=None, for_stat_collection=False):
    """ Save model as IR in the specified path.

    :param save_path: path to save the model
    :param model_name: name under which the model will be saved
    :param for_stat_collection: whether the model is saved to be used for
        statistics collection or for normal inference (affects only cascaded
        models). If set to False, removes model prefixes from node names.
    :return model_paths: list of dictionaries:
        'name': model name (for cascade models only),
        'model': path to xml,
        'weights': path to bin
    """
    if not for_stat_collection:
        self._remove_models_prefix()
    name = model_name
    model_paths = []
    for model_dict in self._models:
        model_path = {}
        if self._is_cascade:
            m_name = model_dict['name']
            name = '{}_{}'.format(model_name, m_name) if model_name else m_name
            model_path['name'] = m_name
        if not name:
            name = model_dict['model'].name
        model_path['model'] = os.path.join(save_path, name + '.xml')
        model_path['weights'] = os.path.join(save_path, name + '.bin')
        model_paths.append(model_path)
        stdout_redirect(save_graph, model_dict['model'], save_path, name)
    if not for_stat_collection:
        self._restore_models_prefix()
    return model_paths
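
# A minimal, self-contained sketch (an assumption for illustration, not part of
# the source) of the file-name resolution rule that save() above applies: for
# cascaded models the file stem becomes '<model_name>_<submodel_name>' when
# model_name is given, otherwise just the submodel name; single models fall
# back to the graph's own name. `resolve_ir_paths` is a hypothetical helper.
import os


def resolve_ir_paths(save_path, submodel_name=None, model_name=None,
                     graph_name='model'):
    if submodel_name is not None:  # cascaded case
        name = '{}_{}'.format(model_name, submodel_name) if model_name else submodel_name
    else:
        name = model_name or graph_name
    return (os.path.join(save_path, name + '.xml'),
            os.path.join(save_path, name + '.bin'))


# e.g. resolve_ir_paths('/tmp/ir', submodel_name='detector', model_name='pipeline')
# -> ('/tmp/ir/pipeline_detector.xml', '/tmp/ir/pipeline_detector.bin')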
def test_sparsity_algo(test_models, tmp_path, models):
    model_name, model_framework, algorithm, sparsity_level, normed_threshold, \
        ref_name = test_models
    algorithm_config = Dict({
        'algorithms': [{
            'name': algorithm,
            'params': {
                'sparsity_level': sparsity_level,
                'normed_threshold': normed_threshold,
            }
        }]
    })

    model = models.get(model_name, model_framework, tmp_path)

    engine_config = get_engine_config(model_name)
    config = merge_configs(model.model_params, engine_config, algorithm_config)
    config.engine.evaluate = False
    config.engine.type = 'accuracy_checker'

    _ = optimize(config)
    output_dir = os.path.join(config.model.exec_log_dir, 'optimized')
    xml_path = os.path.join(output_dir, config.model.model_name + '.xml')
    bin_path = os.path.join(output_dir, config.model.model_name + '.bin')
    output_model, meta = stdout_redirect(restore_graph_from_ir, xml_path, bin_path)
    output_model.meta_data = meta

    assert check_sparsity_level(NXModel(graph=output_model), config, sparsity_level)
    check_graph(tmp_path, output_model, model_name + ref_name, model_framework,
                check_weights=True)
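
# A hedged sketch (an assumption, not the toolkit's implementation) of what a
# check like check_sparsity_level() above conceptually verifies: the global
# fraction of zero-valued elements across the model's weight tensors must reach
# the requested sparsity_level. `measure_sparsity` is a hypothetical helper.
import numpy as np


def measure_sparsity(weight_tensors):
    """Return the global fraction of exactly-zero elements."""
    total = sum(w.size for w in weight_tensors)
    zeros = sum(int(np.count_nonzero(w == 0)) for w in weight_tensors)
    return zeros / total if total else 0.0


# e.g. measure_sparsity([np.array([0.0, 0.5, 0.0, -1.2])]) -> 0.5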
def test_sparsity(test_models, tmp_path, models):
    model_name, model_framework, algorithm, sparsity_level, normed_threshold, \
        expected_accuracy = test_models
    algorithm_config = Dict({
        'algorithms': [{
            'name': algorithm,
            'params': {
                'sparsity_level': sparsity_level,
                'normed_threshold': normed_threshold,
            }
        }]
    })

    if algorithm == 'WeightSparsity':
        bias_config = Dict({'target_device': 'CPU', 'stat_subset_size': 300})
        algorithm_config['algorithms'][0]['params'].update(bias_config)

    model = models.get(model_name, model_framework, tmp_path)

    engine_config = get_engine_config(model_name)
    config = merge_configs(model.model_params, engine_config, algorithm_config)
    config.engine.models[0].datasets[0].subsample_size = 1000

    metrics = optimize(config)
    output_dir = os.path.join(config.model.exec_log_dir, 'optimized')

    for metric_name in metrics:
        print('{}: {:.4f}'.format(metric_name, metrics[metric_name]))

    assert metrics == pytest.approx(expected_accuracy, abs=0.006)

    xml_path = os.path.join(output_dir, config.model.model_name + '.xml')
    bin_path = os.path.join(output_dir, config.model.model_name + '.bin')
    assert os.path.exists(xml_path)
    assert os.path.exists(bin_path)

    # Check resulting sparsity level
    model, _ = stdout_redirect(restore_graph_from_ir, xml_path, bin_path)
    assert check_sparsity_level(CompressedModel(graph=model), config, sparsity_level)
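
# A hedged sketch (an assumption about the technique, not the toolkit's
# algorithm) of magnitude-based weight sparsification, which the
# 'sparsity_level' parameter above controls: zero out the given fraction of
# weights with the smallest absolute values. `sparsify_by_magnitude` is a
# hypothetical helper; ties at the threshold may zero a few extra elements.
import numpy as np


def sparsify_by_magnitude(weights, sparsity_level):
    """Zero the `sparsity_level` fraction of smallest-magnitude weights."""
    flat = np.abs(weights).ravel()
    k = int(sparsity_level * flat.size)
    if k == 0:
        return weights.copy()
    threshold = np.partition(flat, k - 1)[k - 1]  # k-th smallest magnitude
    out = weights.copy()
    out[np.abs(out) <= threshold] = 0.0
    return out


# e.g. sparsify_by_magnitude(np.array([0.1, -2.0, 0.05, 3.0]), 0.5)
# zeroes 0.1 and 0.05, keeping the two largest-magnitude weights.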