def launch_simplified_mode(tmp_path, models, engine_config):
    """Quantize the first simplified-mode test model, then re-evaluate the
    optimized IR and return its averaged metrics as an OrderedDict.

    Prints each metric as ``name: value`` with 4 decimal places.
    """
    model_name, model_framework, algorithm, preset, _ = SIMPLIFIED_TEST_MODELS[0]

    model = models.get(model_name, model_framework, tmp_path)
    config = merge_configs(model.model_params, engine_config,
                           make_algo_config(algorithm, preset))
    _ = optimize(config)

    # The optimized IR is written next to the execution log.
    output_dir = os.path.join(config.model.exec_log_dir, 'optimized')
    xml_path = os.path.join(output_dir, config.model.model_name + '.xml')
    bin_path = os.path.join(output_dir, config.model.model_name + '.bin')
    assert os.path.exists(xml_path)
    assert os.path.exists(bin_path)

    # Re-evaluate the optimized model with the full accuracy-checker engine
    # config rather than the simplified engine used for quantization.
    config.engine = get_engine_config(model_name)
    raw_metrics = evaluate(config=config,
                           subset=range(1000),
                           paths=[{'model': xml_path, 'weights': bin_path}])

    averaged = OrderedDict(
        (metric.name, np.mean(metric.evaluated_value)) for metric in raw_metrics)
    for name, value in averaged.items():
        print('{}: {:.4f}'.format(name, value))
    return averaged
def test_sparsity_algo(test_models, tmp_path, models):
    """Run a sparsity algorithm and check that the produced graph reaches the
    requested sparsity level and matches the stored reference graph."""
    (model_name, model_framework, algorithm,
     sparsity_level, normed_threshold, ref_name) = test_models

    algorithm_config = Dict({
        'algorithms': [{
            'name': algorithm,
            'params': {
                'sparsity_level': sparsity_level,
                'normed_threshold': normed_threshold,
            },
        }],
    })

    model = models.get(model_name, model_framework, tmp_path)
    config = merge_configs(model.model_params,
                           get_engine_config(model_name),
                           algorithm_config)
    # This test compares graphs only — skip metric evaluation.
    config.engine.evaluate = False
    config.engine.type = 'accuracy_checker'

    _ = optimize(config)

    output_dir = os.path.join(config.model.exec_log_dir, 'optimized')
    xml_path = os.path.join(output_dir, config.model.model_name + '.xml')
    bin_path = os.path.join(output_dir, config.model.model_name + '.bin')

    # Restore the optimized graph from the emitted IR for inspection.
    restored_graph, meta = stdout_redirect(restore_graph_from_ir, xml_path, bin_path)
    restored_graph.meta_data = meta

    assert check_sparsity_level(NXModel(graph=restored_graph), config, sparsity_level)
    check_graph(tmp_path, restored_graph, model_name + ref_name,
                model_framework, check_weights=True)
def test_compression(_params, tmp_path, models):
    """End-to-end compression test: optimizes a model (single or cascade),
    checks the resulting accuracy against the expected reference, and
    verifies that the optimized IR files were emitted.

    For GNA + AccuracyAwareQuantization the quantized model is additionally
    compared against a stored reference model.
    """
    (model_name, model_framework, algorithm, preset, subset_size,
     expected_accuracy, additional_params, device) = _params
    algorithm_config = make_algo_config(algorithm, preset, subset_size,
                                        additional_params, device)

    if model_name in CASCADE_MAP:
        model = models.get_cascade(model_name, model_framework, tmp_path,
                                   CASCADE_MAP[model_name])
    else:
        model = models.get(model_name, model_framework, tmp_path)

    engine_config = get_engine_config(model_name)
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    # Keep evaluation cheap by restricting the validation subset size.
    if model_name in CASCADE_MAP:
        config.engine.evaluations[0].module_config.datasets[0].subsample_size = 10
    else:
        config.engine.models[0].datasets[0].subsample_size = 1000

    metrics = optimize(config)
    output_dir = os.path.join(config.model.exec_log_dir, 'optimized')

    for metric_name in metrics:
        print('{}: {:.4f}'.format(metric_name, metrics[metric_name]))
    assert metrics == pytest.approx(expected_accuracy, abs=0.006)

    if model_name in CASCADE_MAP:
        # BUG FIX: the original iterated CASCADE_MAP.model_name.model_tokens,
        # which looks up a literal attribute named 'model_name' (an empty Dict
        # for addict-style mappings), so these assertions never ran. Index
        # with the model_name variable instead.
        for token in CASCADE_MAP[model_name].model_tokens:
            assert os.path.exists(os.path.join(
                output_dir, '{}_{}.xml'.format(config.model.model_name, token)))
            assert os.path.exists(os.path.join(
                output_dir, '{}_{}.bin'.format(config.model.model_name, token)))
    else:
        assert os.path.exists(
            os.path.join(output_dir, config.model.model_name + '.xml'))
        assert os.path.exists(
            os.path.join(output_dir, config.model.model_name + '.bin'))

    if device == 'GNA' and algorithm == 'AccuracyAwareQuantization':
        # Load the quantized IR back and compare it to the reference model.
        quantized_model_params = deepcopy(model.model_params)
        quantized_model_params['model'] = os.path.join(
            output_dir, config.model.model_name + '.xml')
        quantized_model_params['weights'] = os.path.join(
            output_dir, config.model.model_name + '.bin')
        quantized_model = load_model(quantized_model_params)
        check_model(tmp_path, quantized_model, model_name + '_gna_aa',
                    model_framework)
def test_multiport_outputs_model(tmp_path, models, model_name, model_framework):
    """Smoke-test simplified-mode quantization of a model with multiple
    output ports; passes if ``optimize`` completes without raising."""
    # A single image bundled with the tests serves as the calibration dataset.
    image_dir = (Path(__file__).parent / 'data/image_data/').as_posix()
    engine_config = Dict({
        'type': 'simplified',
        'data_source': image_dir,
        'device': 'CPU',
    })

    model = models.get(model_name, model_framework, tmp_path)
    quant_config = make_algo_config('MinMaxQuantization', 'performance')
    _ = optimize(merge_configs(model.model_params, engine_config, quant_config))
def test_sparsity(test_models, tmp_path, models):
    """Run a sparsity algorithm end-to-end and validate the resulting
    accuracy, the emitted IR files, and the achieved sparsity level."""
    (model_name, model_framework, algorithm,
     sparsity_level, normed_threshold, expected_accuracy) = test_models

    algorithm_config = Dict({
        'algorithms': [{
            'name': algorithm,
            'params': {
                'sparsity_level': sparsity_level,
                'normed_threshold': normed_threshold,
            },
        }],
    })
    if algorithm == 'WeightSparsity':
        # WeightSparsity additionally needs bias-correction statistics.
        algorithm_config['algorithms'][0]['params'].update(
            Dict({'target_device': 'CPU', 'stat_subset_size': 300}))

    model = models.get(model_name, model_framework, tmp_path)
    config = merge_configs(model.model_params,
                           get_engine_config(model_name),
                           algorithm_config)
    config.engine.models[0].datasets[0].subsample_size = 1000

    metrics = optimize(config)
    for name in metrics:
        print('{}: {:.4f}'.format(name, metrics[name]))
    assert metrics == pytest.approx(expected_accuracy, abs=0.006)

    output_dir = os.path.join(config.model.exec_log_dir, 'optimized')
    xml_path = os.path.join(output_dir, config.model.model_name + '.xml')
    bin_path = os.path.join(output_dir, config.model.model_name + '.bin')
    assert os.path.exists(xml_path)
    assert os.path.exists(bin_path)

    # Restore the optimized graph and confirm the requested sparsity level.
    restored_graph, _ = stdout_redirect(restore_graph_from_ir, xml_path, bin_path)
    assert check_sparsity_level(CompressedModel(graph=restored_graph),
                                config, sparsity_level)
def run_algo(config, model_name, model_framework, metrics, expected_result, tmp_path):
    """Run optimization for ``config``, merge the resulting metrics into
    ``metrics`` (mutated in place), and record the run in the xlsx report."""
    run_metrics = optimize(config)
    metrics.update(run_metrics)
    write_results_to_xlsx(model_name, model_framework, run_metrics,
                          expected_result, tmp_path)