# Fragment (left edge truncated): tail of a model-constructor call, then one training/evaluation
# round for the 'identity_bool' conceptizator variant: fit, freeze, test, count the concepts each
# class actually uses (mask entries > 0.5), extract logic explanations, and record accuracy/time.
# NOTE(review): explain_class receives val_loader twice (val, val, test) here, whereas the sibling
# runs pass (val, train, test) — confirm this asymmetry is intentional and not a copy-paste slip.
# NOTE(review): `sum(mask[j] > 0.5)` presumably reduces a tensor row to a used-concept count — verify
# it yields a scalar, not an element-wise tensor, for the f-string print.
# NOTE(review): trainer.test(..., test_dataloaders=...) is a deprecated/removed kwarg in newer
# PyTorch Lightning (now `dataloaders=`) — confirm the pinned version.
lr=0.01, explainer_hidden=[10], conceptizator='identity_bool') start = time.time() trainer.fit(model, train_loader, val_loader) print(f"Concept mask: {model.model[0].concept_mask}") model.freeze() model_results = trainer.test(model, test_dataloaders=test_loader) for j in range(n_classes): n_used_concepts = sum(model.model[0].concept_mask[j] > 0.5) print(f"Extracted concepts: {n_used_concepts}") results, f = model.explain_class(val_loader, val_loader, test_loader, topk_explanations=5, x_to_bool=None, max_accuracy=True, concept_names=concept_names) end = time.time() - start results['model_accuracy'] = model_results[0]['test_acc'] results['extraction_time'] = end results_list.append(results) extracted_concepts = [] all_concepts = model.model[0].concept_mask[0] > 0.5 common_concepts = model.model[0].concept_mask[0] > 0.5 for j in range(n_classes): n_used_concepts = sum(model.model[0].concept_mask[j] > 0.5) print(f"Extracted concepts: {n_used_concepts}") print(f"Explanation: {f[j]['explanation']}")
# Fragment (left edge truncated): tail of a model-constructor call inside what is presumably a
# hyperparameter sweep over `tau` (temperature) and `l1` (sparsity weight) — TODO confirm the
# enclosing loops. Per combination: fit, freeze, test, count used concepts per class, extract
# explanations, tag the result row with tau/lambda, and checkpoint the accumulated results to a
# per-(l1, tau)-named CSV under base_dir.
# NOTE(review): the CSV key is 'lambda' while the constructor kwarg is 'l1' — confirm downstream
# analysis expects 'lambda'.
# NOTE(review): trainer.test(..., test_dataloaders=...) is a deprecated/removed kwarg in newer
# PyTorch Lightning (now `dataloaders=`) — confirm the pinned version.
lr=0.01, explainer_hidden=[20, 20], temperature=tau, l1=l1) start = time.time() trainer.fit(model, train_loader, val_loader) print(f"Gamma: {model.model[0].concept_mask}") model.freeze() model_results = trainer.test(model, test_dataloaders=test_loader) for j in range(n_classes): n_used_concepts = sum(model.model[0].concept_mask[j] > 0.5) print(f"Extracted concepts: {n_used_concepts}") results, f = model.explain_class(val_loader, train_loader, test_loader, topk_explanations=10, concept_names=concept_names) end = time.time() - start results['model_accuracy'] = model_results[0]['test_acc'] results['extraction_time'] = end results['tau'] = tau results['lambda'] = l1 results_list.append(results) results_df = pd.DataFrame(results_list) results_df.to_csv( os.path.join(base_dir, f'results_aware_vdem_l_{l1}_tau_{tau}.csv'))
# Fragment (left edge truncated): tail of a model-constructor call with fixed hyperparameters
# (l1=0.0001, temperature=0.7), then the same fit/freeze/test/explain pipeline as the sibling runs,
# here with topk_explanations=50 and verbose output, printing per-class explanation accuracy.
# NOTE(review): `extracted_concepts`, `all_concepts`, and `common_concepts` are initialized from
# class 0's mask but never updated in the visible span — presumably the truncated continuation
# intersects/unions them per class; verify against the full file.
# NOTE(review): trainer.test(..., test_dataloaders=...) is a deprecated/removed kwarg in newer
# PyTorch Lightning (now `dataloaders=`) — confirm the pinned version.
l1=0.0001, temperature=0.7, lr=0.01, explainer_hidden=[10]) start = time.time() trainer.fit(model, train_loader, val_loader) print(f"Concept mask: {model.model[0].concept_mask}") model.freeze() model_results = trainer.test(model, test_dataloaders=test_loader) for j in range(n_classes): n_used_concepts = sum(model.model[0].concept_mask[j] > 0.5) print(f"Extracted concepts: {n_used_concepts}") results, f = model.explain_class(val_loader, train_loader, test_loader, topk_explanations=50, concept_names=concept_names, verbose=True) end = time.time() - start results['model_accuracy'] = model_results[0]['test_acc'] results['extraction_time'] = end results_list.append(results) extracted_concepts = [] all_concepts = model.model[0].concept_mask[0] > 0.5 common_concepts = model.model[0].concept_mask[0] > 0.5 for j in range(n_classes): n_used_concepts = sum(model.model[0].concept_mask[j] > 0.5) print(f"Extracted concepts: {n_used_concepts}") print(f"Explanation: {f[j]['explanation']}") print(f"Explanation accuracy: {f[j]['explanation_accuracy']}")