def plot(model_name, dataset, datasize): exp = "1" wd = defaultdict(list) rd = defaultdict(list) path_manager = PathManager(model_name, dataset) for expl_name in AbsExp.get_method_names(): result_file = path_manager.result_json_path( exp, expl_name, datasize) if os.path.isfile(result_file): with open(result_file) as f: results = json.loads(f.readlines()[0]) for result in results.values(): wd[expl_name].append(result["CoreParameter"]) rd[expl_name].append(0 if result["Polytope"] == 0 else 1) pdf_name = path_manager.figure_path(datasize, exp) pp = PdfPages(pdf_name) values, names = [], [] logger.info("WD {}".format({i: np.mean(j) for i, j in wd.items()})) for i, j in sorted(wd.items(), key=Plot.sort_key, reverse=True): values.append(j) names.append(i) Plot.plot_box(values, names, pp, model_name, "WD", False, True) logger.info("RD {}".format({i: np.mean(j) for i, j in rd.items()})) values, names = [], [] for i, j in sorted(rd.items(), key=Plot.sort_key, reverse=True): values.append(j) names.append(i) Plot.plot_scatter(values, names, pp, model_name, "RD") pp.close()
def main(): parser = argparse.ArgumentParser() parser.add_argument("--mdl", help="Model Name", choices=["LMT", "MLP"], required=True) parser.add_argument("--dataset", help="data. E.g. FMNIST", choices=["FMNIST", "MNIST"], required=True) parser.add_argument("--explainer", help="name of explainer", choices=AbsExp.get_method_names()) parser.add_argument("--datasize", help="number of test images per class", required=True) parser.add_argument("--gpu", help="GPU used", choices=["cuda:1", "cuda:0", "cuda:2", "cuda:3"]) parsedArgs = parser.parse_args(sys.argv[1:]) model_name = parsedArgs.mdl dataset = parsedArgs.dataset expln_name = parsedArgs.explainer datasize = int(parsedArgs.datasize) config.DEVICE = parsedArgs.gpu exp = Exp5(model_name, dataset, expln_name, datasize) exp.run() logger.info("Finish")
def main(): parser = argparse.ArgumentParser() parser.add_argument("--mdl", help="Model Name", choices=["LMT", "MLP"], required=True) parser.add_argument("--dataset", help="data. E.g. FMNIST", choices=["FMNIST", "MNIST"], required=True) parser.add_argument("--datasize", help="number of test images per class", required=True) parser.add_argument("--explainer", help="name of explainer", choices=AbsExp.get_method_names()) parser.add_argument("--gpu", help="GPU used", choices=["cuda:1", "cuda:0", "cuda:2", "cuda:3"]) parser.add_argument("--task", help="GPU used", choices=["plot", "compute"], required=True) parser.add_argument("--feature", help="Feature type", choices=AbsExp.get_attribute_method_names()) parsedArgs = parser.parse_args(sys.argv[1:]) model_name = parsedArgs.mdl dataset = parsedArgs.dataset datasize = int(parsedArgs.datasize) task = parsedArgs.task if task == "compute": expln_name = parsedArgs.explainer config.DEVICE = parsedArgs.gpu feature_type = parsedArgs.feature exp = Exp6(model_name, dataset, expln_name, datasize, feature_type) exp.run() else: Exp6.plot(model_name, dataset, datasize) logger.info("Finish")
def plot(model_name, dataset, datasize): exp = "4" path_manager = PathManager(model_name, dataset) cpp = defaultdict(list) nlci = defaultdict(list) var_num = 0 for expl_name in AbsExp.get_method_names(): result_file = path_manager.result_json_path(exp, expl_name, datasize) if os.path.isfile(result_file): with open(result_file) as f: results = json.loads(f.readlines()[0]) for value in results.values(): var_num = len(value["CPP"]) cpp[expl_name].append(value["CPP"]) nlci[expl_name].append(value["NLCI"]) pdf_name = path_manager.figure_path(datasize, exp) pp = PdfPages(pdf_name) values = [] names = [] # Filter condition merger = { AbsExp.GroundTrueh: "GT,OA,Z/N($10^{-4}$), Z/N($10^{-8}$)", "{}:0.0001".format(AbsExp.LIMELinearRegression): "L($10^{-4}$), L($10^-8$)" } for name, instances in sorted(cpp.items(), key=Plot.sort_key, reverse=True): value = [0] * (var_num + 1) for instance in instances: for idx, v in enumerate(instance): value[idx + 1] += v values.append([i / len(instances) for i in value]) names.append(name) Plot.plot_line(values, names, pp, model_name, "CPP", "\#Hacked Features", [0, 1.01], 0.5) values = [] names = [] for name, instances in sorted(nlci.items(), key=Plot.sort_key, reverse=True): value = [0] * (var_num + 1) for instance in instances: for idx, v in enumerate(instance): value[idx + 1] += v values.append(value) names.append(name) logger.info("Number of instance {}".format(len(instances))) Plot.plot_line(values, names, pp, model_name, "NLCI", "\#Hacked Features", [0, len(instances)], 500) pp.close()
def plot(model_name, dataset, datasize): exp = "6" path_manager = PathManager(model_name, dataset) cpp = defaultdict(list) nlci = defaultdict(list) var_num = 0 for feature_type in AbsExp.get_attribute_method_names(): result_file = path_manager.result_json_path(exp, feature_type, size=datasize) if os.path.isfile(result_file): with open(result_file) as f: results = json.loads(f.readlines()[0]) for value in results.values(): var_num = len(value["CPP"]) cpp[feature_type].append(value["CPP"]) nlci[feature_type].append(value["NLCI"]) pdf_name = path_manager.figure_path(datasize, exp) pp = PdfPages(pdf_name) values = [] names = [] # Filter condition for name, instances in sorted(cpp.items(), key=Plot.sort_key, reverse=True): value = [0] * (var_num + 1) for instance in instances: for idx, v in enumerate(instance): value[idx + 1] += v values.append([i / len(instances) for i in value]) names.append(name) Plot.plot_line(values, names, pp, model_name, "Avg. CPP", "\#Changed Features", [0, 1.1], 0.5, (18, 6), (20, 50)) values = [] names = [] for name, instances in sorted(nlci.items(), key=Plot.sort_key, reverse=True): value = [0] * (var_num + 1) for instance in instances: for idx, v in enumerate(instance): value[idx + 1] += v values.append(value) names.append(name) logger.info("Number of instance {}".format(len(instances))) Plot.plot_line(values, names, pp, model_name, "Avg. NLCI", "\#Changed Features", [0, 1100], 500, (18, 6), (20, 50)) pp.close()
def plot_new(model_name, dataset, datasize): exp = "1" wd = defaultdict(lambda: defaultdict(list)) rd = defaultdict(lambda: defaultdict(list)) path_manager = PathManager(model_name, dataset) params = set() for expl_name in AbsExp.get_method_names(): result_file = path_manager.result_json_path( exp, expl_name, datasize) if os.path.isfile(result_file): if ":" in expl_name: name, param = expl_name.split(":") params.add(param) else: name, param = expl_name, None with open(result_file) as f: results = json.loads(f.readlines()[0]) for result in results.values(): wd[name][param].append(result["CoreParameter"]) rd[name][param].append(0 if result["Polytope"] == 0 else 1) # Parameters in ascending order params = sorted(params, key=lambda x: float(x) if x is not None else 0, reverse=False) wd[AbsExp.OpenAPI] = {i: wd[AbsExp.OpenAPI][None] for i in params} wd = { name: [(float(param), result[param]) for param in params] for name, result in wd.items() } pdf_name = path_manager.figure_path(datasize, exp) pp = PdfPages(pdf_name) logger.info("WD {}".format({ i + ":" + str(k): np.mean(p) for i, j in wd.items() for k, p in j })) Plot.plot_errorbar(wd, pp, model_name, "WD", "Perturb Distance", (0, 100000), None, (18, 6), 1) pp.close()
def plot(model_name, dataset, datasize): exp = "2" path_manager = PathManager(model_name, dataset) dist = defaultdict(list) for expl_name in AbsExp.get_method_names(): result_file = path_manager.result_json_path( exp, expl_name, datasize) if os.path.isfile(result_file): with open(result_file) as f: results = json.loads(f.readlines()[0]) for value in results.values(): dist[expl_name].append(value["L1Distance"]) pdf_name = path_manager.figure_path(datasize, exp) pp = PdfPages(pdf_name) values, names = [], [] for i, j in sorted(dist.items(), key=Plot.sort_key, reverse=True): values.append(j) names.append(i) Plot.plot_box(values, names, pp, model_name, "L1Dist", True, False) pp.close()
def plot(model_name, dataset, datasize): exp = "7" path_manager = PathManager(model_name, dataset) cs = defaultdict(list) for feature_type in AbsExp.get_attribute_method_names(): result_file = path_manager.result_json_path( exp, feature_type, datasize) if os.path.isfile(result_file): with open(result_file) as f: results = json.loads(f.readlines()[0]) for value in results.values(): cs[feature_type].append(value["Cosine"]) pdf_name = path_manager.figure_path(datasize, exp) pp = PdfPages(pdf_name) values, names = [], [] for i, j in sorted(cs.items(), key=Plot.sort_key, reverse=True): values.append(sorted(j, reverse=True)) names.append(i) Plot.plot_line(values, names, pp, model_name, "CS", "Index of Instance", [-0.05, 1.1], 0.5, (18, 6), (100, 300)) pp.close()
def plot(model_name, dataset, datasize): exp = "3" path_manager = PathManager(model_name, dataset) cs = defaultdict(list) for expl_name in AbsExp.get_method_names(): result_file = path_manager.result_json_path( exp, expl_name, datasize) if os.path.isfile(result_file): with open(result_file) as f: results = json.loads(f.readlines()[0]) for value in results.values(): cs[expl_name].append(value["Cosine"]) pdf_name = path_manager.figure_path(datasize, exp) pp = PdfPages(pdf_name) values, names = [], [] for i, j in sorted(cs.items(), key=Plot.sort_key, reverse=True): if "-08" not in i and "0.0001" not in i and "Ground" not in i: continue values.append(sorted(j, reverse=True)) names.append(i) Plot.plot_line(values, names, pp, model_name, "CS", "Index of Instance", [-0.05, 1.1], 0.5) pp.close()
def __init__(self, model_name, dataset, expln_name, datasize):
    AbsExp.__init__(self, model_name, dataset, "2", expln_name, datasize)
def __init__(self, model_name, dataset, expln_name, datasize, feature_type):
    AbsExp.__init__(self, model_name, dataset, "7", expln_name, datasize)
    self.feature_type = feature_type
    self.result_json = self.path_manager.result_json_path(
        "7", feature_type, size=self.data_size)