def accuracy_VS_gamma(epsilon, prior, data, gammas):
    # Sweep the gamma parameter of the smooth-sensitivity mechanism (EHDS)
    # and plot the mean error for each setting.
    delta = 0.1
    mean_error = [[]]
    for g in gammas:
        Bayesian_Model = BayesInferwithDirPrior(prior, sum(data), epsilon, delta, g)
        Bayesian_Model._set_observation(data)
        print("start " + str(g))
        Bayesian_Model._experiments(1000)
        print("finished " + str(g))
        mean_error[0].append(
            Bayesian_Model._accuracy_mean[Bayesian_Model._keys[3]])

    print('Accuracy / prior: ' + str(prior._alphas) + ", delta: " + str(delta)
          + ", epsilon: " + str(epsilon))

    plot_mean_error(gammas, mean_error, gammas,
                    "Different Gammas for Smooth Sensitivity",
                    [r"$\mathsf{EHDS}$"], "")
    return
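# Example usage (illustrative sketch only -- the prior, the data counts, epsilon = 1.0
# and the gamma grid below are assumptions chosen for demonstration, not values fixed
# by this module; `Dir` is the Dirichlet-prior class used elsewhere in this file):
#
#   prior = Dir([1, 1])
#   accuracy_VS_gamma(epsilon=1.0, prior=prior, data=[500, 500],
#                     gammas=[0.05 * k for k in range(1, 11)])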
def accuracy_VS_prior(sample_size, epsilon, delta, priors, observation):
    # Box-plot and track the mean error of the three mechanisms shown in the
    # legend below (keys 3, 0 and 4) across different Dirichlet priors on theta.
    data = []
    mean_error = [[], [], []]
    for prior in priors:
        Bayesian_Model = BayesInferwithDirPrior(prior, sample_size, epsilon, delta)
        Bayesian_Model._set_observation(observation)
        Bayesian_Model._experiments(1000)
        data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[3]])
        data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[0]])
        data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[4]])
        mean_error[0].append(
            Bayesian_Model._accuracy_mean[Bayesian_Model._keys[3]])
        mean_error[1].append(
            Bayesian_Model._accuracy_mean[Bayesian_Model._keys[0]])
        mean_error[2].append(
            Bayesian_Model._accuracy_mean[Bayesian_Model._keys[4]])

    print('Accuracy / observation: ' + str(observation) + ", delta: "
          + str(delta) + ", epsilon: " + str(epsilon))

    plot_error_box(data, r"Different Priors on $\theta$",
                   [r"$\mathsf{beta}$" + str(i._alphas) for i in priors],
                   "Accuracy VS. Prior Distribution",
                   [r'$\mathcal{M}_{\mathcal{H}}$',
                    "LapMech (sensitivity = 1)",
                    "LapMech (sensitivity = 2)"],
                   ['navy', 'red', 'green'])
    return
def run_experiments(times, datasizes, observations, epsilon, delta, prior):
    # Run `times` trials of every mechanism on each real data set and
    # box-plot the resulting accuracy samples.
    data = []
    for i in range(len(datasizes)):
        observation = observations[i]
        Bayesian_Model = BayesInferwithDirPrior(prior, sum(observation), epsilon, delta)
        Bayesian_Model._set_observation(observation)
        print("start " + str(observation))
        Bayesian_Model._experiments(times)
        print("finished " + str(observation))
        for j in range(5):
            data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[j]])

    plot_error_box(data, "Different Data Sets",
                   ["bike", "cryotherapy", "immunotherapy"],
                   "Experiments on Real Data",
                   [r'Alg 1 - $\mathsf{LSDim}$ (sensitivity = 2.0)',
                    r'Alg 2 - $\mathsf{LSHist}$ (sensitivity = 1.0)',
                    r'Alg 5 - $\mathsf{EHDS}$',
                    r"Alg 3 - $\mathsf{EHD}$",
                    r"Alg 4 - $\mathsf{EHDL}$"],
                   ["skyblue", "navy", "coral", "crimson", "blueviolet"])
    return
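# Example usage (illustrative sketch; the three observation histograms below are
# placeholders standing in for the real bike / cryotherapy / immunotherapy counts
# loaded elsewhere -- they are not the actual data sets):
#
#   observations = [[300, 200], [60, 30], [50, 40]]
#   run_experiments(times=500,
#                   datasizes=[sum(o) for o in observations],
#                   observations=observations,
#                   epsilon=1.0, delta=0.1, prior=Dir([1, 1]))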
def accuracy_VS_prior_mean(sample_size, epsilon, delta, priors, observations):
    # Box-plot ExpMech vs. Laplace accuracy for every (prior, observation) pair.
    data = []
    xstick = []  # x-axis tick labels for the box plot
    for prior in priors:
        for observation in observations:
            Bayesian_Model = BayesInferwithDirPrior(prior, sample_size, epsilon, delta)
            Bayesian_Model._set_observation(observation)
            Bayesian_Model._experiments(300)
            data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[3]])
            data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[0]])
            xstick.append(str(prior._alphas) + ", data:" + str(observation) + "/ExpMech")
            xstick.append(str(prior._alphas) + ", data:" + str(observation) + "/Laplace")

    plot_error_box(data, "Different Prior Distributions", xstick,
                   "Accuracy VS. Prior Distribution")
    return
def accuracy_VS_dimension(sample_sizes, epsilon, delta):
    # Vary the histogram dimension d (2..4) for each sample size, using a
    # uniform observation and a flat Dirichlet prior, and compare ExpMech
    # against Laplace.
    data = []
    xstick = []
    for n in sample_sizes:
        for d in range(2, 5):
            observation = [n for i in range(d)]
            prior = Dir([1 for i in range(d)])
            Bayesian_Model = BayesInferwithDirPrior(prior, n * d, epsilon, delta)
            Bayesian_Model._set_observation(observation)
            Bayesian_Model._experiments(500)
            data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[3]])
            data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[0]])
            xstick.append(str(observation) + "/ExpMech")
            xstick.append(str(observation) + "/Laplace")

    plot_error_box(data, "Different Dimensions", xstick,
                   "Accuracy VS. Dimension")
    return
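# Example usage (illustrative sketch; the sample sizes, epsilon and delta are
# arbitrary choices for demonstration):
#
#   accuracy_VS_dimension(sample_sizes=[50, 100, 200], epsilon=1.0, delta=0.1)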
def accuracy_VS_epsilon(epsilons, delta, prior, observation, datasize):
    # Sweep epsilon and plot the mean error of the Laplace and geometric
    # noise baselines (keys 0 and 1).
    mean_error = [[], []]
    for e in epsilons:
        Bayesian_Model = BayesInferwithDirPrior(prior, sum(observation), e, delta, 0.2)
        Bayesian_Model._set_observation(observation)
        print("start " + str(observation))
        Bayesian_Model._experiments(1000)
        print("finished " + str(observation))
        for j in range(len(mean_error)):
            mean_error[j].append(
                Bayesian_Model._accuracy_mean[Bayesian_Model._keys[j]])

    plot_mean_error(epsilons, mean_error, [round(e, 2) for e in epsilons],
                    "Different Epsilons",
                    ["Laplace Noise", "Geometric Noise"], "")
    return
def accuracy_VS_mean(sample_size, epsilon, delta, prior):
    # Enumerate the model's candidate posteriors, strip off the prior to recover
    # the corresponding observations, and compare ExpMech vs. Laplace on each.
    data = []
    xstick = []
    temp = BayesInferwithDirPrior(prior, sample_size, epsilon, delta)
    temp._set_candidate_scores()
    observations = temp._candidates
    for i in range(len(observations)):
        observations[i]._minus(prior)

    for observation in observations:
        Bayesian_Model = BayesInferwithDirPrior(prior, sample_size, epsilon, delta)
        Bayesian_Model._set_observation(observation._alphas)
        Bayesian_Model._experiments(500)
        data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[3]])
        data.append(Bayesian_Model._accuracy[Bayesian_Model._keys[0]])
        xstick.append(str(observation._alphas) + "/ExpMech")
        xstick.append(str(observation._alphas) + "/Laplace")

    plot_error_box(data, "Different Observations", xstick,
                   "Accuracy VS. Data Mean")
    return
def accuracy_VS_datasize_noise(epsilon, delta, prior, observations, datasizes):
    # Variant of the data-size experiment below that compares only the Laplace
    # and geometric noise baselines (keys 0 and 1).
    mean_error = [[], []]
    for i in range(len(datasizes)):
        observation = observations[i]
        Bayesian_Model = BayesInferwithDirPrior(prior, sum(observation), epsilon, delta, 0.2)
        Bayesian_Model._set_observation(observation)
        print("start " + str(observation))
        Bayesian_Model._experiments(1000)
        print("finished " + str(observation))
        for j in range(len(mean_error)):
            mean_error[j].append(
                Bayesian_Model._accuracy_mean[Bayesian_Model._keys[j]])

    print('Accuracy / prior: ' + str(prior._alphas) + ", delta: " + str(delta)
          + ", epsilon: " + str(epsilon))

    plot_mean_error(datasizes, mean_error, datasizes, "Different Datasizes",
                    ["Laplace Noise", "Geometric Noise"], "")
    return
def accuracy_VS_datasize(epsilon, delta, prior, observations, datasizes):
    # Plot the mean error of all six mechanisms as the data size grows.
    mean_error = [[], [], [], [], [], []]
    for i in range(len(datasizes)):
        observation = observations[i]
        Bayesian_Model = BayesInferwithDirPrior(prior, sum(observation), epsilon, delta, 0.2)
        Bayesian_Model._set_observation(observation)
        print("start " + str(observation))
        Bayesian_Model._experiments(500)
        print("finished " + str(observation))
        for j in range(len(mean_error)):
            mean_error[j].append(
                Bayesian_Model._accuracy_mean[Bayesian_Model._keys[j]])

    print('Accuracy / prior: ' + str(prior._alphas) + ", delta: " + str(delta)
          + ", epsilon: " + str(epsilon))

    plot_mean_error(datasizes, mean_error, datasizes, "Different Datasizes",
                    [r"$\mathsf{LSDim}$", r"$\mathsf{LSHist}$",
                     r"$\mathsf{LSZhang}$", r"$\mathsf{EHDS}$",
                     r"$\mathsf{EHD}$", r"$\mathsf{EHDL}$"], "")
    return
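# Example usage (illustrative sketch; the uniform two-bucket observations and the
# epsilon/delta values below are assumptions chosen only for demonstration):
#
#   datasizes = [100, 200, 400, 800]
#   observations = [[n // 2, n // 2] for n in datasizes]
#   accuracy_VS_datasize(epsilon=1.0, delta=0.1, prior=Dir([1, 1]),
#                        observations=observations, datasizes=datasizes)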