import numpy as np
from sklearn.utils import resample


def uncertainty_gs(probs, likelyhoods, credal_size):
    # probs: 3-d array with d1 as datapoints, d2 as ensemble samples and d3 as class probabilities
    # sort the ensemble members by likelihood so the level cuts are nested
    sorted_index = np.argsort(likelyhoods, kind='stable')
    l = likelyhoods[sorted_index]  # sorted likelihoods (kept for reference, not used below)
    p = probs[:, sorted_index]
    gs_total = []
    gs_epist = []
    gs_ale = []
    for level in range(credal_size - 1):
        p_cut = p[:, 0:level + 2]  # get the level-cut probs based on the sorted likelihood
        # computing levi (set14) for the level cut p_cut and appending to the uncertainty arrays
        entropy = -p_cut * np.ma.log2(p_cut)
        entropy = entropy.filled(0)
        entropy_sum = np.sum(entropy, axis=2)
        s_max = np.max(entropy_sum, axis=1)  # upper entropy over the cut
        s_min = np.min(entropy_sum, axis=1)  # lower entropy over the cut (not used below)
        gh = set_gh(p_cut)  # generalised Hartley measure (defined elsewhere in this module)
        total = s_max
        e = gh
        a = total - e
        gs_total.append(total)
        gs_epist.append(e)
        gs_ale.append(a)
    # average the per-cut measures over all level cuts
    gs_total = np.mean(np.array(gs_total), axis=0)
    gs_epist = np.mean(np.array(gs_epist), axis=0)
    gs_ale = np.mean(np.array(gs_ale), axis=0)
    return gs_total, gs_epist, gs_ale

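# Illustrative sketch (not part of the original code): how the level cuts above are built.
# np.argsort orders the toy likelihoods in ascending order, and each cut keeps the first
# (level + 2) members in that order, giving nested subsets of sizes 2, 3 and 4.
# All values below are invented purely for demonstration.
def _demo_level_cuts():
    likelihoods = np.array([0.4, 0.1, 0.3, 0.2])
    probs = np.random.dirichlet([1.0, 1.0, 1.0], size=(5, 4))  # 5 datapoints, 4 members, 3 classes
    order = np.argsort(likelihoods, kind='stable')
    for level in range(likelihoods.shape[0] - 1):
        p_cut = probs[:, order][:, 0:level + 2]
        print(level, p_cut.shape)  # (5, 2, 3), (5, 3, 3), (5, 4, 3)
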
def uncertainty_ent_bays(probs, likelihoods):
    # probs: 3-d array with d1 as datapoints, d2 as ensemble samples and d3 as class probabilities
    # likelihoods: one weight per ensemble sample, expected to sum to 1
    p = np.array(probs)
    entropy = -p * np.ma.log2(p)
    entropy = entropy.filled(0)
    # aleatoric: likelihood-weighted average of the per-member entropies
    a = np.sum(entropy, axis=2)
    al = a * likelihoods
    a = np.sum(al, axis=1)
    # reshape the likelihoods to (1, n_members, 1) so they broadcast over the member axis
    given_axis = 1
    dim_array = np.ones((1, p.ndim), int).ravel()
    dim_array[given_axis] = -1
    b_reshaped = likelihoods.reshape(dim_array)
    mult_out = p * b_reshaped
    p_m = np.sum(mult_out, axis=1)  # likelihood-weighted mixture distribution
    # total: entropy of the mixture; epistemic: total minus aleatoric
    total = -np.sum(p_m * np.ma.log2(p_m), axis=1)
    total = total.filled(0)
    e = total - a
    return total, e, a

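# Illustrative usage sketch (not part of the original code): a Bayesian-weighted entropy
# decomposition on a toy ensemble of 4 datapoints, 3 members and 2 classes. The member
# likelihoods are normalised so that p_m is a proper mixture distribution.
# All numbers are invented purely for demonstration.
def _demo_uncertainty_ent_bays():
    probs = np.array([
        [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]],
        [[0.5, 0.5], [0.6, 0.4], [0.4, 0.6]],
        [[0.2, 0.8], [0.1, 0.9], [0.3, 0.7]],
        [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]],
    ])
    likelihoods = np.array([0.5, 0.3, 0.2])  # weights sum to 1
    total, epistemic, aleatoric = uncertainty_ent_bays(probs, likelihoods)
    print(total, epistemic, aleatoric)  # one value per datapoint
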
def uncertainty_set15(probs, bootstrap_size=0, sampling_size=0, credal_size=0):
    # probs: 3-d array with d1 as datapoints, d2 as ensemble samples and d3 as class probabilities
    if bootstrap_size > 0:
        # build a credal set per datapoint by bootstrap-resampling the ensemble members
        p = []
        for data_point in probs:
            d_p = []
            for sampling_seed in range(bootstrap_size):
                d_p.append(resample(data_point, random_state=sampling_seed))
            p.append(np.array(d_p))
        p = np.array(p)
        p = np.mean(p, axis=2)
    elif sampling_size > 0:
        # build a credal set by repeatedly sub-sampling credal_size ensemble members
        p = []
        for sample_index in range(sampling_size):
            sampled_index = np.random.choice(probs.shape[1], credal_size)
            p.append(probs[:, sampled_index, :])
        p = np.array(p)
        p = np.mean(p, axis=2)
        p = p.transpose([1, 0, 2])
    else:
        p = probs
    entropy = -p * np.ma.log2(p)
    entropy = entropy.filled(0)
    entropy_sum = np.sum(entropy, axis=2)
    s_min = np.min(entropy_sum, axis=1)  # lower entropy over the credal set
    s_max = np.max(entropy_sum, axis=1)  # upper entropy over the credal set
    total = s_max
    e = s_max - s_min
    a = total - e
    return total, e, a

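# Illustrative usage sketch (not part of the original code): interval-valued entropy on a
# toy credal set. With the default arguments the ensemble is used as-is: total is the upper
# entropy S_max, epistemic is S_max - S_min and aleatoric is S_min.
# Values are randomly generated purely for demonstration.
def _demo_uncertainty_set15():
    probs = np.random.dirichlet([1.0, 1.0, 1.0], size=(4, 10))  # 4 datapoints, 10 members, 3 classes
    total, epistemic, aleatoric = uncertainty_set15(probs)
    print(total.shape, epistemic.shape, aleatoric.shape)  # (4,), (4,), (4,)
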
def uncertainty_ent(probs):
    # probs: 3-d array with d1 as datapoints, d2 as ensemble samples and d3 as class probabilities
    p = np.array(probs)
    entropy = -p * np.ma.log2(p)
    entropy = entropy.filled(0)
    # aleatoric: mean over ensemble members of the per-member entropies
    a = np.sum(entropy, axis=1)
    a = np.sum(a, axis=1) / entropy.shape[1]
    # total: entropy of the mean (ensemble-averaged) distribution
    p_m = np.mean(p, axis=1)
    total = -np.sum(p_m * np.ma.log2(p_m), axis=1)
    total = total.filled(0)
    e = total - a
    return total, e, a

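# Illustrative usage sketch (not part of the original code): the entropy-based decomposition
# above, i.e. total = entropy of the mean prediction, aleatoric = mean member entropy,
# epistemic = their difference. Toy values invented purely for demonstration.
def _demo_uncertainty_ent():
    # two datapoints: the members agree on the first and disagree strongly on the second
    probs = np.array([
        [[0.9, 0.1], [0.85, 0.15], [0.95, 0.05]],
        [[0.99, 0.01], [0.01, 0.99], [0.5, 0.5]],
    ])
    total, epistemic, aleatoric = uncertainty_ent(probs)
    print(total, epistemic, aleatoric)  # epistemic is larger for the second datapoint
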
def uncertainty_ent_levi(probs, credal_size=30):
    # probs: 3-d array with d1 as datapoints, d2 as ensemble samples and d3 as class probabilities
    # build a credal set per datapoint by bootstrap-resampling the ensemble members
    p = []
    for data_point in probs:
        d_p = []
        for sampling_seed in range(credal_size):
            d_p.append(resample(data_point, random_state=sampling_seed))
        p.append(np.array(d_p))
    p = np.array(p)
    p = np.mean(p, axis=2)
    # note: this variant uses base-10 logarithms, unlike the log2-based functions above
    entropy = -p * np.ma.log10(p)
    entropy = entropy.filled(0)
    # aleatoric: mean over credal-set members of the per-member entropies
    a = np.sum(entropy, axis=1)
    a = np.sum(a, axis=1) / entropy.shape[1]
    # total: entropy of the mean distribution; epistemic: total minus aleatoric
    p_m = np.mean(p, axis=1)
    total = -np.sum(p_m * np.ma.log10(p_m), axis=1)
    total = total.filled(0)
    e = total - a
    return total, e, a

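# Illustrative usage sketch (not part of the original code): the Levi-style variant above
# builds a credal set per datapoint by bootstrap-resampling the ensemble members with
# scikit-learn's resample and then applies the same entropy decomposition (in base 10).
# Values are randomly generated purely for demonstration.
def _demo_uncertainty_ent_levi():
    probs = np.random.dirichlet([1.0, 1.0], size=(3, 8))  # 3 datapoints, 8 members, 2 classes
    total, epistemic, aleatoric = uncertainty_ent_levi(probs, credal_size=10)
    print(total.shape, epistemic.shape, aleatoric.shape)  # (3,), (3,), (3,)
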
def uncertainty_set17(probs, bootstrap_size=0, sampling_size=0, credal_size=0, log=False):
    # probs: 3-d array with d1 as datapoints, d2 as ensemble samples and d3 as class probabilities
    if bootstrap_size > 0:
        # build a credal set per datapoint by bootstrap-resampling the ensemble members
        p = []
        for data_point in probs:
            d_p = []
            for sampling_seed in range(bootstrap_size):
                d_p.append(resample(data_point, random_state=sampling_seed))
            p.append(np.array(d_p))
        p = np.array(p)
        p = np.mean(p, axis=2)
    elif sampling_size > 0:
        # build a credal set by repeatedly sub-sampling credal_size ensemble members
        p = []
        for sample_index in range(sampling_size):
            sampled_index = np.random.choice(probs.shape[1], credal_size)
            p.append(probs[:, sampled_index, :])
        p = np.array(p)
        p = np.mean(p, axis=2)
        p = p.transpose([1, 0, 2])
    else:
        p = probs
    if log:
        print("------------------------------------set17 prob after averaging each ensemble")
        print("set17 p \n", p)
        print(p.shape)
    entropy = -p * np.ma.log2(p)
    entropy = entropy.filled(0)
    entropy_sum = np.sum(entropy, axis=2)
    s_max = np.max(entropy_sum, axis=1)  # upper entropy over the credal set
    gh = set_gh(p)  # generalised Hartley measure (defined elsewhere in this module)
    e = gh
    a = s_max
    total = a + e
    return total, e, a

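# Illustrative sketch (not part of the original code): the aleatoric term used above is the
# upper entropy S_max over the credal set; the epistemic term relies on set_gh, which is
# defined elsewhere in this module, so only S_max is demonstrated here.
# Toy values invented purely for demonstration.
def _demo_upper_entropy():
    p = np.array([[[0.9, 0.1], [0.5, 0.5], [0.7, 0.3]]])  # 1 datapoint, 3 members, 2 classes
    member_entropy = np.sum(-p * np.ma.log2(p).filled(0), axis=2)
    s_max = np.max(member_entropy, axis=1)
    print(s_max)  # ~[1.0], dominated by the maximally uncertain member [0.5, 0.5]
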
def uncertainty_ent_standard(probs):
    # standard (single-model) case, e.g. class probabilities from a single tree
    # probs: 2-d array with rows as datapoints and columns as class probabilities
    p = np.array(probs)
    entropy = -p * np.ma.log10(p)
    entropy = entropy.filled(0)
    total = np.sum(entropy, axis=1)
    # no ensemble, so the same entropy is returned for total, epistemic and aleatoric
    return total, total, total

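# Illustrative usage sketch (not part of the original code): the single-model entropy above
# on two toy predictions; the same base-10 entropy is returned three times.
# Values invented purely for demonstration.
def _demo_uncertainty_ent_standard():
    probs = np.array([[0.9, 0.1], [0.5, 0.5]])
    total, epistemic, aleatoric = uncertainty_ent_standard(probs)
    print(total)  # higher for the uniform [0.5, 0.5] prediction
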