def individual_nudge(old_X: dit.Distribution, eps: float = 0.01, rvs_other=None) -> dit.Distribution:
    """Nudge one random variable of a joint distribution, leaving the others' marginal intact.

    The distribution is factored as P(X_other) * P(X_i | X_other) and a nudge of total
    size ``eps`` is applied to each conditional P(X_i | X_other), so only the targeted
    variable's conditional probabilities change.

    Parameters
    ----------
    old_X : dit.Distribution
        The joint distribution to nudge (not modified; a new distribution is returned).
    eps : float
        Total nudge size, split across the conditional distributions in the linear case.
    rvs_other : sequence of rv names, optional
        The variables to condition on (i.e. everything except the nudged variable).
        If None, all but one randomly chosen variable are conditioned on.

    Returns
    -------
    dit.Distribution
        A dense distribution with the same rv names, outcomes, base and mask as the input.
    """
    mask = old_X._mask
    base = old_X.get_base()

    # A single-variable distribution has no "other" variables to condition on,
    # so an individual nudge degenerates to a global nudge.
    if old_X.outcome_length() == 1:
        return global_nudge(old_X, eps)

    outcomes = old_X.outcomes
    rv_names = old_X.get_rv_names()
    if rvs_other is None:
        # Condition on all variables except one picked at random.
        rvs_other = np.random.choice(rv_names, len(rv_names) - 1, replace=False)

    X_other, Xi_given_Xother = old_X.condition_on(rvs_other)
    nudge_size = len(Xi_given_Xother[0])
    if base == 'linear':
        # Split the total nudge size over the conditional distributions.
        nudge = generate_nudge(nudge_size, eps / len(Xi_given_Xother))
        for Xi in Xi_given_Xother:
            perform_nudge(Xi, nudge)
    else:
        # Log-base distributions need the sign tracked separately.
        nudge, sign = generate_log_nudge(nudge_size, eps)
        for Xi in Xi_given_Xother:
            perform_log_nudge(Xi, nudge, sign)

    new_X = dit.joint_from_factors(X_other, Xi_given_Xother).copy(base)
    # Add back any outcomes that were dropped during factoring, with probability 0.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rv_names)
    new_X.make_dense()
    new_X._mask = mask
    return new_X
def max_individual(input: dit.Distribution, conditional: np.ndarray, eps: float = 0.01, minimal_entropy_idx=None):
    """Find the maximum-impact individual nudge vector for the lowest-entropy variable.

    For each conditional distribution of the minimal-entropy variable, every sign
    alignment over the output states is scored and the nudge maximizing the impact
    (within budget eps/2 split across conditionals) is kept.

    Parameters
    ----------
    input : dit.Distribution
        The input joint distribution.
    conditional : np.ndarray
        Conditional distribution of the output given the inputs; normalized in place
        to sum to 1.
    eps : float
        Total nudge budget; eps/2 is divided over the conditional distributions.
    minimal_entropy_idx : int, optional
        Index of the variable to nudge; computed as the argmin-entropy marginal if None.

    Returns
    -------
    (nudge_vector, total_max_impact, minimal_entropy_idx)
    """
    rvs = input.get_rv_names()
    conditional = conditional / conditional.sum()
    states = len(input.alphabet[0])

    if minimal_entropy_idx is None:
        # Pick the variable whose marginal has the lowest entropy.
        minimal_entropy_idx = np.argmin([
            entropy(input.marginal([rv], rv_mode='indices').pmf)
            for rv in range(len(rvs))
        ])

    non_minimal_rvs = rvs[:minimal_entropy_idx] + rvs[minimal_entropy_idx + 1:]
    non_minimal_marginal, minimal_conditional = input.condition_on(non_minimal_rvs)
    for d in minimal_conditional:
        d.make_dense()

    indiv_shape = (len(minimal_conditional), len(minimal_conditional[0]))
    nudge_vector = np.zeros(indiv_shape)
    # Rotate the conditional so the minimal-entropy variable's states are contiguous rows.
    rotated_conditional = R(conditional, minimal_entropy_idx, len(rvs), states)

    total_max_impact = 0
    for i, mc_dist in enumerate(minimal_conditional):
        rows = rotated_conditional[i * states:(i + 1) * states, :]
        max_impact = 0
        for allignment in itertools.product([-1, 1], repeat=rotated_conditional.shape[1]):
            allignment = np.array(allignment)
            # An all-equal alignment shifts every state the same way and has no net effect.
            if np.all(allignment == 1) or np.all(allignment == -1):
                continue
            scores = np.sum(allignment * rows, axis=1)
            # NOTE(review): the original assigned only `impact` here and then read an
            # undefined name `vector`, raising NameError whenever impact > max_impact.
            # Assumes find_max_impact returns (impact, vector) — confirm its signature.
            impact, vector = find_max_impact(scores, mc_dist.pmf,
                                             (eps / 2) / len(minimal_conditional))
            if impact > max_impact:
                nudge_vector[i, :] = vector
                max_impact = impact
        total_max_impact += max_impact

    return nudge_vector, total_max_impact, minimal_entropy_idx
def get_marginals(
        d: dit.Distribution) -> "tuple[dit.Distribution, list[dit.Distribution]]":
    """Split a joint distribution into the inputs' marginal and the output's conditionals.

    Treats the last random variable as the output and conditions on all the others.

    Parameters
    ----------
    d : dit.Distribution
        Joint distribution whose last rv is the output.

    Returns
    -------
    tuple
        ``(marginal_of_inputs, list_of_conditionals_of_output)`` as produced by
        ``dit.Distribution.condition_on``.
    """
    # NOTE(review): the original annotated the return type as a tuple *literal*
    # `(dit.Distribution, List[...])`, which is not a valid PEP 484 type; a lazy
    # string annotation is used so no new typing import is needed at runtime.
    rvs = d.get_rv_names()[:-1]  # everything except the output
    return d.condition_on(rvs)