# Shared imports assumed by all examples below (the original snippets omit
# their import block); entropy is taken from scipy.stats, matching its use on
# plain pmf arrays. Helpers such as generate_nudge, perform_nudge, max_nudge,
# dj_nudge, find_max_impact and R are assumed to come from the surrounding
# codebase.
import itertools
import random
from typing import List, Tuple

import dit
import numpy as np
from scipy.stats import entropy

Example #1
def individual_nudge(old_X: dit.Distribution,
                     eps: float = 0.01,
                     rvs_other=None) -> dit.Distribution:
    mask = old_X._mask
    base = old_X.get_base()
    if old_X.outcome_length() == 1:
        return global_nudge(old_X, eps)
    outcomes = old_X.outcomes
    rv_names = old_X.get_rv_names()

    if rvs_other is None:
        rvs = old_X.get_rv_names()
        rvs_other = np.random.choice(rvs, len(rvs) - 1, replace=False)

    X_other, Xi_given_Xother = old_X.condition_on(rvs_other)
    nudge_size = len(Xi_given_Xother[0])

    if base == 'linear':
        nudge = generate_nudge(nudge_size, eps / len(Xi_given_Xother))
        for Xi in Xi_given_Xother:
            perform_nudge(Xi, nudge)
    else:
        nudge, sign = generate_log_nudge(nudge_size, eps)
        for Xi in Xi_given_Xother:
            perform_log_nudge(Xi, nudge, sign)
    new_X = dit.joint_from_factors(X_other, Xi_given_Xother).copy(base)
    # Add back any outcomes that went missing during the nudge.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rv_names)
    new_X.make_dense()
    new_X._mask = mask
    return new_X
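
A minimal usage sketch, assuming a dit joint distribution with one character
per random variable; individual_nudge picks one variable at random (unless
rvs_other fixes the others) and nudges its conditionals by at most eps:

X = dit.Distribution(['00', '01', '10', '11'], [0.4, 0.1, 0.3, 0.2])
X.set_rv_names('AB')
X_nudged = individual_nudge(X, eps=0.01)
# The nudge only redistributes mass, so the result is still normalized.
assert abs(X_nudged.pmf.sum() - 1.0) < 1e-9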
Example #2
def max_global_nudge(old_X: dit.Distribution,
                     YgivenX: np.ndarray,
                     eps: float = 0.01):
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = old_X.get_rv_names()
    outcomes = old_X.outcomes

    nudge, _ = max_nudge(old_X.copy('linear'),
                         YgivenX,
                         eps=eps,
                         nudge_type='global')

    #  print("global eps",sum(abs(nudge)), eps, old_X.outcome_length())
    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        # print(nudge)
        log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
        # print(log_nudge, sign)
        # log_nudge[log_nudge == -np.inf] = 0
        # print("converted to log nudge",nudge, log_nudge, sign)
        perform_log_nudge(new_X, log_nudge, sign)

    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rvs)
    return new_X
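
A hypothetical call, assuming YgivenX stacks one conditional pmf of the output
Y per outcome of X (one row per outcome, rows summing to 1):

X = dit.Distribution(['0', '1'], [0.7, 0.3])
X.set_rv_names('A')
YgivenX = np.array([[0.9, 0.1],
                    [0.2, 0.8]])
X_max = max_global_nudge(X, YgivenX, eps=0.01)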
Example #3
def max_synergistic_nudge(old_X: dit.Distribution,
                          YgivenX: np.ndarray,
                          eps: float = 0.01):
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = old_X.get_rv_names()
    outcomes = old_X.outcomes
    if len(rvs) < 3:
        return max_global_nudge(old_X, YgivenX, eps)

    nudge, _ = max_nudge(old_X.copy('linear'),
                         YgivenX,
                         eps=eps,
                         nudge_type='synergistic_old')
    #  print("synergistic eps",sum(abs(nudge)), eps, old_X.outcome_length())
    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
        perform_log_nudge(new_X, log_nudge, sign)
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rvs)
    return new_X
Example #4
def max_derkjanistic_nudge(old_X: dit.Distribution,
                           YgivenX: np.ndarray,
                           eps: float = 0.01):
    rvs = old_X.get_rv_names()
    if len(rvs) < 2:
        return max_global_nudge(old_X, YgivenX, eps)

    base = old_X.get_base()

    new_X = max_derkjanistic(old_X.copy('linear'), YgivenX, eps)
    return new_X.copy(base)
Example #5
def max_derkjanistic(input: dit.Distribution,
                     conditional: np.ndarray,
                     eps: float = 0.01) -> dit.Distribution:
    rvs = input.get_rv_names()
    outcomes = input.outcomes
    evo_params = get_config(len(rvs))
    max_dj_nudge_found = max_dj_nudge(input, conditional, eps, evo_params)
    new_X = max_dj_nudge_found.new_dist

    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rvs)
    return new_X
Example #6
def max_individual(input: dit.Distribution,
                   conditional: np.ndarray,
                   eps: float = 0.01,
                   minimal_entropy_idx=None):
    rvs = input.get_rv_names()
    conditional = conditional / conditional.sum()
    states = len(input.alphabet[0])
    if minimal_entropy_idx is None:
        minimal_entropy_idx = np.argmin([
            entropy(input.marginal([rv], rv_mode='indices').pmf)
            for rv in range(len(rvs))
        ])

    non_minimal_rvs = rvs[:minimal_entropy_idx] + rvs[minimal_entropy_idx + 1:]
    non_minimal_marginal, minimal_conditional = input.condition_on(
        non_minimal_rvs)
    for d in minimal_conditional:
        d.make_dense()
    indiv_shape = (len(minimal_conditional), len(minimal_conditional[0]))

    nudge_vector = np.zeros(indiv_shape)
    rotated_conditional = R(conditional, minimal_entropy_idx, len(rvs), states)
    total_max_impact = 0
    for i, mc_dist in enumerate(minimal_conditional):

        rows = rotated_conditional[i * states:(i + 1) * states, :]

        max_impact = 0
        for alignment in itertools.product(
            [-1, 1], repeat=rotated_conditional.shape[1]):
            alignment = np.array(alignment)
            if np.all(alignment == 1) or np.all(alignment == -1):
                continue
            scores = np.sum(alignment * rows, axis=1)

            # Add rotation of scores so that scores are well aligned.
            # Weigh scores using the non_minimal_marginal

            vector, impact = find_max_impact(scores, mc_dist.pmf, (eps / 2) /
                                             len(minimal_conditional))
            if impact > max_impact:
                nudge_vector[i, :] = vector
                max_impact = impact
        total_max_impact += max_impact
    return nudge_vector, total_max_impact, minimal_entropy_idx
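
A sketch of how the three return values fit together, under the same YgivenX
convention as above (the R helper is assumed to reorder the conditional for
the chosen variable):

X2 = dit.Distribution(['00', '01', '10', '11'], [0.4, 0.1, 0.3, 0.2])
X2.set_rv_names('AB')
YgivenX2 = np.array([[0.9, 0.1], [0.2, 0.8], [0.5, 0.5], [0.3, 0.7]])
vectors, impact, idx = max_individual(X2, YgivenX2, eps=0.01)
# vectors: one nudge row per conditional of the minimal-entropy variable;
# impact: summed effect of the best per-row nudges; idx: the nudged variable.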
Example #7
def max_local(input: dit.Distribution,
              conditional: np.ndarray,
              eps: float = 0.01):
    rvs = input.get_rv_names()
    sorted_rvs = np.argsort([
        entropy(input.marginal([rv], rv_mode='indices').pmf)
        for rv in range(len(rvs))
    ])
    states = len(input.alphabet[0])
    # One candidate nudge vector per random variable, each spanning that
    # variable's conditionals (len(input) // states blocks of size states).
    nudge_vectors = np.zeros(
        (input.outcome_length(), len(input) // states, states))
    max_impacts = np.zeros(input.outcome_length())
    for rv in sorted_rvs:
        nudge_vectors[rv, :, :], max_impacts[rv], _ = max_individual(
            input, conditional, eps / len(sorted_rvs), rv)
    return nudge_vectors, max_impacts
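
Note on the eps budget: each variable receives eps / len(sorted_rvs), and
max_individual further halves and splits its share across the conditionals, so
the per-variable nudges jointly stay within the overall eps.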
Example #8
def local_nudge2(old_X: dit.Distribution,
                 eps: float = 0.01) -> dit.Distribution:
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = list(old_X.get_rv_names())

    random.shuffle(rvs)

    for i in range(len(rvs)):
        rvs_other = rvs[:i] + rvs[i + 1:]
        new_X = individual_nudge(new_X, eps / len(rvs), rvs_other=rvs_other)
    return new_X
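
Design note, inferred from the code: local_nudge2 chains its per-variable
nudges, so each individual_nudge call sees the previously nudged distribution,
whereas local_nudge1 (Example #14) computes every per-variable delta against
the same old_X and sums them into a single eps-sized nudge.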
Example #9
def max_synergistic(input: dit.Distribution,
                    conditional: np.ndarray,
                    eps: float = 0.01):
    rvs = input.get_rv_names()
    states = input.alphabet[0]
    partition_size = int(len(input) / (len(states)**2))
    max_entropy = (len(states)**2) * entropy(np.ones(partition_size))
    best_syn_vars = (0, 1)
    best_outcome_dict = {}
    lowest_entropy = max_entropy

    for synergy_vars in itertools.combinations(range(len(rvs)), r=2):
        # Build the outcome dict
        outcome_dict = {
            state: np.zeros(partition_size, dtype=int)
            for state in list(itertools.product(states, repeat=2))
        }
        for i, outcome in enumerate(input.outcomes):
            cur_state = outcome[synergy_vars[0]], outcome[synergy_vars[1]]
            # Fill the first still-zero slot for this joint state.
            outcome_dict[cur_state][np.argmax(outcome_dict[cur_state] == 0)] = i

        current_entropy = sum([
            entropy(input.pmf[indices])
            for state, indices in outcome_dict.items()
        ])

        if current_entropy < lowest_entropy:
            best_syn_vars = synergy_vars
            lowest_entropy = current_entropy
            best_outcome_dict = outcome_dict

    # Use best syn vars to find the nudge vector that makes the largest impact
    nudge_vector = np.zeros(len(input))

    for state, indices in best_outcome_dict.items():
        nudge_vector[indices] = max_global(
            input.pmf[indices],
            np.array([d for i, d in enumerate(conditional) if i in indices]),
            eps / len(best_outcome_dict), False)

    return nudge_vector, best_syn_vars
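
Illustration of the partition above: for three binary variables with
best_syn_vars == (0, 1), best_outcome_dict maps each joint state of the first
two variables to the indices of the outcomes realizing it, e.g. ('0', '1') ->
the positions of '01*' in input.outcomes; max_global is then run on each block
with an equal share of eps.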
Example #10
def global_nudge(old_X: dit.Distribution,
                 eps: float = 0.01) -> dit.Distribution:
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    outcomes = old_X.outcomes
    nudge_size = len(old_X)
    rvs = old_X.get_rv_names()
    if base == 'linear':
        nudge = generate_nudge(nudge_size, eps)
        perform_nudge(new_X, nudge)
    else:
        nudge, sign = generate_log_nudge(nudge_size, eps)
        perform_log_nudge(new_X, nudge, sign)
    new_X = dit.Distribution(
        {o: new_X[o] if o in new_X.outcomes else 0
         for o in outcomes})
    new_X.set_rv_names(rvs)
    return new_X
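
A quick sketch in linear base, assuming the generate/perform helpers are
available; a global nudge perturbs the whole pmf at once while preserving
total mass:

X = dit.Distribution(['0', '1', '2'], [0.5, 0.3, 0.2])
X.set_rv_names('A')
X_g = global_nudge(X, eps=0.05)
assert abs(X_g.pmf.sum() - 1.0) < 1e-9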
Example #11
def derkjanistic_nudge(old_X: dit.Distribution,
                       eps: float = 0.01) -> dit.Distribution:
    base = old_X.get_base()
    outcomes = old_X.outcomes
    new_X = old_X.copy(base='linear')
    rvs = old_X.get_rv_names()
    if len(rvs) < 2:
        return global_nudge(old_X, eps)
    delta = eps / len(old_X)
    new_pmf = dj_nudge(new_X, delta)
    new_X.pmf = new_pmf
    new_X = dit.Distribution(
        {o: new_X[o] if o in new_X.outcomes else 0
         for o in outcomes})
    new_X.set_rv_names(rvs)
    new_X.normalize()
    new_X = new_X.copy(base=base)
    return new_X
Example #12
def max_local_nudge2(old_X: dit.Distribution,
                     YgivenX: np.ndarray,
                     eps: float = 0.01):
    if old_X.outcome_length() == 1:
        return max_global_nudge(old_X, YgivenX, eps)

    mask = old_X._mask
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = old_X.get_rv_names()
    sorted_rvs = np.argsort([
        entropy(old_X.marginal([rv], rv_mode='indices').pmf)
        for rv in range(len(rvs))
    ])
    outcomes = old_X.outcomes
    for rv in sorted_rvs:
        nudges, _ = max_nudge(new_X.copy('linear'),
                              YgivenX,
                              eps=(eps / len(sorted_rvs)),
                              nudge_type='individual',
                              minimal_entropy_idx=rv)
        new_X = do_max_individual_nudge(new_X, nudges, rv, True)
        new_X.make_dense()
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rvs)
    new_X._mask = mask
    return new_X
Example #13
def max_local_nudge1(old_X: dit.Distribution,
                     YgivenX: np.ndarray,
                     eps: float = 0.01):
    if old_X.outcome_length() == 1:
        return max_global_nudge(old_X, YgivenX, eps)

    mask = old_X._mask
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    outcomes = old_X.outcomes
    rvs = old_X.get_rv_names()

    individual_nudges, _ = max_nudge(old_X.copy('linear'),
                                     YgivenX,
                                     eps=eps,
                                     nudge_type='local')
    new_Xs = np.zeros((old_X.outcome_length(), len(old_X)))
    for i, nudges in enumerate(individual_nudges):
        tmp = do_max_individual_nudge(old_X, nudges, i)
        new_Xs[i, :] = tmp.pmf - old_X.pmf

    nudge = new_Xs.sum(axis=0)
    nudge = eps * nudge / (abs(nudge).sum())

    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
        perform_log_nudge(new_X, log_nudge, sign)
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rvs)
    new_X._mask = mask
    return new_X
Example #14
def local_nudge1(old_X: dit.Distribution,
                 eps: float = 0.01) -> dit.Distribution:
    mask = old_X._mask
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    outcomes = old_X.outcomes
    rv_names = old_X.get_rv_names()
    rvs = list(rv_names)

    random.shuffle(rvs)
    new_Xs = np.zeros((len(rvs), len(old_X)))
    for i in range(len(rvs)):
        rvs_other = rvs[:i] + rvs[i + 1:]
        tmp = individual_nudge(old_X, eps, rvs_other=rvs_other)
        #print("tmp",tmp)
        tmp.make_dense()

        old_X.make_dense()
        if base == 'linear':
            new_Xs[i, :] = tmp.pmf - old_X.pmf
        else:
            new_Xs[i] = tmp.copy(base='linear').pmf - old_X.copy(
                base='linear').pmf
    nudge = new_Xs.sum(axis=0)
    nudge = eps * nudge / (abs(nudge).sum())

    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        perform_log_nudge(new_X, np.log(np.abs(nudge)), np.sign(nudge))
    new_X = dit.Distribution(
        {o: new_X[o] if o in new_X.outcomes else 0
         for o in outcomes})
    new_X.set_rv_names(rv_names)  # restore the original (unshuffled) rv order
    new_X._mask = mask
    return new_X
Example #15
def synergistic_nudge(old_X: dit.Distribution,
                      eps: float = 0.01) -> dit.Distribution:
    base = old_X.get_base()
    outcomes = old_X.outcomes
    new_X = old_X.copy(base=base)
    rvs = old_X.get_rv_names()
    if len(rvs) < 3:
        return global_nudge(old_X, eps)

    synergy_vars = np.random.choice(len(rvs), 2, replace=False)
    states = old_X.alphabet[0]
    nudge_size = int(len(old_X) / (len(states)**2))
    outcome_dict = {
        state: np.zeros(nudge_size, dtype=int)
        for state in list(itertools.product(states, repeat=2))
    }
    for i, outcome in enumerate(old_X.outcomes):
        cur_state = outcome[synergy_vars[0]], outcome[synergy_vars[1]]
        # Fill the first still-zero slot for this joint state.
        outcome_dict[cur_state][np.argmax(outcome_dict[cur_state] == 0)] = i

    if base == 'linear':
        nudge = generate_nudge(nudge_size, eps / len(outcome_dict))
        perform_nudge(new_X, nudge, outcome_dict.values())
    else:
        nudge, sign = generate_log_nudge(nudge_size, eps / len(outcome_dict))
        perform_log_nudge(new_X, nudge, sign, outcome_dict.values())
    new_X = dit.Distribution(
        {o: new_X[o] if o in new_X.outcomes else 0.0
         for o in outcomes})
    new_X.set_rv_names(rvs)
    # x == np.nan is always False; use np.isnan to catch the NaN entries.
    new_X.pmf[np.isnan(new_X.pmf)] = -np.inf
    new_X.normalize()

    return new_X
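
A usage sketch: synergistic nudges need at least three variables, otherwise
the function falls back to global_nudge:

X = dit.Distribution([''.join(t) for t in itertools.product('01', repeat=3)],
                     [1 / 8] * 8)
X.set_rv_names('ABC')
X_syn = synergistic_nudge(X, eps=0.01)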
Example #16
def get_marginals(
        d: dit.Distribution) -> Tuple[dit.Distribution, List[dit.Distribution]]:
    rvs = d.get_rv_names()[:-1]  # everything except the last rv (the output)
    return d.condition_on(rvs)
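
Usage sketch: with the convention that the last random variable is the output,
get_marginals returns the marginal over the inputs together with one
conditional distribution of the output per input outcome:

d = dit.Distribution(['000', '011', '101', '110'], [0.25] * 4)
d.set_rv_names('ABC')
inputs_marginal, output_conditionals = get_marginals(d)
# inputs_marginal: Distribution over ('A', 'B');
# output_conditionals: list of conditionals of 'C', one per (A, B) outcome.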