Example #1
def max_global_nudge(old_X: dit.Distribution,
                     YgivenX: np.ndarray,
                     eps: float = 0.01):
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = old_X.get_rv_names()
    outcomes = old_X.outcomes

    nudge, _ = max_nudge(old_X.copy('linear'),
                         YgivenX,
                         eps=eps,
                         nudge_type='global')

    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        # Split the linear nudge into log-magnitude and sign so it can be
        # applied to a log-based distribution.
        log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
        perform_log_nudge(new_X, log_nudge, sign)

    # Re-insert any outcomes the nudge dropped, with probability 0.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rvs)
    return new_X
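
A quick usage sketch (not part of the original code): it assumes max_global_nudge and its helpers are importable, and that YgivenX holds one conditional row p(y|x) per outcome of X for a binary Y; the distribution and channel below are made-up illustrations.

import numpy as np
import dit

X = dit.Distribution(['00', '01', '10', '11'], [0.4, 0.3, 0.2, 0.1])
X.set_rv_names(['A', 'B'])
# Assumed convention: one p(y|x) row per outcome of X.
YgivenX = np.random.dirichlet(np.ones(2), size=len(X))
nudged = max_global_nudge(X, YgivenX, eps=0.01)
print(nudged)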
Example #2
def individual_nudge(old_X: dit.Distribution,
                     eps: float = 0.01,
                     rvs_other=None) -> dit.Distribution:
    mask = old_X._mask
    base = old_X.get_base()
    if old_X.outcome_length() == 1:
        return global_nudge(old_X, eps)
    outcomes = old_X.outcomes
    rv_names = old_X.get_rv_names()

    if rvs_other is None:
        rvs_other = np.random.choice(rv_names, len(rv_names) - 1, replace=False)

    X_other, Xi_given_Xother = old_X.condition_on(rvs_other)
    nudge_size = len(Xi_given_Xother[0])

    if base == 'linear':
        nudge = generate_nudge(nudge_size, eps / len(Xi_given_Xother))
        for Xi in Xi_given_Xother:
            perform_nudge(Xi, nudge)
    else:
        # Mirror the linear branch: split eps across the conditional factors.
        nudge, sign = generate_log_nudge(nudge_size, eps / len(Xi_given_Xother))
        for Xi in Xi_given_Xother:
            perform_log_nudge(Xi, nudge, sign)
    new_X = dit.joint_from_factors(X_other, Xi_given_Xother).copy(base)
    # Add back any missing outcomes with probability 0.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rv_names)
    new_X.make_dense()
    new_X._mask = mask
    return new_X
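
A minimal check one might run on individual_nudge (illustrative, not from the original code): when conditioning on A, only p(B | A) is perturbed, so the marginal of A should come back unchanged.

import dit

X = dit.Distribution(['00', '01', '10', '11'], [0.1, 0.2, 0.3, 0.4])
X.set_rv_names(['A', 'B'])
nudged = individual_nudge(X, eps=0.01, rvs_other=('A',))
print(X.marginal(['A']))
print(nudged.marginal(['A']))  # expected to match the original marginal of A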
Example #3
def max_synergistic_nudge(old_X: dit.Distribution,
                          YgivenX: np.ndarray,
                          eps: float = 0.01):
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = old_X.get_rv_names()
    outcomes = old_X.outcomes
    # A synergistic nudge needs at least three variables; fall back otherwise.
    if len(rvs) < 3:
        return max_global_nudge(old_X, YgivenX, eps)

    nudge, _ = max_nudge(old_X.copy('linear'),
                         YgivenX,
                         eps=eps,
                         nudge_type='synergistic_old')
    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
        perform_log_nudge(new_X, log_nudge, sign)
    # Re-insert any outcomes the nudge dropped, with probability 0.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rvs)
    return new_X
Example #4
def global_nudge(old_X: dit.Distribution,
                 eps: float = 0.01) -> dit.Distribution:
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    outcomes = old_X.outcomes
    nudge_size = len(old_X)
    rvs = old_X.get_rv_names()
    if base == 'linear':
        nudge = generate_nudge(nudge_size, eps)
        perform_nudge(new_X, nudge)
    else:
        nudge, sign = generate_log_nudge(nudge_size, eps)
        perform_log_nudge(new_X, nudge, sign)
    # Re-insert any outcomes the nudge dropped, with probability 0.
    new_X = dit.Distribution(
        {o: new_X[o] if o in new_X.outcomes else 0.0
         for o in outcomes})
    new_X.set_rv_names(rvs)
    return new_X
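
generate_nudge and perform_nudge are assumed helpers that never appear in these examples. The sketch below is one plausible implementation inferred from the call sites: a nudge is a zero-sum vector whose absolute entries sum to eps, added in place to the pmf, optionally per group of outcome indices (as Example #8 does). How a real implementation keeps every entry nonnegative is an open assumption here.

import numpy as np

def generate_nudge(size, eps):
    # Assumed behavior: a random zero-sum direction, rescaled so its
    # absolute entries sum to eps; adding it to a pmf keeps the total at 1.
    direction = np.random.dirichlet(np.ones(size)) - 1.0 / size
    return eps * direction / np.abs(direction).sum()

def perform_nudge(dist, nudge, idx_groups=None):
    # Assumed behavior: apply the nudge in place, either to the whole pmf
    # or to each group of outcome indices. Keeping entries nonnegative
    # (e.g. by redrawing the nudge) is omitted from this sketch.
    if idx_groups is None:
        dist.pmf = dist.pmf + nudge
    else:
        for idx in idx_groups:
            dist.pmf[np.asarray(idx)] += nudge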
Example #5
def do_max_individual_nudge(old_X, nudges, minimal_idx, from_local=False):
    mask = old_X._mask
    base = old_X.get_base()
    rvs = old_X.get_rv_names()
    outcomes = old_X.outcomes

    non_minimal_rvs = rvs[:minimal_idx] + rvs[minimal_idx + 1:]
    Xother, Xi_given_Xother = old_X.condition_on(non_minimal_rvs)
    for nudge, Xi in zip(nudges, Xi_given_Xother):
        if base == 'linear':
            perform_nudge(Xi, nudge)
        else:
            # Split the linear nudge into log-magnitude and sign for
            # log-based conditionals.
            log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
            perform_log_nudge(Xi, log_nudge, sign)
    new_X = dit.joint_from_factors(Xother, Xi_given_Xother).copy(base)
    new_X.make_dense()

    # Re-insert any outcomes the nudge dropped, with probability 0.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rvs)
    new_X._mask = mask
    return new_X
Example #6
def max_local_nudge1(old_X: dit.Distribution,
                     YgivenX: np.ndarray,
                     eps: float = 0.01):
    if old_X.outcome_length() == 1:
        return max_global_nudge(old_X, YgivenX, eps)

    mask = old_X._mask
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    outcomes = old_X.outcomes
    rvs = old_X.get_rv_names()

    individual_nudges, _ = max_nudge(old_X.copy('linear'),
                                     YgivenX,
                                     eps=eps,
                                     nudge_type='local')
    # Apply each variable's maximizing nudge separately and record the
    # resulting change to the joint pmf.
    new_Xs = np.zeros((old_X.outcome_length(), len(old_X)))
    for i, nudges in enumerate(individual_nudges):
        tmp = do_max_individual_nudge(old_X, nudges, i)
        new_Xs[i, :] = tmp.pmf - old_X.pmf

    # Combine the per-variable changes and rescale so the total absolute
    # perturbation is exactly eps.
    nudge = new_Xs.sum(axis=0)
    nudge = eps * nudge / abs(nudge).sum()

    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
        perform_log_nudge(new_X, log_nudge, sign)
    # Re-insert any outcomes the nudge dropped, with probability 0.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct)
    new_X.set_rv_names(rvs)
    new_X._mask = mask
    return new_X
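
A toy numeric illustration of the combine-and-rescale step above (made-up delta values): each row stands in for one variable's pmf change, and the rescaling fixes the L1 size at eps while preserving the zero sum.

import numpy as np

eps = 0.01
deltas = np.array([[0.02, -0.02, 0.00, 0.00],   # change from nudging variable 0
                   [0.01, 0.00, -0.01, 0.00]])  # change from nudging variable 1
nudge = deltas.sum(axis=0)                      # [ 0.03, -0.02, -0.01, 0.00]
nudge = eps * nudge / np.abs(nudge).sum()       # L1 norm rescaled to eps
print(nudge, nudge.sum(), np.abs(nudge).sum())  # zero-sum, abs mass 0.01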
Example #7
def local_nudge1(old_X: dit.Distribution,
                 eps: float = 0.01) -> dit.Distribution:
    mask = old_X._mask
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    outcomes = old_X.outcomes
    rvs = list(old_X.get_rv_names())

    # Visit the variables in a random order, leaving one out each time; keep
    # the original name order in rvs for relabeling at the end.
    shuffled_rvs = list(rvs)
    random.shuffle(shuffled_rvs)
    new_Xs = np.zeros((len(rvs), len(old_X)))
    for i in range(len(rvs)):
        rvs_other = shuffled_rvs[:i] + shuffled_rvs[i + 1:]
        tmp = individual_nudge(old_X, eps, rvs_other=rvs_other)
        tmp.make_dense()
        old_X.make_dense()
        if base == 'linear':
            new_Xs[i, :] = tmp.pmf - old_X.pmf
        else:
            new_Xs[i] = tmp.copy(base='linear').pmf - old_X.copy(
                base='linear').pmf
    # Combine the per-variable changes and rescale so the total absolute
    # perturbation is exactly eps.
    nudge = new_Xs.sum(axis=0)
    nudge = eps * nudge / abs(nudge).sum()

    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        perform_log_nudge(new_X, np.log(np.abs(nudge)), np.sign(nudge))
    # Re-insert any outcomes the nudge dropped, with probability 0.
    new_X = dit.Distribution(
        {o: new_X[o] if o in new_X.outcomes else 0.0
         for o in outcomes})
    new_X.set_rv_names(rvs)
    new_X._mask = mask
    return new_X
Example #8
def synergistic_nudge(old_X: dit.Distribution,
                      eps: float = 0.01) -> dit.Distribution:
    base = old_X.get_base()
    outcomes = old_X.outcomes
    new_X = old_X.copy(base=base)
    rvs = old_X.get_rv_names()
    # A synergistic nudge needs at least three variables; fall back otherwise.
    if len(rvs) < 3:
        return global_nudge(old_X, eps)

    synergy_vars = np.random.choice(len(rvs), 2, replace=False)
    states = old_X.alphabet[0]
    nudge_size = int(len(old_X) / (len(states)**2))
    # Group the outcome indices by the joint state of the two synergy
    # variables; each group receives the same nudge.
    outcome_dict = {
        state: np.zeros(nudge_size, dtype=int)
        for state in itertools.product(states, repeat=2)
    }
    for i, outcome in enumerate(old_X.outcomes):
        cur_state = outcome[synergy_vars[0]], outcome[synergy_vars[1]]
        # Fill the first still-zero slot; index 0 is a no-op here and ends
        # up in the last remaining slot of its group.
        outcome_dict[cur_state][np.argmax(outcome_dict[cur_state] == 0)] = i

    if base == 'linear':
        nudge = generate_nudge(nudge_size, eps / len(outcome_dict))
        perform_nudge(new_X, nudge, outcome_dict.values())
    else:
        nudge, sign = generate_log_nudge(nudge_size, eps / len(outcome_dict))
        perform_log_nudge(new_X, nudge, sign, outcome_dict.values())
    # Re-insert any outcomes the nudge dropped, with probability 0.
    new_X = dit.Distribution(
        {o: new_X[o] if o in new_X.outcomes else 0.0
         for o in outcomes})
    new_X.set_rv_names(rvs)
    # np.nan never compares equal to itself, so use np.isnan to find the
    # empty entries and map them to log-probability -inf.
    new_X.pmf[np.isnan(new_X.pmf)] = -np.inf
    new_X.normalize()

    return new_X
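
generate_log_nudge and perform_log_nudge are likewise never defined in these examples. A plausible sketch, assuming they mirror the linear helpers (see the sketch after Example #4) but carry the nudge as (log-magnitude, sign) pairs for distributions stored in log space:

import numpy as np

def generate_log_nudge(size, eps):
    # Assumed behavior: same as generate_nudge, returned as log-magnitudes
    # plus signs, matching how the callers above split linear nudges.
    nudge = generate_nudge(size, eps)
    return np.log(np.abs(nudge)), np.sign(nudge)

def perform_log_nudge(dist, log_nudge, sign, idx_groups=None):
    # Assumed behavior: convert to linear space, apply the signed nudge,
    # and convert back; -inf entries correspond to zero probability.
    def apply(logp):
        p = np.exp(logp) + sign * np.exp(log_nudge)
        with np.errstate(divide='ignore'):  # log(0) -> -inf is intended
            return np.log(np.maximum(p, 0.0))
    if idx_groups is None:
        dist.pmf = apply(dist.pmf)
    else:
        for idx in idx_groups:
            dist.pmf[np.asarray(idx)] = apply(dist.pmf[np.asarray(idx)])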