Example #1
import random

import dit
import numpy as np
from scipy.stats import entropy

# Shared imports for the examples below. Helper functions such as max_nudge,
# perform_nudge, perform_log_nudge, generate_nudge, generate_log_nudge,
# individual_nudge and do_max_individual_nudge are defined elsewhere in the
# same module.


def max_synergistic_nudge(old_X: dit.Distribution,
                          YgivenX: np.ndarray,
                          eps: float = 0.01):
    """Apply the maximal synergistic nudge of total size eps to old_X."""
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = old_X.get_rv_names()
    outcomes = old_X.outcomes
    # A synergistic nudge requires at least three variables; fall back to a
    # global nudge otherwise.
    if len(rvs) < 3:
        return max_global_nudge(old_X, YgivenX, eps)

    nudge, _ = max_nudge(old_X.copy('linear'),
                         YgivenX,
                         eps=eps,
                         nudge_type='synergistic_old')
    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        # In log space the nudge is split into log-magnitude and sign.
        log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
        perform_log_nudge(new_X, log_nudge, sign)
    # Rebuild over the original outcome set so outcomes that were nudged to
    # zero probability stay in the sample space.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct, base=base)  # values are in the original base
    new_X.set_rv_names(rvs)
    return new_X
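In this module's terminology, a synergistic nudge perturbs the joint distribution while leaving the single-variable marginals essentially unchanged. A minimal sketch of how one might check that, assuming the module's helpers (max_nudge, perform_nudge, and so on) are importable; the uniform distribution and the binary channel YgivenX below are invented for illustration, with one row of YgivenX per outcome of X:

outcomes = ['000', '001', '010', '011', '100', '101', '110', '111']
X = dit.Distribution(outcomes, [1 / 8] * 8)   # three uniform binary variables
X.set_rv_names(['A', 'B', 'C'])
YgivenX = np.tile([0.5, 0.5], (len(X), 1))    # hypothetical binary channel p(Y|X)
X_nudged = max_synergistic_nudge(X, YgivenX, eps=0.01)
for name in ['A', 'B', 'C']:
    print(name, X_nudged.marginal([name]).pmf)  # should stay close to [0.5, 0.5]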
Example #2
def max_global_nudge(old_X: dit.Distribution,
                     YgivenX: np.ndarray,
                     eps: float = 0.01):
    """Apply the maximal global nudge of total size eps to old_X."""
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = old_X.get_rv_names()
    outcomes = old_X.outcomes

    nudge, _ = max_nudge(old_X.copy('linear'),
                         YgivenX,
                         eps=eps,
                         nudge_type='global')

    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        # In log space the nudge is split into log-magnitude and sign.
        log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
        perform_log_nudge(new_X, log_nudge, sign)

    # Rebuild over the original outcome set so outcomes that were nudged to
    # zero probability stay in the sample space.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct, base=base)  # values are in the original base
    new_X.set_rv_names(rvs)
    return new_X
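A usage sketch, again assuming the module's helpers are importable; the two-bit distribution and the conditional p(Y|X) below are invented, with one row of YgivenX per outcome of X:

X = dit.Distribution(['00', '01', '10', '11'], [0.4, 0.1, 0.3, 0.2])
X.set_rv_names(['A', 'B'])
YgivenX = np.array([[0.9, 0.1],
                    [0.2, 0.8],
                    [0.5, 0.5],
                    [0.3, 0.7]])  # hypothetical binary Y, rows sum to one
X_nudged = max_global_nudge(X, YgivenX, eps=0.01)
X_nudged.make_dense()
print(abs(X_nudged.pmf - X.pmf).sum())  # total size of the applied nudge, ~eps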
Example #3
def local_nudge2(old_X: dit.Distribution,
                 eps: float = 0.01) -> dit.Distribution:
    """Nudge every variable in turn, spending eps / n per variable."""
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = list(old_X.get_rv_names())

    # Nudge the variables in random order; each individual nudge targets the
    # one variable left out of rvs_other.
    random.shuffle(rvs)
    for i in range(len(rvs)):
        rvs_other = rvs[:i] + rvs[i + 1:]
        new_X = individual_nudge(new_X, eps / len(rvs), rvs_other=rvs_other)
    return new_X
Example #4
def global_nudge(old_X: dit.Distribution,
                 eps: float = 0.01) -> dit.Distribution:
    """Apply a random global nudge of total size eps to old_X."""
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    outcomes = old_X.outcomes
    nudge_size = len(old_X)
    rvs = old_X.get_rv_names()
    if base == 'linear':
        nudge = generate_nudge(nudge_size, eps)
        perform_nudge(new_X, nudge)
    else:
        nudge, sign = generate_log_nudge(nudge_size, eps)
        perform_log_nudge(new_X, nudge, sign)
    # Rebuild over the original outcome set so outcomes that were nudged to
    # zero probability stay in the sample space.
    new_X = dit.Distribution(
        {o: new_X[o] if o in new_X.outcomes else 0.0
         for o in outcomes},
        base=base)
    new_X.set_rv_names(rvs)
    return new_X
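The generate_nudge helper is not shown in this listing. As an illustration only (a sketch, not the module's implementation), a valid nudge vector is a zero-sum perturbation with L1 norm eps that keeps the pmf non-negative:

def toy_nudge(pmf: np.ndarray, eps: float) -> np.ndarray:
    """Illustration only, not the module's generate_nudge."""
    rng = np.random.default_rng(0)
    raw = rng.normal(size=pmf.shape)
    raw -= raw.mean()                       # zero sum: total probability is conserved
    nudge = eps * raw / np.abs(raw).sum()   # rescale to total size eps
    neg = pmf + nudge < 0
    if neg.any():                           # shrink if a probability would go negative
        nudge *= np.min(pmf[neg] / -nudge[neg])
    return nudge

pmf = np.array([0.4, 0.1, 0.3, 0.2])
nudge = toy_nudge(pmf, 0.05)
assert np.isclose(nudge.sum(), 0.0) and (pmf + nudge > -1e-12).all()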
Example #5
def max_local_nudge2(old_X: dit.Distribution,
                     YgivenX: np.ndarray,
                     eps: float = 0.01):
    """Apply maximal individual nudges sequentially, eps / n per variable."""
    if old_X.outcome_length() == 1:
        return max_global_nudge(old_X, YgivenX, eps)

    mask = old_X._mask
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    rvs = old_X.get_rv_names()
    # Nudge the variables in order of increasing marginal entropy.
    sorted_rvs = np.argsort([
        entropy(old_X.marginal([rv], rv_mode='indices').pmf)
        for rv in range(len(rvs))
    ])
    outcomes = old_X.outcomes
    for rv in sorted_rvs:
        nudges, _ = max_nudge(new_X.copy('linear'),
                              YgivenX,
                              eps=(eps / len(sorted_rvs)),
                              nudge_type='individual',
                              minimal_entropy_idx=rv)
        new_X = do_max_individual_nudge(new_X, nudges, rv, True)
        new_X.make_dense()
    # Rebuild over the original outcome set so outcomes that were nudged to
    # zero probability stay in the sample space.
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct, base=base)  # values are in the original base
    new_X.set_rv_names(rvs)
    new_X._mask = mask
    return new_X
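The sorted_rvs line orders the variables by the Shannon entropy of their marginals, so the most deterministic variable is nudged first. Standalone, the ordering logic looks like this (entropy here is scipy.stats.entropy, which this snippet assumes the module also uses):

marginals = [np.array([0.9, 0.1]), np.array([0.5, 0.5]), np.array([0.7, 0.3])]
print(np.argsort([entropy(p) for p in marginals]))  # [0 2 1]: lowest entropy first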
Example #6
def max_local_nudge1(old_X: dit.Distribution,
                     YgivenX: np.ndarray,
                     eps: float = 0.01):
    """Combine the maximal individual nudges into one nudge of size eps."""
    if old_X.outcome_length() == 1:
        return max_global_nudge(old_X, YgivenX, eps)

    mask = old_X._mask
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    outcomes = old_X.outcomes
    rvs = old_X.get_rv_names()

    individual_nudges, _ = max_nudge(old_X.copy('linear'),
                                     YgivenX,
                                     eps=eps,
                                     nudge_type='local')
    # Collect the effect of each per-variable nudge on the joint pmf.
    new_Xs = np.zeros((old_X.outcome_length(), len(old_X)))
    for i, nudges in enumerate(individual_nudges):
        tmp = do_max_individual_nudge(old_X, nudges, i)
        new_Xs[i, :] = tmp.pmf - old_X.pmf

    # Sum the per-variable effects and rescale to total size eps.
    nudge = new_Xs.sum(axis=0)
    nudge = eps * nudge / (abs(nudge).sum())

    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        # In log space the nudge is split into log-magnitude and sign.
        log_nudge, sign = np.log(np.abs(nudge)), np.sign(nudge)
        perform_log_nudge(new_X, log_nudge, sign)
    dct = {o: new_X[o] if o in new_X.outcomes else 0.0 for o in outcomes}
    new_X = dit.Distribution(dct, base=base)  # values are in the original base
    new_X.set_rv_names(rvs)
    new_X._mask = mask
    return new_X
Example #7
def local_nudge1(old_X: dit.Distribution,
                 eps: float = 0.01) -> dit.Distribution:
    """Combine random individual nudges into one nudge of size eps."""
    mask = old_X._mask
    base = old_X.get_base()
    new_X = old_X.copy(base=base)
    old_X.make_dense()
    outcomes = old_X.outcomes
    rvs = list(old_X.get_rv_names())

    # Shuffle a copy of the names so the random order does not leak into the
    # rv names restored at the end.
    order = rvs[:]
    random.shuffle(order)
    new_Xs = np.zeros((len(order), len(old_X)))
    for i in range(len(order)):
        rvs_other = order[:i] + order[i + 1:]
        tmp = individual_nudge(old_X, eps, rvs_other=rvs_other)
        tmp.make_dense()
        old_X.make_dense()
        if base == 'linear':
            new_Xs[i, :] = tmp.pmf - old_X.pmf
        else:
            new_Xs[i] = tmp.copy(base='linear').pmf - old_X.copy(
                base='linear').pmf

    # Sum the per-variable effects and rescale to total size eps.
    nudge = new_Xs.sum(axis=0)
    nudge = eps * nudge / (abs(nudge).sum())

    if base == 'linear':
        perform_nudge(new_X, nudge)
    else:
        perform_log_nudge(new_X, np.log(np.abs(nudge)), np.sign(nudge))
    new_X = dit.Distribution(
        {o: new_X[o] if o in new_X.outcomes else 0.0
         for o in outcomes},
        base=base)
    new_X.set_rv_names(rvs)
    new_X._mask = mask
    return new_X
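local_nudge1 and local_nudge2 spend the same budget differently: local_nudge1 computes one candidate nudge per variable, sums them and rescales the total to eps before applying it once, while local_nudge2 simply applies eps / n individual nudges one variable at a time. A comparison sketch, assuming individual_nudge and the other module helpers are importable; the distribution is invented:

X = dit.Distribution(['00', '01', '10', '11'], [0.4, 0.1, 0.3, 0.2])
X.set_rv_names(['A', 'B'])
X1 = local_nudge1(X.copy(), eps=0.01)  # one combined nudge of size eps
X2 = local_nudge2(X.copy(), eps=0.01)  # two sequential nudges of size eps / 2
for nudged in (X1, X2):
    nudged.make_dense()
    print(abs(nudged.pmf - X.pmf).sum())  # total applied nudge, at most ~eps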