示例#1
0
def test_binning(dims, nreps, nbins, data_ranges):
    """ Test execution of both types of binning
    1. Equal interval
    2. Manual specification

    Args:
        dims: ignored; reset to 2 for this test
        nreps: number of shifted binnings passed to InfoTools
        nbins: bins per dimension for equal-interval binning
        data_ranges: pair of (mins, maxs) for equal-interval binning
    """
    print("\n" + bcolors.TEST_HEADER + "BINNING" + bcolors.ENDC)
    mi_eq = mi_mb = None
    # resetting for this test
    dims = 2
    # generating a common set of datapoints (second var tracks the first
    # with a small additive perturbation)
    datapoints = []
    for _ in range(1000):
        point1 = np.random.rand()
        point2 = point1 + (np.random.rand() / 30)
        datapoints.append([point1, point2])

    # Equal interval binning
    try:
        print("Estimating MI using equal interval binning = ",
              end="",
              flush=True)
        it = infotheory.InfoTools(dims, nreps)

        # set bin boundaries
        it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])

        # adding points
        it.add_data(datapoints)

        # computing mutual information
        mi_eq = it.mutual_info([0, 1])
        print(mi_eq, SUCCESS)
    except Exception as e:
        _except(e)

    # Manual binning
    try:
        print("Estimating MI using manually specified binning = ",
              end="",
              flush=True)
        it = infotheory.InfoTools(dims, nreps)

        # set bin boundaries
        it.set_bin_boundaries([[0.3333, 0.6666], [0.3333, 0.6666]])

        # adding points
        it.add_data(datapoints)

        # computing mutual information
        mi_mb = it.mutual_info([0, 1])
        print(mi_mb, SUCCESS)
    except Exception as e:
        _except(e)

    # mi_eq == mi_mb?
    # BUGFIX: either estimate may still be None if its try-block failed
    # (the exceptions above are swallowed by _except); subtracting None
    # would raise a TypeError unrelated to the binning under test.
    if mi_eq is not None and mi_mb is not None:
        print(
            "Tested both binning methods. Difference in result = {}".format(
                mi_eq - mi_mb),
            SUCCESS,
        )
    else:
        print("Skipping binning comparison; at least one estimate failed")
示例#2
0
def calculate_mi(filename, nbins=50, nreps=3):
    """Load recorded activity from `filename` and return per-neuron
    normalized mutual information.

    For each of the nH hidden dimensions, estimates the mutual information
    between the nI input dimensions (grouped as one variable) and that
    hidden dimension, normalized by the entropy computed over the input
    group (dimensions tagged -1 are presumably excluded — confirm against
    the infotheory docs).

    Args:
        filename: path to a .npy file; columns 0..nI+nH-1 are used
        nbins: number of bins along every dimension
        nreps: number of shifted binnings averaged by InfoTools

    Returns:
        list of nH normalized MI values, one per hidden dimension
    """
    dat = np.load(filename)
    print(dat.shape)
    nI = 10
    nH = 10
    dims = nI + nH

    # setup for infotheory analysis
    # (BUGFIX: removed unused `neuron_inds` array — the loop below already
    # iterates the same range directly)
    mis = []
    mins = np.min(dat[:, :dims], 0)
    maxs = np.max(dat[:, :dims], 0)

    # add all data once; var-ID lists below select sub-groups per query
    it = infotheory.InfoTools(dims, nreps)
    it.set_equal_interval_binning([nbins] * dims, mins, maxs)
    it.add_data(dat[:, :dims])

    # estimate entropy of the input group (normalizer for every MI below)
    var_ids = [0] * nI + [-1] * nH
    ent = it.entropy(var_ids)

    # estimate mutual information, toggling one hidden dim at a time
    for i in range(nI, dims):
        print("\tNeuron # {}".format(i + 1))
        var_ids[i] = 1
        mi = it.mutual_info(var_ids)
        var_ids[i] = -1
        mis.append(mi / ent)

    return mis
示例#3
0
def pid_test_3D(dims, nreps, nbins, data_ranges, data):
    """ testing sum of pid == total_mi

    Partial information decomposition components (redundant + unique_1 +
    unique_2 + synergy) should sum to the total mutual information between
    the two sources and the target.
    """
    try:
        # creating the object
        it = infotheory.InfoTools(dims, nreps)
        it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
        # adding points
        it.add_data(data)
        # estimating mutual information (sources grouped together vs target)
        mi = it.mutual_info([1, 1, 0])
        # PID components; sources tagged 1 and 2, target tagged 0
        redundant_info = it.redundant_info([1, 2, 0])
        unique_1 = it.unique_info([1, 2, 0])
        unique_2 = it.unique_info([2, 1, 0])
        synergy = it.synergy([1, 2, 0])

        # total_pid
        total_pid = np.sum(
            np.round([redundant_info, unique_1, unique_2, synergy],
                     decimals=6))
        # mi
        total_mi = np.round(mi, decimals=6)

        # BUGFIX: compare the ABSOLUTE difference — the unsigned check
        # passed whenever total_pid < total_mi, no matter how far apart.
        if abs(total_pid - total_mi) < 1e-5:
            print(total_pid, total_mi, SUCCESS)
        else:
            # BUGFIX: format arguments were swapped relative to the labels
            # in the message (total_pid was printed as total_mi).
            raise Exception(
                "Total PID does not equal MI: total_mi = {}; total_pid = {}".
                format(total_mi, total_pid))
    except Exception as e:
        _except(e)
示例#4
0
def decomposition_test_4D(dims, nreps, nbins, data_ranges, data, targets):
    """ testing if 4D PID matches expected values

    Args:
        dims, nreps, nbins, data_ranges: InfoTools configuration
        data: samples with three source dims and one target dim
        targets: expected values, in order [total_mi, redundant,
                 unique_1, unique_2, unique_3, synergy]
    """
    try:
        # creating the object and adding data
        it_par = infotheory.InfoTools(dims, nreps)
        it_par.set_equal_interval_binning(nbins, data_ranges[0],
                                          data_ranges[1])
        it_par.add_data(data)

        # PID-ing; target dim tagged 0, and for each unique_info call the
        # source of interest is presumably the one tagged 1 (verify against
        # the infotheory docs)
        total_mi = it_par.mutual_info([1, 1, 1, 0])
        redundant_info = it_par.redundant_info([1, 2, 3, 0])
        unique_1 = it_par.unique_info([1, 2, 3, 0])
        unique_2 = it_par.unique_info([2, 1, 3, 0])
        unique_3 = it_par.unique_info([2, 3, 1, 0])
        synergy = it_par.synergy([1, 2, 3, 0])
        # (BUGFIX: removed unused `results` list that aggregated the values
        # but was never read)

        # compare each component against its expected value
        base_str = "Decomposition test | "
        do_matching(base_str, total_mi, targets[0], "Total MI")
        do_matching(base_str, redundant_info, targets[1], "Redundant info | ")
        do_matching(base_str, unique_1, targets[2], "Unique source 1 info | ")
        do_matching(base_str, unique_2, targets[3], "Unique source 2 info | ")
        do_matching(base_str, unique_3, targets[4], "Unique source 3 info | ")
        do_matching(base_str, synergy, targets[5], "Synergistic info | ")

    except Exception as e:
        _except(e)
示例#5
0
def test_creation(dims, nreps, nbins, data_ranges):
    """Verify that an InfoTools object can be created and configured."""
    print("Testing creating an object. ", end="", flush=True)
    try:
        # construct and configure; any failure is routed to _except
        tool = infotheory.InfoTools(dims, nreps)
        tool.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
        print(bcolors.OKGREEN + "SUCCESS" + bcolors.ENDC)
    except Exception as e:
        _except(e)
示例#6
0
def get_pid_3D(data):
    """Compute and return the rounded PID components of 3D `data`.

    Returns:
        tuple (unique_1, unique_2, redundant, synergy), each passed
        through the module-level `rnd` helper.
    """
    n_dims = np.shape(data)[1]
    analyzer = infotheory.InfoTools(n_dims, 3)
    analyzer.set_equal_interval_binning([10] * n_dims, np.min(data, 0),
                                        np.max(data, 0))
    analyzer.add_data(data)
    # PID-ing (presumably the dimension tagged 0 is the target — confirm
    # against the infotheory docs)
    unique_a = analyzer.unique_info([0, 1, 2])
    unique_b = analyzer.unique_info([0, 2, 1])
    redundancy = analyzer.redundant_info([0, 1, 2])
    syn = analyzer.synergy([0, 1, 2])
    return rnd(unique_a), rnd(unique_b), rnd(redundancy), rnd(syn)
示例#7
0
def entropy_test(dims,
                 nreps,
                 nbins,
                 data_ranges,
                 data_sampler,
                 num_samples=1000):
    """Estimate and print the entropy of values drawn from `data_sampler`."""
    try:
        # creating the object
        estimator = infotheory.InfoTools(dims, nreps)
        estimator.set_equal_interval_binning(nbins, data_ranges[0],
                                             data_ranges[1])
        # adding one sampled point at a time
        for _ in range(num_samples):
            estimator.add_data_point([data_sampler()])
        # estimate entropy of the single variable
        print(estimator.entropy([0]), SUCCESS)
    except Exception as e:
        _except(e)
示例#8
0
def decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data):
    """Check that redundancy and synergy are symmetric in the two sources."""
    try:
        # creating the object
        tool = infotheory.InfoTools(dims, nreps)
        tool.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
        # adding points
        tool.add_data(data)
        # same quantities computed with the two source tags swapped;
        # both orderings should agree
        redundancy_a = tool.redundant_info([1, 2, 0])
        synergy_a = tool.synergy([1, 2, 0])
        redundancy_b = tool.redundant_info([2, 1, 0])
        synergy_b = tool.synergy([2, 1, 0])
        base_str = "Decomposition equivalence | "
        do_matching(base_str, redundancy_a, redundancy_b,
                    "Redundant info | ")
        do_matching(base_str, synergy_a, synergy_b, "Synergy | ")
    except Exception as e:
        _except(e)
示例#9
0
def decomposition_test_3D(dims, nreps, nbins, data_ranges, data, results):
    """Compare the 3D PID components against the expected `results`."""
    try:
        # creating the object
        tool = infotheory.InfoTools(dims, nreps)
        tool.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
        # adding points
        tool.add_data(data)
        # PID components, in the same order as the expected `results`
        # (dimension tagged 0 is presumably the target)
        pid = [
            tool.redundant_info([1, 2, 0]),
            tool.unique_info([1, 2, 0]),
            tool.unique_info([2, 1, 0]),
            tool.synergy([1, 2, 0]),
        ]
        if all(np.round(pid, decimals=2) == results):
            print(pid[-1], SUCCESS)
        else:
            raise Exception("PID computation error")
    except Exception as e:
        _except(e)
示例#10
0
def uniform_random_mi_test(dims, nreps, nbins, data_ranges, num_samples=1000):
    """Estimate normalized MI between uniform random variables and print it."""
    print("Testing mutual info with uniform random variables. MI = ",
          end="",
          flush=True)
    try:
        # creating the object
        it = infotheory.InfoTools(dims, nreps)
        it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])

        # adding points
        it.add_data(np.random.rand(num_samples, dims))
        # ...alternatively,
        # for _ in range(num_samples):
        #    it.add_data_point(np.random.rand(dims))

        # estimating mutual information, normalized by (1/dims) * log2 of
        # the total number of bins
        mi = it.mutual_info([0, 1]) / ((1 / dims) * np.log2(np.prod(nbins)))
        print(mi, SUCCESS)

    except Exception as e:
        # BUGFIX: the error was reported twice (a bare print(e) followed by
        # _except(e)); report once via the shared handler like other tests.
        _except(e)
示例#11
0
def identical_random_mi_test(dims,
                             nreps,
                             nbins,
                             data_ranges,
                             add_noise=False,
                             num_samples=1000):
    """Estimate normalized MI between two identical (optionally noisy)
    random variables and print it."""
    print("Testing mutual info with identical random variables",
          end="",
          flush=True)
    if add_noise:
        print(" with noise. MI = ", end="", flush=True)
    else:
        print(". MI = ", end="", flush=True)

    try:
        # dims must be even so the data can split into two equal halves
        if dims % 2 != 0:
            dims += 1
        it = infotheory.InfoTools(dims, nreps)
        it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
        half = dims // 2

        # each sample is a random half-vector concatenated with itself
        # (plus a small perturbation when add_noise is set)
        for _ in range(num_samples):
            first = np.random.rand(half)
            if add_noise:
                second = first + (np.random.rand(half) / 30)
            else:
                second = first
            it.add_data_point(np.concatenate((first, second)))

        # computing mutual information, normalized by (1/dims) * log2 of
        # the total number of bins
        normalizer = (1 / dims) * np.log2(np.prod(nbins))
        mi = it.mutual_info([0, 1]) / normalizer
        print(mi, SUCCESS)

    except Exception as e:
        _except(e)
示例#12
0
def manual_test(m, n):
    # Sanity check: MI between two binary variables built from m copies of
    # [0, 0] and n copies of [1, 1].
    # NOTE(review): this uses a 5-argument InfoTools constructor
    # (dims, nreps, nbins, mins, maxs), unlike the 2-argument form plus
    # set_equal_interval_binning used everywhere else in this file —
    # presumably a legacy API; confirm against the installed infotheory
    # version before changing.
    it = infotheory.InfoTools(2, 1, [2, 2], [0, 0], [1, 1])
    it.add_data([[0, 0]] * m + [[1, 1]] * n)
    print("m = ", m, " n = ", n, " MI = ", it.mutual_info([0, 1]))
示例#13
0
###############################################################################
# Parital information decomposition baseline
#
# Madhavun Candadai
# Dec, 2018
#
# Partial information decomposition for XOR and AND gates
###############################################################################
import infotheory

# 2 input AND gate: the full truth table, one row per input combination,
# with the output as the third column
print("2-input logical AND")
data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]]

# creating the object and adding data
# (3 dims = 2 sources + 1 target; 0 shifted re-binnings)
it_and = infotheory.InfoTools(3, 0)
it_and.set_equal_interval_binning([2] * 3, [0] * 3, [1] * 3)
it_and.add_data(data)

# PID-ing
# total MI: both inputs grouped as variable 0 vs the output as variable 1
total_mi = it_and.mutual_info([0, 0, 1])
# PID components: sources tagged 1 and 2, target tagged 0
redundant_info = it_and.redundant_info([1, 2, 0])
unique_1 = it_and.unique_info([1, 2, 0])
unique_2 = it_and.unique_info([2, 1, 0])
synergy = it_and.synergy([1, 2, 0])

print("total_mi = {}".format(total_mi))
print("redundant_info = {}".format(redundant_info))
print("unique_1 = {}".format(unique_1))
print("unique_2 = {}".format(unique_2))
print("synergy = {}\n".format(synergy))
示例#14
0
# Python demo
# Madhavun Candadai
# Nov 2018
##############################################################################
import infotheory
import numpy as np

## Setup
dims = 4  # total dimensionality of all variables = 2+2 = 4
nreps = 1  # number of shifted binnings over which data is binned and averaged
nbins = [10] * dims  # number of bins along each dimension of the data
mins = [0] * dims  # min value or left edge of binning for each dimension
maxs = [1] * dims  # max value or right edge of binning for each dimension

## Creating object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, mins, maxs)

## Adding data - concatenate data from all vars
# (each data point is one 4D sample: 2 dims per random variable)
for _ in range(100):
    it.add_data_point(np.random.rand(dims))

## Invoke infotheory tools
varIDs = [0, 0, 1, 1]  # to identify the different vars
mi = it.mutual_info(varIDs)  # mutual information between two random vars
# normalizing by log2 of the total number of bins across all dimensions
mi /= np.log2(np.prod(nbins))  # normalizing
print("Mutual information between the two random 2D data = {}".format(mi))

varIDs = [
    0,
    -1,
示例#15
0
# Entropy of a coin flip for different probabilities of HEADS ranging from 0 to
# 1 should give an inverted-U shaped curve
###############################################################################

import numpy as np
import infotheory
import matplotlib

# select the backend before importing pyplot
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt

# range of probabilties for HEADS in coin flip
p = np.arange(0, 1.01, 0.01)
entropies = []
for pi in p:
    # fresh 1-D estimator per probability; 2 bins for a binary outcome
    it = infotheory.InfoTools(1, 0)
    it.set_equal_interval_binning([2], [0], [1])

    # flipping coin 10000 times
    # (HEADS encoded as 0, occurring with probability pi)
    for _ in range(10000):
        if np.random.rand() < pi:
            it.add_data_point([0])
        else:
            it.add_data_point([1])

    # estimating entropy
    entropies.append(it.entropy([0]))

# entropy vs P(HEADS) should trace an inverted-U peaking at p = 0.5
plt.figure(figsize=[3, 2])
plt.plot(p, entropies)
plt.xlabel("Probability of HEADS")
示例#16
0
def sub_plt(ind, data, mi):
    """Render one scatter subplot titled with its mutual information.

    Args:
        ind: 3-digit subplot position code (e.g. 141)
        data: pair [x, y] of coordinate sequences
        mi: mutual information value shown (rounded) in the title
    """
    x_vals, y_vals = data[0], data[1]
    plt.subplot(ind)
    plt.plot(x_vals, y_vals, ".", markersize=2)
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel("Random variable, X", fontsize=12)
    plt.ylabel("Random variable, Y", fontsize=12)
    plt.title("Mutual information\n {}".format(np.round(mi, 4)), fontsize=13)


plt.figure(figsize=[10, 3])

# identical variables
# (y is x reversed, i.e. perfectly anti-correlated with x)
x = np.arange(0, 1, 1 / 1000)
y = np.flipud(x)
it = infotheory.InfoTools(2, 0)
it.set_equal_interval_binning([50] * 2, [0] * 2, [1] * 2)
it.add_data(np.vstack([x, y]).T)
# normalize by log2(nbins) per dimension
mi = it.mutual_info([0, 1]) / np.log2(50)
sub_plt(141, [x, y], mi)

# shuffled identical variables
# (Gaussian clusters centered on a grid of means; y uses the same means in
# a random order, destroying the pairing while keeping the marginals)
inds = np.arange(0.05, 1, 0.1)
x = [np.random.normal(loc=id, scale=0.015, size=[30]) for id in inds]
x = np.asarray(x).flatten()
s_inds = np.random.permutation(inds)
y = [np.random.normal(loc=id, scale=0.015, size=[30]) for id in s_inds]
y = np.asarray(y).flatten()
it = infotheory.InfoTools(2, 0)
it.set_equal_interval_binning([10] * 2, [0] * 2, [1] * 2)
it.add_data(np.vstack([x, y]).T)
示例#17
0
# Nov 2018
##############################################################################
import infotheory
import numpy as np

## Setup
dims = 3  # total dimensionality of all variables = 2 sources + 1 target = 3
nreps = 0  # number of shifted binnings over which data is binned and averaged
nbins = [
    2
] * dims  # number of bins along each dimension of the data = 2 for binary data
mins = [0] * dims  # min value or left edge of binning for each dimension
maxs = [1] * dims  # max value or right edge of binning for each dimension

## Creating object
it_xor = infotheory.InfoTools(dims, nreps)
it_xor.set_equal_interval_binning(nbins, mins, maxs)

## Adding data for XOR
# (full XOR truth table: two input columns, output column last)
it_xor.add_data([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])

# display config
it_xor.display_config()

## Invoking info theoretic functions
# sources tagged 1 and 2, target tagged 0
xor_synergy = it_xor.synergy([1, 2, 0])

## Creating object
# (a second estimator, configured identically, for the AND gate)
it_and = infotheory.InfoTools(dims, nreps)
it_and.set_equal_interval_binning(nbins, mins, maxs)