Code example #1
File: compare.py Project: SofiaAlmeida/TFG
def ent(k, x, y):
    print("Entropy of X:")
    print("Estimator 1: ", mi.entropy(x, k=k))
    print("Estimator 2: ", ee.entropy(x))

    print("Entropy of Y:")
    print("Estimator 1: ", mi.entropy(y, k=k))
    print("Estimator 2: ", ee.entropy(y))
Code example #2
File: CaPC.py Project: RogerDev/Causality
 def computeEntropies(self):
     for var in self.data.getSeriesNames():
         d = self.data.getSeries(var)
         #print('p1')
         p = self.mi_prepare(d)
         #print('p2')
         entropy = ee.entropy(p)
         self.entropies[var] = entropy
         Dprint("Entropy of ", var, "=", entropy)
     return
Code example #3
 def allAverageSubsetEntropies(variables):
     entropies = []
     a = 1
     while a < len(variables):
         subsets = randomSubsets(len(variables), a)
         entropy1 = averageSubsetEntropy(variables, subsets)
         entropies.append(entropy1)
         a += 1
     bigSetEntropy = ee.entropy(list(np.array(variables).T), k=10)
     return (entropies, bigSetEntropy)
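Example #3 relies on two helpers that this page does not show: averageSubsetEntropy (example #5 below) and randomSubsets. A minimal, hypothetical sketch of randomSubsets, assuming it returns a handful of random index subsets of size a drawn from the variable indices (this is not the project's actual implementation):

import random

# Hypothetical helper: return n_subsets random index subsets of the given
# size, drawn without replacement from the indices 0 .. n_vars-1.
def randomSubsets(n_vars, size, n_subsets=20):
    return [random.sample(range(n_vars), size) for _ in range(n_subsets)]

Each subset produced this way can be used directly for the NumPy fancy indexing in example #5.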
Code example #4
File: compare.py Project: SofiaAlmeida/TFG
def example_ent_unif(dat, k=3):
    x = dat.X
    a = dat.a
    b = dat.b

    H_est = mi.entropy(x, k)
    H_est2 = ee.entropy(x, k)

    print("Entropy of x:")
    print("Theoretical: ", np.log(b - a) / np.log(2))
    print("Estimator 1: ", H_est)
    print("Estimator 2: ", H_est2)
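The theoretical value printed here is the differential entropy of a uniform distribution on [a, b], which is log2(b - a) bits; dividing np.log(b - a) by np.log(2) converts from nats to bits. A quick sanity check with illustrative numbers (a and b below are made up, not taken from dat):

import numpy as np

# Differential entropy of Uniform(a, b) in bits is log2(b - a).
a, b = 0.0, 2.0
print(np.log(b - a) / np.log(2))  # 1.0 bit, since the interval has width 2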
Code example #5
 def averageSubsetEntropy(variables, subsets):
     totalEntropy = 0.0
     a = 0
     while a < len(subsets):
         subset = np.array(subsets[a])
         vars1 = np.array(variables)[subset]
         vars1 = list(vars1.T)
         entropy = ee.entropy(vars1, k=10)
         totalEntropy += entropy
         a += 1
     totalEntropy = totalEntropy / float(len(subsets))
     return totalEntropy
Code example #6
File: compare.py Project: SofiaAlmeida/TFG
def example_ent():
    x = np.random.choice([1.0, 2.0, 3.0, 4.0], (1000, 1),
                         p=[0.5, 0.25, 0.125, 0.125])

    H_est = mi.entropy(x, k=5)
    H_est2 = ee.entropy(x, k=5)

    print("Entropy of x:")
    print("Theoretical: ", 7.0 / 4.0)
    print("Estimator 1: ", H_est)
    print("Estimator 2: ", H_est2)

    y = np.random.choice([1.0, 2.0, 3.0, 4.0], (1000, 1),
                         p=[0.25, 0.25, 0.25, 0.25])

    H_est = mi.entropy(y, k=5)
    H_est2 = ee.entropy(y, k=5)

    print("Entropy of y:")
    print("Theoretical: ", 2.0)
    print("Estimator 1: ", H_est)
    print("Estimator 2: ", H_est2)
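The theoretical values above are the Shannon entropies, in bits, of the two discrete distributions: -sum(p_i * log2(p_i)) gives 0.5*1 + 0.25*2 + 0.125*3 + 0.125*3 = 1.75 for the first and 4 * (0.25*2) = 2.0 for the uniform one. A short check of that arithmetic:

import numpy as np

# Shannon entropy (in bits) of a discrete probability vector.
def discrete_entropy_bits(p):
    p = np.asarray(p, dtype=float)
    return -np.sum(p * np.log2(p))

print(discrete_entropy_bits([0.5, 0.25, 0.125, 0.125]))  # 1.75
print(discrete_entropy_bits([0.25, 0.25, 0.25, 0.25]))   # 2.0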
Code example #7
File: compare.py Project: SofiaAlmeida/TFG
def test_entropy(dat, k=3):
    C = dat.C
    X = dat.X

    H_th = mi.entropy_gaussian(C) / np.log(2)
    H_est = mi.entropy(X, k)
    H_est2 = ee.entropy(X, k)

    print("Gaussian entropy:")
    print("Theoretical: ", H_th)
    print("Estimator 1: ", H_est)
    print("Estimator 2: ", H_est2)

    # The estimated entropy should be below the theoretical value, but not by much
    np.testing.assert_array_less(H_est, H_th)
    np.testing.assert_array_less(H_est2, H_th)
    np.testing.assert_array_less(.9 * H_th, H_est)
    np.testing.assert_array_less(.9 * H_th, H_est2)
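Here mi.entropy_gaussian(C) presumably returns the closed-form differential entropy of a Gaussian with covariance C in nats, which the test converts to bits by dividing by np.log(2). That closed form is H = 0.5 * log((2*pi*e)^d * det(C)). A minimal sketch of the formula (the function name below is ours, and the assumption about what mi.entropy_gaussian computes is not confirmed by this page):

import numpy as np

# Closed-form differential entropy, in bits, of a d-dimensional Gaussian
# with covariance matrix C: 0.5 * log2((2*pi*e)^d * det(C)).
def entropy_gaussian_bits(C):
    C = np.atleast_2d(C)
    d = C.shape[0]
    return 0.5 * np.log2((2 * np.pi * np.e) ** d * np.linalg.det(C))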
Code example #8
File: ttest.py Project: SofiaAlmeida/TFG
def ttest(N, ds, ns, k, save):
    # MultiIndex DataFrame to store the measurements
    # Build the row and column names
    iterables = [ds, ['p', 't']]
    index = pd.MultiIndex.from_product(iterables, names=['ds', 'test-res'])
    iterables2 = [['ent', 'mi'], ns]
    cols = pd.MultiIndex.from_product(iterables2, names=['función', 'ns'])
    # Create the DataFrame
    df = pd.DataFrame(index=index, columns=cols)

    for d in ds:
        for n in ns:
            # ENTROPY
            # Generate the data
            X = [data.Data('random', n, d).X for i in range(0, N)]

            # Compute the entropy
            mi_ent = np.array([mi.entropy(x, k=k) for x in X])
            ee_ent = np.array([ee.entropy(x, k=k) for x in X])

            # Run the paired t-test
            t, p = stats.ttest_rel(mi_ent, ee_ent)

            # Store the results
            df.loc[(d, 'p'), ('ent', n)] = p
            df.loc[(d, 't'), ('ent', n)] = t

            # MUTUAL INFORMATION
            Y = [data.Data('random', n, d).X for i in range(0, N)]
            mi_mi = np.array(
                [mi.mutual_information((x, y), k=k) for x, y in zip(X, Y)])
            ee_mi = np.array([ee.mi(x, y, k=k) for x, y in zip(X, Y)])

            # Run the paired t-test
            t, p = stats.ttest_rel(mi_mi, ee_mi)

            # Store the results
            df.loc[(d, 'p'), ('mi', n)] = p
            df.loc[(d, 't'), ('mi', n)] = t

            # Write the results so far to a file for each function
            if (save):
                df.to_pickle("./stats/ttest.pkl")
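The results end up in a MultiIndex DataFrame pickled to ./stats/ttest.pkl. One way the table could be read back and queried afterwards, assuming the file written above exists (a sketch, not part of the original project, with illustrative d and n):

import pandas as pd

# Load the pickled results and pull the p-value of the paired t-test on the
# entropy estimates for one dimension d and sample size n.
df = pd.read_pickle("./stats/ttest.pkl")
d, n = 3, 1000  # must be values that appear in ds and ns
print(df.loc[(d, 'p'), ('ent', n)])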
Code example #9
File: ttest.py Project: SofiaAlmeida/TFG
def test_normality(N, ds, ns, k):
    for d in ds:
        for n in ns:
            print("ENTROPY")
            # Generate the data
            X = np.array([data.Data('random', n, d).X for i in range(0, N)])

            # Compute the entropy
            mi_ent = np.array([mi.entropy(x, k=k) for x in X])
            ee_ent = np.array([ee.entropy(x, k=k) for x in X])

            # Compute the differences between the entropies
            D = np.array([X1 - X2 for X1, X2 in zip(mi_ent, ee_ent)])

            # Run the normality test
            k2, p = stats.normaltest(D)

            print("d: ", d, ", n: ", n)
            if p <= 0.05:  # Null hypothesis: D comes from a normal distribution
                print("We can reject the null hypothesis")
            else:
                print("We cannot reject the null hypothesis")

            print("MUTUAL INFORMATION")
            # Generate the data
            Y = [data.Data('random', n, d).X for i in range(0, N)]

            # Compute the mutual information
            mi_mi = np.array(
                [mi.mutual_information((x, y), k=k) for x, y in zip(X, Y)])
            ee_mi = np.array([ee.mi(x, y, k=k) for x, y in zip(X, Y)])

            # Compute the differences between the implementations
            D = np.array([X1 - X2 for X1, X2 in zip(mi_mi, ee_mi)])

            # Run the normality test
            k2, p = stats.normaltest(D)

            print("d: ", d, ", n: ", n)
            if p <= 0.05:  # Null hypothesis: D comes from a normal distribution
                print("We can reject the null hypothesis")
            else:
                print("We cannot reject the null hypothesis")
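stats.normaltest is D'Agostino and Pearson's normality test; the null hypothesis is that the sample comes from a normal distribution, so a p-value at or below 0.05 is read above as grounds to reject normality. A tiny illustration of that decision rule on clearly normal versus clearly non-normal data (sketch only):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
for sample in (rng.normal(size=500), rng.exponential(size=500)):
    k2, p = stats.normaltest(sample)
    print("reject normality" if p <= 0.05 else "cannot reject normality", p)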
Code example #10
File: compare.py Project: SofiaAlmeida/TFG
def err_entropy(dat, k=3, show=False):
    C = dat.C
    X = dat.X

    # Compute the entropies
    H_th = mi.entropy_gaussian(C) / np.log(2)
    H_est = mi.entropy(X, k)
    H_est2 = ee.entropy(X, k)

    if (show):
        print("Gaussian entropy:")
        print("Theoretical: ", H_th)
        print("Estimator 1: ", H_est)
        print("Estimator 2: ", H_est2)

    # Compute the difference between the theoretical and estimated entropy
    dif1 = abs(H_th - H_est)
    dif2 = abs(H_th - H_est2)

    return dif1, dif2
Code example #11
 def computeEntropies(self):
     eList = []
     eList2 = []
     for var in self.data.getSeriesNames():
         d = self.data.getSeries(var)
         #print('len = ', len(d))
         #print('p1')
         p = self.mi_prepare(d)
         #print('p2')
         entropy = ee.entropy(p, base=math.e)
         self.entropies[var] = entropy
         dentropy = self.dentropy(var)
         #print("Entropy, dentropy of ", var, "=", entropy, dentropy)
         eList.append((dentropy, var))
         eList2.append((entropy, var))
     eList.sort()
     #print('eList = ', eList)
     eList2.sort()
     #print('eList2 = ', eList2)
     return
Code example #12
File: test_main.py Project: Palpatineli/npeepy
def test_entropy():
    np.random.seed(12345)
    results = [[ee.entropy(np.random.rand(200, 2), k=j) for j in range(1, 6)]
               for _ in range(200)]
    assert (np.all(np.diff(np.mean(results, axis=0)) > 0))
Code example #13
File: test.py Project: apengelbrecht/alexeyche-junk
#!/bin/env python
# Testing the NPEET estimators

import entropy_estimators as ee
from math import log, pi
import numpy as np
import numpy.random as nr
import random
from numpy.linalg import det

# Some test cases to see usage and correctness

# Differential entropy estimator
print("For a uniform distribution with width alpha, the differential entropy is log_2 alpha, setting alpha = 2")
print("and using k=1, 2, 3, 4, 5")
print("result:", [ee.entropy([[2 * random.random()] for i in range(1000)], k=j + 1) for j in range(5)])

# CONDITIONAL MUTUAL INFORMATION
Ntry = [10000] #Number of samples to use in estimate
nsamples = 5  # Number of times to est mutual information for CI
samplo = int(0.025 * nsamples)  # confidence intervals
samphi = int(0.975 * nsamples)

# print('\nGaussian random variables\n')
# print('Conditional Mutual Information')
d1 = [1, 1, 0]
d2 = [1, 0, 1]
d3 = [0, 1, 1]
mat = [d1, d2, d3]
tmat = np.transpose(mat)
diag = [[3, 0, 0], [0, 1, 0], [0, 0, 1]]
Code example #14
import entropy_estimators as ee
from math import log, pi
import numpy as np
import numpy.random as nr
import random
from numpy.linalg import det

# Some test cases to see usage and correctness

# Differential entropy estimator
print(
    "For a uniform distribution with width alpha, the differential entropy is log_2 alpha, setting alpha = 2"
)
print("and using k=1, 2, 3, 4, 5")
print("result:", [
    ee.entropy([[2 * random.random()] for i in range(1000)], k=j + 1)
    for j in range(5)
])

# CONDITIONAL MUTUAL INFORMATION
Ntry = [10, 25, 50, 100,
        200]  # , 1000, 2000] #Number of samples to use in estimate
nsamples = 100  # Number of times to est mutual information for CI
samplo = int(0.025 * nsamples)  # confidence intervals
samphi = int(0.975 * nsamples)

print('\nGaussian random variables\n')
print('Conditional Mutual Information')
d1 = [1, 1, 0]
d2 = [1, 0, 1]
d3 = [0, 1, 1]
Code example #15
#python2.7
#Testing the NPEET estimators

import entropy_estimators as ee
from math import log,pi
import numpy as np
import numpy.random as nr
import random
from numpy.linalg import det

#Some test cases to see usage and correctness

## Differential entropy estimator
print "For a uniform distribution with width alpha, the differential entropy is log_2 alpha, setting alpha = 2"
print "and using k=1,2,3,4,5"
print "result:", [ee.entropy([[2*random.random()] for i in range(1000)],k=j+1) for j in range(5)]

## CONDITIONAL MUTUAL INFORMATION
Ntry = [10,25,50,100,200] #,1000,2000] #Number of samples to use in estimate
nsamples = 100 #Number of times to est mutual information for CI
samplo = int(0.025*nsamples) #confidence intervals
samphi = int(0.975*nsamples)

print '\nGaussian random variables\n'
print 'Conditional Mutual Information'
d1 = [1,1,0]
d2 = [1,0,1]
d3 = [0,1,1]
mat = [d1,d2,d3]
tmat = np.transpose(mat)
diag = [[3,0,0],[0,1,0],[0,0,1]]
Code example #16
def calculateEntropy(x, k):
    return ee.entropy(np.array([x]).T, k=k)
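The wrapper above reshapes a flat list of samples into the column format ee.entropy works on: np.array([x]).T has shape (n, 1), one row per observation. A sketch of how it might be called, assuming NPEET's entropy_estimators is importable as ee as in the other examples on this page:

import numpy as np
import entropy_estimators as ee  # NPEET, as in the other examples

samples = list(2 * np.random.rand(1000))  # 1000 draws from Uniform(0, 2)
x = np.array([samples]).T                 # shape (1000, 1)
print(ee.entropy(x, k=3))                 # roughly 1 bit, per the log_2 alpha formula quoted in the test scripts above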
Code example #17
File: plot.py Project: dartmouthrobotics/gds_tools
def logging_entropy(log_path):
    if os.path.isfile(os.path.join(log_path, 'entropy.txt')):
        return
    img = cv2.imread(
        '/home/alberto/catkin_ws/src/pf2d_localizer/worlds/test_environment_similarities/test_environment_similarities.png',
        0)
    with open(
            '/home/alberto/DATA/Dropbox/work/catkin_src/pf2d_localizer/worlds/test_environment_similarities/test_environment_similarities.yaml',
            'r') as stream:
        yaml_data = yaml.load(stream)

        x_min = -img.shape[1] / 2.0 * float(yaml_data['resolution'])
        y_max = img.shape[0] / 2.0 * float(yaml_data['resolution'])

        onlyfiles = [
            f for f in os.listdir(log_path)
            if os.path.isfile(os.path.join(log_path, f)) and "particles" in f
            and "txt" in f
        ]
        onlyfiles = sorted(
            onlyfiles, key=lambda f: int(os.path.splitext(f)[0].split('_')[1]))
        entropies = []
        for f in onlyfiles:
            #print onlyfiles

            particles = numpy.genfromtxt(os.path.join(log_path, f),
                                         unpack=True)

            #img = cv2.imread('/home/alberto/DATA/Dropbox/work/catkin_src/pf2d_localizer/worlds/map_turtlebot/map_turtlebot.pgm', 0)
            """
            total_weight = 0.0
            for w in particles[3]:
                total_weight += w
            for w in particles[3]:
                #print "prev ", w
                w /= total_weight
                #print "aft ", w
            bins = numpy.add.accumulate(particles[3])
            values_indices = numpy.digitize(numpy.random.random_sample(10000), bins)"""
            """for v in values_indices:
                if particles[3][v] == 0:
                    print  particles[3][v]"""
            if consider_angle:
                #particles_pair = [list(p) for p in zip(particles[0]-x_min/float(yaml_data['resolution']), (y_max-particles[1])/float(yaml_data['resolution']), particles[2]/3.14*180+180)] # TODO weight
                particles_pair = [
                    list(p)
                    for p in zip(particles[0], particles[1], particles[2])
                ]  # TODO weight
                #particles_pair = [list([particles[0][v]-x_min/float(yaml_data['resolution']), (y_max-particles[1][v])/float(yaml_data['resolution']), particles[2][v]/3.14*180+180]) for v in values_indices if particles[3][v] > 0.000001] # TODO weight
            else:
                #particles_pair = [list(p) for p in zip(particles[0]-x_min/float(yaml_data['resolution']), (y_max-particles[1])/float(yaml_data['resolution']))]
                particles_pair = [
                    list(p) for p in zip(particles[0], particles[1])
                ]  # TODO weight
                #particles_pair = [list([particles[0][v-1]-x_min/float(yaml_data['resolution']), (y_max-particles[1][v-1])/float(yaml_data['resolution'])]) for v in values_indices if particles[3][v-1] > 0.0001]

            entropies.append(ee.entropy(particles_pair))

        with open(os.path.join(log_path, 'entropy.txt'),
                  'w') as f:  # TODO parameter
            for e in entropies:
                f.write(str(e) + '\n')