Example #1
from numpy import (allclose, arange, atleast_2d, cov, log, matrix, ones,
                   sqrt)
from surprise import Surprise


class TestSurprise(object):

    def setup(self):
        # Prepare the unit test: load the test data etc.
        print("setting up " + __name__)
        self.td = atleast_2d(arange(0, 10, 1)).T
        self.tw = ones(10)
        self.surprise = Surprise()
        self.tdt1 = ([0.], [[1.]])
        self.tdt2 = ([1.], [[0.5]])
        self.tdt3 = ([1.], [[2.]])

    def test_weightedLoad(self):
        res = self.surprise.load(self.td, self.tw)
        assert allclose(res.mean, matrix(self.td.mean(0)))
        assert allclose(res.cov, matrix(cov(self.td.T)))
        assert allclose(res.icov, matrix(cov(self.td.T)).I)

    def test_load(self):
        res = self.surprise.load(self.td, None)
        assert allclose(res[0], matrix(self.td.mean(0)))
        assert allclose(res[1], matrix(cov(self.td.T)))
        assert allclose(res[2], matrix(cov(self.td.T)).I)

    def test_getExpectedRelEnt(self):
        mode = ['add', 'replace', 'partial']
        D = .5 + log(2)
        ere = [log(2), log(2) + 1, log(2) + .5]
        S = [.5, -.5, 0]
        sD = [.5, 1.5, sqrt(255.) / 16.]
        lams = [.5, 1.5, 15. / 16.]
        dmus = [1., 1., 15. / 16.]
        # numerical values from R
        p = [0.1573089, 0.4142238, 0.3173187]
        for i, m in enumerate(mode):
            res = self.surprise(self.tdt1,
                                self.tdt2,
                                m,
                                self.tdt3,
                                getChi2=True,
                                bits=False)
            assert allclose(D, res[0] * 2.)
            assert allclose(ere[i], res[1] * 2.)
            assert allclose(S[i], res[2] * 2.)
            assert allclose(sD[i], res[3] * sqrt(2.))
            assert allclose(lams[i], res[5])
            assert allclose(dmus[i], res[6])
            if self.surprise.rpy2:
                assert allclose(p[i], res[4])
            else:
                assert res[4] is None

    def teardown(self):
        # Tidy up after each test.
        print("tearing down " + __name__)
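
The constants asserted in test_getExpectedRelEnt above can be checked by hand: tdt1 and tdt2 are the (mean, covariance) pairs of the one-dimensional Gaussians N(0, 1) and N(1, 0.5), and since the test passes bits=False the entropies are in nats. The asserted D then follows from the standard closed form for the relative entropy between Gaussians:

    D(\mathcal{N}(\mu_2, \sigma_2^2) \,\|\, \mathcal{N}(\mu_1, \sigma_1^2))
        = \frac{1}{2}\left[\frac{\sigma_2^2}{\sigma_1^2}
          + \frac{(\mu_2 - \mu_1)^2}{\sigma_1^2} - 1
          + \ln\frac{\sigma_1^2}{\sigma_2^2}\right]
        = \frac{1}{2}\left[\frac{1}{2} + 1 - 1 + \ln 2\right]
        = \frac{1}{4} + \frac{\ln 2}{2},

which is why the test compares res[0] * 2. against D = .5 + log(2).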
Example #2
from player import Player
import Certificates
from dice import Dice
import board
import numpy as np
from colorama import init, Fore, Back, Style
import sys
import time
import datetime

ROLLING = "-- You rolled a %s --"
START = "--- Welcome to The Package Arrived game. ---\nYou are positioned at (%s,%s)"

### Game Objects ###
board_game = board.Board().transition_dict
# Surprise is not imported in this excerpt; it is assumed to be this
# project's own surprise-event class.
surprise_generator = Surprise()


def print_plan(plan, logs):
    """
    Prints the plan to the screen and to the log file
    :param plan: the plan
    :param logs: the log file
    """
    for a in plan:
        if not isinstance(a, str):
            a = a.name
        print(a)
        write_to_log(a, logs)
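
# write_to_log is called above but not defined in this excerpt. A minimal
# hypothetical implementation (the signature is taken from the call in
# print_plan; the body and timestamp format are assumptions):
def write_to_log(message, logs):
    """Append one timestamped line per plan step to the open log file."""
    logs.write("%s %s\n" % (datetime.datetime.now().isoformat(), message))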

Example #3
#!/usr/bin/env python
"""
Uses surprise on a toy example.
"""

from surprise import Surprise
from numpy.random import randn
import numpy as np
from scipy.stats import boxcox, boxcox_normmax
import logging

# Module-level logger used by IterativeEntropy below.
logger = logging.getLogger(__name__)

sc = Surprise()

# Create two samples from a standard normal distribution
# Reshape into (# of samples, # of dimensions) array
n = 100
sample1 = randn(n).reshape(-1, 1)
sample2 = randn(n).reshape(-1, 1)

# Mode is 'replace', i.e. we are assuming that the two distributions
# are separately analysed posteriors.
mode = 'replace'

# Calculate entropy numbers with surprise
rel_ent, exp_rel_ent, S, sD, p = sc(sample1, sample2, mode=mode)

print('Entropy estimates for two standard normal distributions.')
print('Relative entropy D: %f' % rel_ent)
print('Expected relative entropy <D>: %f' % exp_rel_ent)
print('Surprise S: %f' % S)
print('Expected fluctuations of relative entropy sigma(D): %f' % sD)
print('p-value of Surprise: %f' % p)
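
# The call also supports mode='add', for the case where sample2 is a
# posterior obtained by updating sample1 as its prior (see the docstring of
# IterativeEntropy below). A sketch reusing the signature from above:
rel_ent_add, exp_rel_ent_add, S_add, sD_add, p_add = sc(sample1, sample2,
                                                        mode='add')
print('Relative entropy D (add mode): %f' % rel_ent_add)
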
def IterativeEntropy(pri_data, post_data, iterations, mode='add'):
    """
    Algorithm used to iteratively compute the relative entropy based on
    gaussianisation of both the prior and the posterior distribution.
    
    Args:
        pri_data (ndarray): Data array of shape (n, m) where n is #variats and 
                           m is #observations
        post_data (ndarray): Data array of shape (n, m) where n is #variats
                            and m is #observations
        iterations (int): Number of iterations
        mode (str): relation between dist1 and dist2; either
                    * 'add' if dist1 is the prior of dist2
                    * 'replace' if dist1 and dist2 are independently derived 
                       posteriors and the prior is much wider than the constraints
                    Default: 'add'
    Returns:
        prior (ndarray): Resulting data from iterative Box-Cox transformation
        posterior (ndarray): Resulting data from iterative Box-Cox transformation
        cach_rel_ent (list): Entropy computed after each Box-Cox transformation
        cach_exp_ent (list): Expected Entropy computed after each Box-Cox transformation
        cach_S (list): Surprise computed after each Box-Cox transformation
        cach_sD (list): Standard deviation computed after each Box-Cox transformation
        BoxCoxError (bool): True if Box-Cox failed
    """
    if not (isinstance(mode, str) and mode in ("add", "replace")):
        raise ValueError(
            "Invalid mode value %r; allowed values are 'add' or 'replace'."
            % (mode,))

    sc = Surprise()

    cach_rel_ent = []
    cach_exp_ent = []
    cach_S = []
    cach_sD = []

    BoxCoxError = False
    for steps in range(iterations):
        if steps == 0:
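            # standardise_data is an external helper not shown in this
            # snippet; with return_dim=True it is expected to return the
            # standardised samples together with their dimensionality.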
            prior, posterior, dim = standardise_data(pri_data.T,
                                                     post_data.T,
                                                     return_dim=True)
            prior = prior.T
            posterior = posterior.T

        # Transform to positive parameter values
        for k in range(dim):
            prior_a = np.amin(prior[k, :])
            post_a = np.amin(posterior[k, :])
            if prior_a < post_a and prior_a < 0:
                prior[k, :] -= prior_a
                posterior[k, :] -= prior_a
            elif post_a < 0:
                prior[k, :] -= post_a
                posterior[k, :] -= post_a
            prior_a = np.amin(prior[k, :])
            post_a = np.amin(posterior[k, :])
            while prior_a <= 0 or post_a <= 0:
                prior[k, :] += 5.0E-6
                posterior[k, :] += 5.0E-6
                prior_a = np.amin(prior[k, :])
                post_a = np.amin(posterior[k, :])

            # Find the optimal one-parameter Box-Cox transformation
            try:
                lmbda = boxcox_normmax(prior[k, :],
                                       brack=(-1.9, 2.0),
                                       method='mle')
                box_cox_prior = boxcox(prior[k, :], lmbda=lmbda)
                prior[k, :] = box_cox_prior

                box_cox_post = boxcox(posterior[k, :], lmbda=lmbda)
                posterior[k, :] = box_cox_post
            except RuntimeWarning:
                logger.warning(
                    f"Box Cox transformation failed during step {steps}")
                BoxCoxError = True
                break

        if BoxCoxError:
            break
        prior_mu = np.mean(prior, axis=1)
        prior_std = np.std(prior, axis=1)

        # Standardize data
        prior = ((prior.T - prior_mu) / prior_std).T
        posterior = ((posterior.T - prior_mu) / prior_std).T

        if dim > 1:
            # Rotate data into eigenbasis of the covar matrix
            pri_cov = np.cov(prior)

            # Compute eigenvalues
            eVa, eVe = np.linalg.eig(pri_cov)

            # Compute transformation matrix from eigen decomposition
            R, S = eVe, np.diag(np.sqrt(eVa))
            T = np.matmul(R, S).T
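            # Applying T^-1 below whitens the prior sample: in the new
            # basis its covariance is (approximately) the identity, so
            # the variates are decorrelated with unit variance.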

            # Transform data with inverse transformation matrix T^-1
            try:
                inv_T = np.linalg.inv(T)
                prior = np.matmul(prior.T, inv_T).T
                posterior = np.matmul(posterior.T, inv_T).T
            except np.linalg.LinAlgError:
                logger.warning(
                    f"Singular matrix, inversion failed! Setting all output values for step {steps} to None"
                )
                cach_rel_ent.append(None)
                cach_exp_ent.append(None)
                cach_S.append(None)
                cach_sD.append(None)
                break
        # Compute D, <D>, S and sigma(D)
        try:
            rel_ent, exp_rel_ent, S, sD, p = sc(prior.T,
                                                posterior.T,
                                                mode=mode)
        except Exception:
            logger.warning(
                f"Surprise() failed to compute the entropy values. Setting all output values for step {steps} to None"
            )
            rel_ent = None
            exp_rel_ent = None
            S = None
            sD = None

        cach_rel_ent.append(rel_ent)
        cach_exp_ent.append(exp_rel_ent)
        cach_S.append(S)
        cach_sD.append(sD)
        convergence_flag = 0
        """
         Very empirical convergence criterions. Idee is, that the true entropy value of the probe is either found after
         very vew transformations 1-3. First few transformations do not alter the computed entropy value by much, later
         on the transformations push the computed entropy away from the true value. Or the probe gets truly gaussianised
         by the transformation and the computed entropy value slowly converges to the true value i.e after 10+
         transformations.
        """
        # Early-found case: between steps 1 and 5, a small relative
        # increase of the entropy signals convergence.
        if 6 > steps >= 1 and None not in cach_rel_ent[-2:]:
            if cach_rel_ent[-1] > cach_rel_ent[-2] and abs(
                    cach_rel_ent[-1] -
                    cach_rel_ent[-2]) / cach_rel_ent[-1] < 0.035:
                convergence_flag = 2
        # Slowly-converging case: the last few entropy values agree to
        # within empirical tolerances.
        if steps == 2 and None not in cach_rel_ent[-3:]:
            if abs(cach_rel_ent[-1] -
                   cach_rel_ent[-2]) / cach_rel_ent[-1] < 0.001:
                convergence_flag += 1
            if abs(cach_rel_ent[-2] -
                   cach_rel_ent[-3]) / cach_rel_ent[-1] < 0.001:
                convergence_flag += 1
        elif steps == 3 and None not in cach_rel_ent[-3:]:
            if abs(cach_rel_ent[-1] -
                   cach_rel_ent[-2]) / cach_rel_ent[-1] < 0.002:
                convergence_flag += 1
            if abs(cach_rel_ent[-1] -
                   cach_rel_ent[-3]) / cach_rel_ent[-1] < 0.008:
                convergence_flag += 1
        elif steps > 3 and None not in cach_rel_ent[-4:]:
            if abs(cach_rel_ent[-1] -
                   cach_rel_ent[-2]) / cach_rel_ent[-1] < 0.0002:
                convergence_flag += 1
            if abs(cach_rel_ent[-1] -
                   cach_rel_ent[-3]) / cach_rel_ent[-1] < 0.0005:
                convergence_flag += 1
            if abs(cach_rel_ent[-1] -
                   cach_rel_ent[-4]) / cach_rel_ent[-1] < 0.0008:
                convergence_flag += 1
        if convergence_flag >= 2:
            logger.info(f"Convergence reached at step {steps}")
            break

    # Guard against an empty cache (e.g. a Box-Cox failure on the very
    # first step).
    if not cach_rel_ent:
        return prior, posterior, None, None, None, None, BoxCoxError
    return (prior, posterior, cach_rel_ent[-1], cach_exp_ent[-1],
            cach_S[-1], cach_sD[-1], BoxCoxError)
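
A minimal usage sketch for IterativeEntropy, assuming standardise_data is available in scope and following the (n variates, m observations) shape convention from the docstring; the toy inputs and iteration count are illustrative:

if __name__ == '__main__':
    # Hypothetical toy inputs: two 2-d Gaussian samples, shaped
    # (n variates, m observations) as the docstring requires.
    pri_data = randn(2, 5000)
    post_data = 0.5 * randn(2, 5000) + 1.0
    res = IterativeEntropy(pri_data, post_data, iterations=10, mode='add')
    prior, posterior, rel_ent, exp_rel_ent, S, sD, failed = res
    print('D: %s, <D>: %s, S: %s, sigma(D): %s (Box-Cox failed: %s)'
          % (rel_ent, exp_rel_ent, S, sD, failed))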