import numpy as np
import theano.tensor as T
from theano import function


def calcColNormalizer(inMatrix):
    # Theano function for calculating logSum, i.e., computing ln(X + Y) from ln(X) and ln(Y).
    maxExp = -4950.0  # below this difference, exp() underflows and the smaller term is dropped
    x, y = T.fscalars(2)
    
    yMinusx = y - x  # used when x > y
    xMinusy = x - y  # used when x < y
    bigger = T.switch(T.gt(x, y), x, y)
    YSubtractX = T.switch(T.gt(x, y), yMinusx, xMinusy)
    x_prime = T.log(1 + T.exp(YSubtractX)) + bigger
    calcSum = T.switch(T.lt(YSubtractX, maxExp), bigger, x_prime)
    logSum = function([x, y], calcSum, allow_input_downcast=True)
    ####### end of logSum  ###############
    
    # now we calculate the sum of the log joint as the normalizer
    if len(inMatrix.shape) < 2:
        raise Exception("calcColNormalizer expects a 2D matrix")
    nRows, nCols = inMatrix.shape        
    columnAccumLogSum = np.zeros(nCols)        
    for col in range(nCols):
        currLogSum = np.NINF
        for j in range(nRows):
            if inMatrix[j,col] == np.NINF:
                continue
            currLogSum = logSum(currLogSum, inMatrix[j,col])             
        columnAccumLogSum[col] = currLogSum
        
    return columnAccumLogSum
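
# Usage sketch (illustrative, not from the original source): the returned
# vector holds, per column, the log of that column's summed probabilities.
logP = np.log(np.array([[0.1, 0.4],
                        [0.2, 0.3]]))
print(calcColNormalizer(logP))           # ~ [ln(0.3), ln(0.7)]
print(np.log(np.exp(logP).sum(axis=0)))  # same values, computed directly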
Example #2
import numpy as np
import theano
import theano.tensor as T


def advance(u, u_1, u_2, f_a, Cx2, Cy2, dt2, V=None, step1=False):
    u_in, u_1_in, u_2_in = T.fmatrices('u_in','u_1_in','u_2_in')
    f_a_in, V_in = T.fmatrices('f_in','V_in')
    step1_in = T.lscalar('step1_in')
    Cx2_in, Cy2_in, dt2_in = T.fscalars('Cx2_in','Cy2_in','dt2_in')

    if V is None:
        V = np.zeros_like(f_a)

    # The symbolic stencil `step` (the update expression for the new time
    # level, built from the input variables above) is elided in this
    # listing; see the sketch after this function. It is the output that
    # theano.function compiles.
    step_f = theano.function([u_in, u_1_in, u_2_in, f_a_in, Cx2_in, Cy2_in,
                              dt2_in, V_in, step1_in],
                             step, on_unused_input='ignore')
    u_out = step_f(u, u_1, u_2, f_a, Cx2, Cy2, dt2, V, step1)

    return u_out
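
# A minimal sketch (hypothetical, not the original author's stencil) of the
# elided `step` expression, assuming the standard second-order leapfrog
# scheme for the 2D wave equation on interior points; the special first
# step that uses V_in and halved coefficients is omitted. These lines would
# sit inside advance() just before theano.function is compiled:
#
#     u_xx = u_1_in[:-2, 1:-1] - 2 * u_1_in[1:-1, 1:-1] + u_1_in[2:, 1:-1]
#     u_yy = u_1_in[1:-1, :-2] - 2 * u_1_in[1:-1, 1:-1] + u_1_in[1:-1, 2:]
#     interior = (2 * u_1_in[1:-1, 1:-1] - u_2_in[1:-1, 1:-1]
#                 + Cx2_in * u_xx + Cy2_in * u_yy
#                 + dt2_in * f_a_in[1:-1, 1:-1])
#     step = T.set_subtensor(T.zeros_like(u_1_in)[1:-1, 1:-1], interior)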
Example #3
import sys, os, math
import theano.tensor as T
from theano import function
import NamedMatrix
import scipy.special as ss
from utilTCI import *

###############################################################################################
"""
The following block defines the Theano functions that are going to be used
in the regular Python functions.

1.  Calculate ln(X + Y) from ln(X) and ln(Y) using the Theano library.
"""
########### Theano function for calculating logSum
maxExp = -4950.0  # below this difference, exp() underflows and the smaller term is dropped
x, y = T.fscalars(2)

yMinusx = y - x  # used when x > y
xMinusy = x - y  # used when x < y
bigger = T.switch(T.gt(x, y), x, y)
YSubtractX = T.switch(T.gt(x, y), yMinusx, xMinusy)
x_prime = T.log(1 + T.exp(YSubtractX)) + bigger
calcSum = T.switch(T.lt(YSubtractX, maxExp), bigger, x_prime)
logSum = function([x, y], calcSum, allow_input_downcast=True)


####### end of logSum  ###############
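
# Quick numeric check (illustrative): logSum implements the log-sum-exp
# identity ln(X + Y) = max(x, y) + ln(1 + exp(min(x, y) - max(x, y))), so
# logSum(math.log(2.0), math.log(3.0)) should return ~math.log(5.0) = 1.609.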

def calcTDI(mutcnaMatrixFN, degMatrixFN, alphaNull=[1, 1], alphaIJKList=[2, 1, 1, 2],
            v0=0.3, outputPath=".", opFlag=None, rowBegin=0, rowEnd=None,
            GeGlobalDriverDict=None):
    """
Example #4
import theano.tensor as T
from theano import function, In, shared

# Multiple outputs
a, b = T.fmatrices('a', 'b')
diff = a - b
absDiff = abs(diff)
sqrDiff = diff**2

f = function([a, b], [diff, absDiff, sqrDiff])

mat2 = [[10, 5], [5, 10]]
mat3 = [[5, 10], [10, 5]]

print(f(mat2, mat3))

# Default values

x, y = T.fscalars('x', 'y')
z = x + y

f = function([x, In(y, value=0)], z)

print(f(20))
print(f(20, 10))

# Shared variables

state = shared(0)
inc = T.iscalar('inc')

accumulator = function([inc], state, updates=[(state, state + inc)])

print("state : ", state.get_value())
Example #6
import theano.tensor as T
from theano import function, Param
from pylab import *  # assumed source of arange, figure, plot, etc.

x = T.fvector('x')
x1 = T.fscalar('x1')
y = 1 / (1 + T.exp(-x))
y1 = 1 / (1 + T.exp(-x1))
logistic = function([x], y)
logistic1 = function([x1], y1)
grady = T.grad(y1, x1)
derivada = function([x1], grady)
 
a = float(input('Enter the left endpoint. \n'))
b = float(input('Enter the right endpoint. \n'))
particion = float(input('Enter the partition length of the interval. \n'))
pderiv = float(input('Enter the point at which to find the tangent line. \n'))
 
xval = arange(a, b, particion, dtype='float32')
z, w, w1 = T.fscalars('z', 'w', 'w1')
rectatg2 = (x - z) * w + w1
rectatg3 = function([x, Param(z, default=pderiv), Param(w, default=derivada(pderiv)),
                     Param(w1, default=logistic1(pderiv))], rectatg2)
 
figure(1)
 
plot(xval, logistic(xval), linewidth=1.5, color='r')
plot(xval, rectatg3(xval), linewidth=1.0, color='g')
ylim([0, 1])

xlabel(r'\textbf{Abscissa}', fontsize=12)
ylabel(r'\textit{Ordinate}', fontsize=12)
title(r"Logistic function f(x) = $\displaystyle\frac{1}{1+e^{-x}}$", fontsize=12, color='r')
legend(('Logistic Function', 'Tangent Line'), loc='upper left', shadow=True, fancybox=True)
 
leg = gca().get_legend()
Example #7

    def __init__(self, inf=1e37):

        pos, vel = T.fmatrices(['pos', 'vel'])
        nc, N, n_steps = T.iscalars(['nc', 'N', 'n_steps'])
        ra, rb, re, r0 = T.fscalars(['ra', 'rb', 're', 'r0'])
        v0, j, b = T.fscalars(['v0', 'J', 'b'])

        # `trng` is assumed to be a module-level RandomStreams instance
        # (e.g. theano.tensor.shared_randomstreams.RandomStreams).
        nu = trng.uniform(size=(N, 2), low=0.0, high=3.14159, dtype='floatX')

        def distance_tensor(X):
            E = X.reshape((X.shape[0], 1, -1)) - X.reshape((1, X.shape[0], -1))
            D = T.sqrt(T.sum(T.square(E), axis=2))
            return D

        def direction_tensor(X):
            E = X.reshape((X.shape[0], 1, -1)) - X.reshape((1, X.shape[0], -1))
            L = T.sqrt(T.sum(T.square(E), axis=2))
            L = T.pow(L + T.identity_like(L), -1)
            L = T.stack([L, L, L], axis=2)
            return L * E

        def neighbourhood(X):
            D = distance_tensor(X)
            N = T.argsort(D, axis=0)
            mask = T.cast(T.lt(N, nc), 'float32')
            return N[1:nc + 1], mask

        def alignment(X, Y):
            n, d = neighbourhood(X)
            return T.sum(Y[n], axis=0)

        def cohesion(X, inf=100.0):
            D = distance_tensor(X)
            E = direction_tensor(X)
            n, d = neighbourhood(X)

            F = T.zeros_like(E)
            D = T.stack([D, D, D], axis=2)
            d = T.stack([d, d, d], axis=2)

            c1 = T.lt(D, rb)
            c2 = T.and_(T.gt(D, rb), T.lt(D, ra))
            c3 = T.and_(T.gt(D, ra), T.lt(D, r0))

            F = T.set_subtensor(F[c1], -E[c1])
            F = T.set_subtensor(F[c2], 0.25 * (D[c2] - re) / (ra - re) * E[c2])
            F = T.set_subtensor(F[c3], E[c3])

            return T.sum(d * F, axis=0)

        def perturbation(nu=nu):
            phi = nu[:, 0]
            theta = 2.0 * nu[:, 1]

            return T.stack([
                T.sin(theta) * T.sin(phi),
                T.cos(theta) * T.sin(phi),
                T.cos(phi)
            ], axis=1)

        def step(X, dX):
            X_ = X + dX
            V_ = (j * nc / v0 * alignment(X, dX)
                  + b * cohesion(X)
                  + nc * perturbation())
            dV = T.sqrt(T.sum(T.square(V_), axis=1)).reshape(V_.shape[0], 1)
            dV = T.stack([dV, dV, dV], axis=1)
            V = v0 * V_ / dV

            return T.cast(X_, 'float32'), T.cast(V, 'float32')

        def probability(X, Y):
            n, d = neighbourhood(X)
            vDv = T.batched_dot(Y[n].swapaxes(0, 1), Y)
            p = T.exp((j / 2.0) * T.sum(vDv, axis=1))

            return p / T.sum(p)

        sim, update = theano.scan(step,
                                  outputs_info=[pos, vel],
                                  n_steps=n_steps)

        pos_, vel_ = sim

        mean_final_velocity = 1 / (N * v0) * T.sqrt(
            T.sum(T.square(T.sum(vel_[-1], axis=0))))

        particle_probability = probability(pos_[-1], vel_[-1])

        self.f = theano.function(
            [pos, vel, nc, ra, rb, r0, re, j, v0, b, N, n_steps], [pos_, vel_],
            allow_input_downcast=True)

        self.g = theano.function(
            [pos, vel, nc, ra, rb, r0, re, j, v0, b, N, n_steps],
            mean_final_velocity,
            allow_input_downcast=True)

        self.h = theano.function(
            [pos, vel, nc, ra, rb, r0, re, j, v0, b, N, n_steps],
            particle_probability,
            allow_input_downcast=True)
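
# Illustrative usage (not in the original; the class name is not shown in
# this listing). pos/vel are (N, 3) float32 arrays and the remaining
# arguments follow the input list [pos, vel, nc, ra, rb, r0, re, j, v0, b,
# N, n_steps], with arbitrary example values:
#
#     swarm = SwarmSim()                       # hypothetical class name
#     pos0 = np.random.rand(100, 3).astype('float32')
#     vel0 = np.random.rand(100, 3).astype('float32')
#     traj, vels = swarm.f(pos0, vel0, 6, 0.8, 0.2, 1.0, 0.5,
#                          0.1, 0.05, 5.0, 100, 50)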
Example #8

import theano
import theano.tensor as T
import numpy as np
from scipy.stats import norm
from theano import function
import _pickle as cPickle

# dictionary with the distributions
data = cPickle.load(
    open('C:\\Users\\user\\Desktop\\Master-Project-master\\save.pkl', 'rb'))
dict = {}
words = ['ich', 'das', 'ist', 'du', 'die']

# TensorVariables for mi, mj, si, sj, respectively.
a, b = T.fscalars('a', 'b')  # mu
c, d = T.fscalars('c', 'd')  # sigma

# Energy as a TensorVariable
E = 0.5 * (d / c + (a - b)**2 / c - 1 - T.log(d / c))
enrg = function([a, b, c, d], E)
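
# Illustrative check (not in the original): E is the closed-form KL
# divergence KL(N(b, d) || N(a, c)) for univariate Gaussians parameterized
# by mean and variance, so identical Gaussians give zero energy.
print(enrg(0.0, 0.0, 1.0, 1.0))  # 0.0
print(enrg(0.0, 1.0, 1.0, 1.0))  # 0.5, i.e. (a - b)^2 / (2 * c)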


def KLdivergence(data):

    dictKLword = []
    distList = []

    for i in range(len(data)):
        mu, std = norm.fit(data[i])
        var = np.power(std, 2)
        distList.append([mu, var])
Example #10
    def __init__(self,
                 N=None,
                 size=1,
                 mu0=0.1,
                 sigma_mean0=10,
                 sigma_std0=1.0,
                 sigma_min=0.1,
                 sigma_max=10,
                 data=None):

        self.N = N
        self.K = size

        # Parameter initialization
        #random init
        if data is None:

            # mu = random normal with mean 0 and std mu0
            # (DTYPE is assumed to be a module-level dtype constant, e.g. 'float32')
            self.mu = mu0 * np.random.randn(self.N, self.K).astype(DTYPE)

            # Sigma = random normal with mean sigma_mean0 and std sigma_std0, clipped to [sigma_min, sigma_max]
            self.Sigma = np.random.randn(self.N, 1).astype(DTYPE)
            self.Sigma *= sigma_std0
            self.Sigma += sigma_mean0
            self.Sigma = np.maximum(sigma_min,
                                    np.minimum(self.Sigma, sigma_max))
            self.Gaussian = np.concatenate((self.mu, self.Sigma), axis=1)

            # TensorVariables for mi, mj, si, sj, respectively.
            a, b = T.fvectors('a', 'b')
            c, d = T.fscalars('c', 'd')

            # Energy as a TensorVariable
            E = -0.5 * (self.K * d / c + T.sum(
                (a - b)**2 / c) - self.K - self.K * T.log(d / c))
            self.enrg = function([a, b, c, d], E)

            g1 = T.grad(E, a)  # dE/dmi
            self.f1 = function([a, b, c, d], g1)

            g2 = T.grad(E, b)  # dE/dmj
            self.f2 = function([a, b, c, d], g2)

            g3 = T.grad(E, c)  # dE/dsi
            self.f3 = function([a, b, c, d], g3)

            g4 = T.grad(E, d)  # dE/dsj
            self.f4 = function([a, b, c, d], g4)

        #non random init
        else:
            self.mu = []
            self.Sigma = []

            for i in range(len(data)):
                mu, std = norm.fit(data[i])
                var = np.power(std, 2)
                self.mu.append(mu)
                self.Sigma.append(var)

            # stack the 1-D mu and Sigma arrays end to end, then reshape to
            # (N, 2) with columns (mu, Sigma)
            self.Gaussian = np.concatenate(
                (np.asarray(self.mu), np.asarray(self.Sigma)))
            self.Gaussian = np.reshape(self.Gaussian, (2, N)).T
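
# Illustrative usage (not in the original; the class name is not shown in
# this listing):
#
#     emb = GaussianEmbedding(N=5, size=2)     # hypothetical class name
#     mi = np.zeros(2, dtype='float32')
#     mj = np.ones(2, dtype='float32')
#     print(emb.enrg(mi, mj, 1.0, 1.0))  # -0.5 * sum((mi - mj)^2) = -1.0
#     print(emb.f1(mi, mj, 1.0, 1.0))    # dE/dmi = (mj - mi) / c = [1., 1.]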