# Example #1
# 0
    def __init__(self, mean, var, bounds=None):
        """
        A univariate Gaussian probability distribution object.

        Parameters
        ----------
        mean: float
            mean of Gaussian probability distribution
        var: float
            variance of Gaussian probability distribution
        bounds: optional
            accepted but never read in this initializer
        """
        self.mean = mean
        self.var = var
        # Cache derived quantities: sigma = sqrt(var), invvar = 1/var.
        self.sigma = self.norm_var()
        self.invvar = self.invert_var()
        # NOTE(review): ND is presumably an alias for NormalDistribution,
        # defined outside this fragment — confirm.
        self.dist = ND(self.mean, self.sigma)
from pomegranate.NaiveBayes import NaiveBayes
from pomegranate.distributions import NormalDistribution

from pomegranate import *
import numpy as np
import matplotlib.pyplot as plt
import seaborn
seaborn.set_style('whitegrid')


# First we'll create the distributions for our model. In this case we'll assume that height, weight, and foot size are normally distributed. We'll fit our distribution to a set of data for males and females.

# In[2]:


# Maximum-likelihood fit of a univariate normal distribution to the
# observed heights (in centimetres) of each group.
male_heights = [182.88, 180.4416, 170.0784, 180.4416, 185.3184, 177.6984]
female_heights = [152.4, 167.64, 165.2016, 175.26, 157.5816, 152.4]

male = NormalDistribution.from_samples(male_heights)
female = NormalDistribution.from_samples(female_heights)


# Let's check on the parameters for our male and female height distributions.

# In[3]:

plt.close("all")

# Overlay the two fitted height distributions on shared axes for comparison.
male.plot(n=100000, edgecolor='c', color='c', bins=50, label='Male')
female.plot(n=100000, edgecolor='g', color='g', bins=50, label='Female')
plt.legend(fontsize=14)
plt.ylabel('Count')
# Bug fix: the fitted samples (e.g. 182.88, 152.4) are centimetres, not
# feet (182.88 cm is exactly 6 ft), so label the axis in cm.
plt.xlabel('Height (cm)')
plt.show()
# Example #3
# 0
class gauss(object):
    """
    A univariate Gaussian probability distribution object.

    Thin wrapper around an ``ND`` (presumably NormalDistribution) instance
    that also caches the standard deviation and the inverse variance.
    """

    def __init__(self, mean, var, bounds=None):
        """
        Parameters
        ----------
        mean: float
            mean of Gaussian probability distribution
        var: float
            variance of Gaussian probability distribution
        bounds: optional
            accepted but never read by this class
        """
        self.mean = mean
        self.var = var
        self.sigma = self.norm_var()
        self.invvar = self.invert_var()
        self.dist = ND(self.mean, self.sigma)

    def norm_var(self):
        """
        Return the standard deviation, i.e. the square root of the variance.
        """
        return np.sqrt(self.var)

    def invert_var(self):
        """
        Return the reciprocal of the variance.
        """
        return 1. / self.var

    def pdf(self, xs):
        """
        Alias for ``evaluate``: probability density evaluated at ``xs``.
        """
        return self.evaluate(xs)

    def evaluate_one(self, x):
        """
        Evaluate the Gaussian probability distribution at a single point.

        Parameters
        ----------
        x: float
            value at which to evaluate Gaussian probability distribution

        Returns
        -------
        p: float
            probability associated with x
        """
        p = self.dist.probability(x)
        return p

    def evaluate(self, xs):
        """
        Evaluate the Gaussian probability distribution at multiple points.

        Parameters
        ----------
        xs: numpy.ndarray, float
            input values at which to evaluate probability

        Returns
        -------
        ps: ndarray, float
            output probabilities
        """
        ps = self.dist.probability(xs)
        return ps

    def sample_one(self):
        """
        Take one sample from the Gaussian probability distribution.

        Returns
        -------
        x:
            single sample from the distribution.
            NOTE(review): ``dist.sample(1)`` presumably returns a length-1
            sequence rather than a bare float — confirm against callers.
        """
        x = self.dist.sample(1)
        return x

    def sample(self, n_samps):
        """
        Draw ``n_samps`` samples from the Gaussian probability distribution.

        Parameters
        ----------
        n_samps: positive int
            number of samples to take

        Returns
        -------
        xs: ndarray, float
            array of n_samps samples from Gaussian probability distribution
        """
        xs = np.array(self.dist.sample(n_samps))
        return xs
def default_gaussian_noise(n):
    """Build an n-dimensional independent Gaussian noise distribution.

    Each of the n components is a zero-mean NormalDistribution whose second
    parameter is drawn uniformly at random from [0, 1).
    """
    noise_components = [
        NormalDistribution(0, np.random.rand()) for _ in range(n)
    ]
    return IndependentComponentsDistribution(noise_components)
import random

from pomegranate.base import State
from pomegranate.distributions import UniformDistribution, NormalDistribution
from pomegranate.hmm import HiddenMarkovModel

from pomegranate import *

# Seed the stdlib RNG so any sampling below is reproducible.
random.seed(0)

# Create the two emission states of the model: one uniform on [0, 1],
# one normal with mean 0 and second parameter 2.

# In[2]:

state1 = State(UniformDistribution(0.0, 1.0), name="uniform")
state2 = State(NormalDistribution(0, 2), name="normal")

# Create the model as a HiddenMarkovModel instance, then register the states.

# In[3]:

model = HiddenMarkovModel(name="ExampleModel")
model.add_state(state1)
model.add_state(state2)

# Wire the start state to both emission states with equal probability.

# In[4]:

model.add_transition(model.start, state1, 0.5)
model.add_transition(model.start, state2, 0.5)
# Example #6
# 0
def default_noise(n):
    """Build an n-dimensional independent Gaussian noise distribution.

    Each of the n components is a distinct zero-mean NormalDistribution
    with fixed second parameter 0.1.
    """
    components = []
    for _ in range(n):
        # A fresh instance per component — they must not share state.
        components.append(NormalDistribution(0, 0.1))
    return IndependentComponentsDistribution(components)
# This example shows how to use yahmm to sample from an infinite HMM. The premise is that you have an HMM which does not have transitions to the end state, and so can continue on forever. This is done by not adding transitions to the end state. If you bake a model with no transitions to the end state, you get an infinite model, with no extra work! This change is passed on to all the algorithms.

# In[1]:
from pomegranate.base import State
from pomegranate.distributions import NormalDistribution
from pomegranate.hmm import HiddenMarkovModel

from pomegranate import *
import itertools as it
import numpy as np

# First we define the possible states in the model. In this case we make them all have normal distributions.

# In[2]:

# Define the model's emission states; all three are normal distributions
# with distinct means (5, 15, 25) and the same second parameter (2).
s1 = State(NormalDistribution(5, 2), name="S1")
s2 = State(NormalDistribution(15, 2), name="S2")
s3 = State(NormalDistribution(25, 2), name="S3")

# Create the HMM object, naming it, logically, "infinite".

# In[3]:

model = HiddenMarkovModel("infinite")

# Add transitions from the start state only — by never adding a transition
# to the end state, the baked model becomes infinite (see note above L197).

# In[4]:

model.add_transition(model.start, s1, 0.7)
model.add_transition(model.start, s2, 0.2)