Code Example #1
def createSignalModelExponential(data):
    """
    Toy model that treats the first ~10% of the waveform as an exponential.
    Does a good job of finding the start time (t_0).
    Since this was made as a toy, it is very brittle: the waveform must be normalized.
    """
    print "Creating model"
    switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))

    noise_sigma = HalfNormal('noise_sigma', tau=sigToTau(.01))
    exp_sigma = HalfNormal('exp_sigma', tau=sigToTau(.05))

    #Modeling these parameters this way is why wf needs to be normalized
    exp_rate = Uniform('exp_rate', lower=0, upper=.1)
    exp_scale = Uniform('exp_scale', lower=0, upper=.1)

    timestamp = np.arange(0, len(data), dtype=np.float)

    @deterministic(plot=False, name="test")
    def uncertainty_model(s=switchpoint, n=noise_sigma, e=exp_sigma):
        ''' Concatenate noise sigmas around the switchpoint '''
        out = np.empty(len(data))
        out[:s] = n
        out[s:] = e
        return out

    @deterministic
    def tau(eps=uncertainty_model):
        return np.power(eps, -2)

##  @deterministic(plot=False, name="test2")
##  def adjusted_scale(s=switchpoint, s1=exp_scale):
##    out = np.empty(len(data))
##    out[:s] = s1
##    out[s:] = s1
##    return out
#
#  scale_param = adjusted_scale(switchpoint, exp_scale)

    @deterministic(plot=False)
    def baseline_model(s=switchpoint, r=exp_rate, scale=exp_scale):
        out = np.zeros(len(data))
        out[s:] = scale * (np.exp(r * (timestamp[s:] - s)) - 1.)

        #    plt.figure(fig.number)
        #    plt.clf()
        #    plt.plot(out ,color="blue" )
        #    plt.plot(data ,color="red" )
        #    value = raw_input('  --> Press q to quit, any other key to continue\n')

        return out

    baseline_observed = Normal("baseline_observed",
                               mu=baseline_model,
                               tau=tau,
                               value=data,
                               observed=True)
    return locals()
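
The function assumes pymc's names (DiscreteUniform, HalfNormal, Uniform, Normal, deterministic) are imported at module level and that sigToTau is a project helper converting a sigma into a precision. Since it returns locals(), a typical way to fit it is the sketch below, where data stands in for a normalized waveform and the iteration counts are arbitrary:

import pymc

# Build an MCMC sampler from the dictionary of model variables and sample it.
M = pymc.MCMC(createSignalModelExponential(data))
M.sample(iter=10000, burn=1000)

# Posterior mean of the start-time index t_0.
print M.trace('switchpoint')[:].mean()
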
Code Example #2
def MakeModel(sig1, max_hermit_level=7):
    '''Create P wave delineation model for MCMC.'''
    # Load ECG segment array
    sig1 = np.array(sig1, dtype=np.float32)

    # Length of the ECG segment
    len_sig1 = sig1.size

    common_length = len_sig1

    hermit_coefs = list()
    # Dc baseline
    # coef = pymc.Normal('hc0', mu = 0, tau = 0.003)
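    # NOTE: with these fractional bounds an integer-valued DiscreteUniform is pinned to 0;
    # a continuous pymc.Uniform prior is probably what was intended for the DC baseline.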
    coef = DiscreteUniform('hc0', lower=-0.05, upper=0.05, doc='hc0')
    hermit_coefs.append(coef)
    # level 1
    ind = 1
    hermit_coefs.append(pymc.Normal('hc%d' % ind, mu=0, tau=1))
    # level 2
    ind = 2
    hermit_coefs.append(pymc.Normal('hc%d' % ind, mu=0, tau=1))
    for ind in xrange(3, HermitFunction_max_level):
        coef = pymc.Normal('hc%d' % ind, mu=0, tau=1)
        hermit_coefs.append(coef)

    @deterministic(plot=False)
    def wave_diff(hermit_coefs=hermit_coefs, ):
        ''' Concatenate wave.'''

        out = sig1[:common_length]
        fitting_curve = np.zeros(common_length, )
        for level, coef in zip(xrange(0, max_hermit_level), hermit_coefs):
            fitting_curve += HermitFunction(level, int(common_length)) * coef

        return out - fitting_curve

    diff_sig = pymc.Normal('diff_sig',
                           mu=wave_diff,
                           tau=17,
                           value=[0] * common_length,
                           observed=True)

    return locals()
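
HermitFunction and HermitFunction_max_level are helpers defined elsewhere in the source project; the model only assumes HermitFunction(level, length) returns a length-long basis array for the given level. A hypothetical stand-in along those lines (not the project's actual implementation), built on SciPy's Hermite polynomials:

import numpy as np
from scipy.special import eval_hermite

HermitFunction_max_level = 8  # assumed module-level constant


def HermitFunction(level, length):
    '''Hypothetical stand-in: level-th Hermite function sampled over the segment.'''
    x = np.linspace(-3.0, 3.0, length)
    # Hermite polynomial times a Gaussian envelope, crudely normalized to unit peak.
    h = eval_hermite(level, x) * np.exp(-x ** 2 / 2.0)
    return h / np.max(np.abs(h))
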
Code Example #3
def createSignalModel(data):
    #set up your model parameters
    switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))
    early_sigma = HalfNormal('early_sigma', tau=sigToTau(1))
    late_sigma = HalfNormal('late_sigma', tau=sigToTau(1))
    early_mu = Normal('early_mu', mu=.5, tau=sigToTau(1))
    late_mu = Normal('late_mu', mu=.5, tau=sigToTau(1))

    #set up the model for uncertainty (ie, the noise) and the signal (ie, the step function)

    ############################
    @deterministic(plot=False, name="test")
    def uncertainty_model(s=switchpoint, n=early_sigma, e=late_sigma):
        #Concatenate Uncertainty sigmas (or taus or whatever) around t0
        s = int(np.around(s))
        out = np.empty(len(data))
        out[:s] = n
        out[s:] = e
        return out

    ############################
    @deterministic
    def tau(eps=uncertainty_model):
        #pymc parameterizes the Gaussian with tau (precision) instead of sigma, which is a bit awkward
        return np.power(eps, -2)

    ############################
    @deterministic(plot=False, name="siggenmodel")
    def signal_model(s=switchpoint, e=early_mu, l=late_mu):
        #makes the step function using the means
        out = np.zeros(len(data))
        out[:s] = e
        out[s:] = l
        return out

    ############################

    #Full model: normally distributed noise around a step function
    baseline_observed = Normal("baseline_observed",
                               mu=signal_model,
                               tau=tau,
                               value=data,
                               observed=True)
    return locals()
Code Example #4
from pymc import DiscreteUniform, Exponential, deterministic, Poisson, Uniform, Lambda, MCMC, observed, poisson_like
from pymc.distributions import Impute
from numpy.ma import masked_array
import numpy as np

# Missing values indicated by -999 placeholder values
disasters_array = np.array([
    4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6, 3, 3, 5, 4, 5, 3, 1, 4, 4,
    1, 5, 5, 3, 4, 2, 5, 2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0, 1,
    0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 2, 1, 0,
    0, 0, 1, 1, 0, 2, 3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4, 0, 0,
    0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1
])

# Switchpoint
switch = DiscreteUniform('switch', lower=0, upper=110)
# Early mean
early_mean = Exponential('early_mean', beta=1)
# Late mean
late_mean = Exponential('late_mean', beta=1)


@deterministic(plot=False)
def rates(s=switch, e=early_mean, l=late_mean):
    """Allocate appropriate mean to time series"""
    out = np.empty(len(disasters_array))
    # Early mean prior to switchpoint
    out[:s] = e
    # Late mean following switchpoint
    out[s:] = l
    return out
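
The excerpt ends after the rate function, before the likelihood. A minimal completion assuming the standard PyMC 2 missing-data pattern that the masked_array and Impute imports point to (the names masked_data and disasters are assumptions):

# Mask the -999 placeholders so those years become latent variables,
# then impute them jointly while fitting the Poisson likelihood.
masked_data = masked_array(disasters_array, mask=disasters_array == -999)
disasters = Impute('disasters', Poisson, masked_data, mu=rates)
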
Code Example #5
File: coal_distasters.py  Project: copr/diplomka
from pymc import DiscreteUniform, Exponential, deterministic, Poisson, Uniform
import numpy as np

disasters_array = np.array([ 4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
                   3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
                   2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,
                   1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
                   0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
                   3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
                   0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])

switchpoint = DiscreteUniform('switchpoint', lower=0, upper=110,
                              doc='Switchpoint[year]')

early_mean = Exponential('early_mean', beta=1.)
late_mean = Exponential('late_mean', beta=1.)

@deterministic(plot=False)
def rate(s=switchpoint, e=early_mean, l=late_mean):
    out = np.empty(len(disasters_array))
    out[:s] = e
    out[s:] = l
    return out

disasters = Poisson('disasters', mu=rate, value=disasters_array, observed=True)
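
A minimal sketch of fitting this changepoint model with PyMC 2's sampling API (iteration counts are arbitrary):

import pymc

# Collect the model variables, sample, and summarize the posterior.
M = pymc.MCMC([switchpoint, early_mean, late_mean, rate, disasters])
M.sample(iter=10000, burn=1000)
print M.stats()['switchpoint']['mean']
pymc.Matplot.plot(M)
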
Code Example #6
from pymc import DiscreteUniform, Exponential, deterministic, Poisson, Uniform, Lambda, MCMC, observed, poisson_like
from pymc.distributions import Impute
import numpy as np

# Missing values indicated by None placeholders
disasters_array = np.array([
    4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6, 3, 3, 5, 4, 5, 3, 1, 4, 4,
    1, 5, 5, 3, 4, 2, 5, 2, 2, 3, 4, 2, 1, 3, None, 2, 1, 1, 1, 1, 3, 0, 0, 1,
    0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 2, 1, 0,
    0, 0, 1, 1, 0, 2, 3, 3, 1, None, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4, 0, 0,
    0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1
])

# Switchpoint
s = DiscreteUniform('s', lower=0, upper=110)
# Early mean
e = Exponential('e', beta=1)
# Late mean
l = Exponential('l', beta=1)


@deterministic(plot=False)
def r(s=s, e=e, l=l):
    """Allocate appropriate mean to time series"""
    out = np.empty(len(disasters_array))
    # Early mean prior to switchpoint
    out[:s] = e
    # Late mean following switchpoint
    out[s:] = l
    return out
Code Example #7
File: lg_1break.py  Project: copr/diplomka
from pymc import DiscreteUniform, Uniform, Normal, deterministic
import numpy as np

# data = load('data/bran_body_weight.txt', 33)
# converted_data = [(float(x[0]), float(x[1]), float(x[2])) for x in data]
# xs = np.array([x[1] for x in converted_data])
# ys = np.array([x[2] for x in converted_data])

xs = np.array([1,2,3,4,5,6,7,8,9,10])
ys = np.array([1,1,1,1,1,2,3,4,5,6]) + np.random.normal(0, 2, len(xs))


b00 = Uniform("b00", -50, 50)
b01 = Uniform("b01", -50, 50)

b10 = Uniform("b10", -50, 50)
b11 = Uniform("b11", -50, 50)

switchpoint = DiscreteUniform("switch", min(xs), max(xs))

err = Uniform("err", 50, 500)

x_weight = Normal("weight", 0, 1, value=xs, observed=True)

@deterministic(plot=False)
def pred(b00=b00, b01=b01, b10=b10, b11=b11, s=switchpoint):
    out = np.empty(len(xs))
    breakk = s
    out[:breakk] = b00 + b01*xs[:breakk]
    out[breakk:] = b10 + b11*xs[breakk:]
    return out

y = Normal("y", mu=pred, tau=err, value=ys, observed=True)
Code Example #8
def MakeModel(sig, annots, max_hermit_level=7):
    '''Create P wave delineation model for MCMC.
    annots: Must contain Ponset, P and Poffset
    '''
    # Load ECG segment array
    sig = np.array(sig, dtype=np.float32)

    # Length of the ECG segment
    len_sig = sig.size

    common_length = len_sig

    hermit_coefs = list()
    # Dc baseline
    # coef = pymc.Normal('hc0', mu = 0, tau = 0.003)
    coef = DiscreteUniform('hc0', lower=-0.05, upper=0.05, doc='hc0')
    hermit_coefs.append(coef)
    # level 1
    ind = 1
    hermit_coefs.append(pymc.Normal('hc%d' % ind, mu=0, tau=1))
    # level 2
    ind = 2
    hermit_coefs.append(pymc.Normal('hc%d' % ind, mu=0, tau=900))
    for ind in xrange(3, HermitFunction_max_level):
        coef = pymc.Normal('hc%d' % ind, mu=0, tau=1)
        hermit_coefs.append(coef)

    # Gaussian
    pos_ponset = None
    pos_p = None
    pos_poffset = None
    for pos, label in annots:
        if label == 'Ponset':
            pos_ponset = pos
        elif label == 'P':
            pos_p = pos
        elif label == 'Poffset':
            pos_poffset = pos
    # Validation check
    if None in [pos_p, pos_ponset, pos_poffset]:
        raise StandardError(
            'P wave annotations are not complete in this segment!')

    # val1 = sig[pos_p]
    # val2 = sig[pos_ponset]
    # val3 = sig[pos_poffset]
    # print sig[pos_p] - sig[pos_ponset]
    # print sig[pos_p] - sig[pos_poffset]
    # pdb.set_trace()
    gaussian_amplitude = max(abs(sig[pos_p] - sig[pos_ponset]),
                             abs(sig[pos_p] - sig[pos_poffset]))
    gaussian_amplitude = pymc.Normal('g_amp', mu=gaussian_amplitude, tau=100)
    gaussian_sigma = (abs(pos_p - pos_ponset) + abs(pos_p - pos_poffset)) / 2.0
    gaussian_sigma = pymc.Normal('g_sigma', mu=gaussian_sigma, tau=0.1)
    gaussian_dc = pymc.Uniform('dc',
                               lower=np.min(np.array(sig)),
                               upper=np.max(np.array(sig)),
                               doc='dc')

    @deterministic(plot=False)
    def wave_diff(hermit_coefs=hermit_coefs,
                  amp=gaussian_amplitude,
                  sigma=gaussian_sigma,
                  dc=gaussian_dc):
        ''' Concatenate wave.'''

        out = sig[:common_length]
        fitting_curve = np.zeros(common_length, )
        for level, coef in zip(xrange(0, max_hermit_level), hermit_coefs):
            fitting_curve += HermitFunction(level, int(common_length)) * coef

        gaussian_curve = GetGaussianPwave(common_length * 2, amp, sigma / 3,
                                          dc)

        gaussian_segment = gaussian_curve[common_length -
                                          pos_p:2 * common_length - pos_p]
        fitting_curve += gaussian_segment

        return out - fitting_curve

    diff_sig = pymc.Normal('diff_sig',
                           mu=wave_diff,
                           tau=17,
                           value=[0] * common_length,
                           observed=True)

    return locals()
Code Example #9
from pymc import DiscreteUniform

appointment_time = DiscreteUniform('appt time',
                                   lower=8,
                                   upper=17,
                                   doc='Appointment time in 24 hour clock')

print appointment_time.value
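
The value attribute holds the current draw; calling random() on a PyMC 2 stochastic re-draws it from the prior. A small usage sketch:

# Re-draw the appointment time a few times; each value is an integer in [8, 17].
for _ in xrange(3):
    appointment_time.random()
    print appointment_time.value
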
Code Example #10
File: markovModel.py  Project: bdyer8/Markov
#     np.array([ 4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
#                   3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
#                   2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,
#                   1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
#                   0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
#                   3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
#                   0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])

from pymc import DiscreteUniform, Normal, deterministic
from numpy.random import normal
import numpy as np

dataSet = np.ones(100)
#dataSet[20:60]=2
dataSet[60:100] = 3
dataSet = dataSet + normal(0, .1, dataSet.size)

#scatter(range(dataSet.size),dataSet,c=(.98,.4,.4))

changepoint = DiscreteUniform('changepoint', lower=0, upper=100)
early_mean = Normal('early_mean', 2, 1)
late_mean = Normal('late_mean', 2, 1)


@deterministic(plot=False)
def rate(s=changepoint, e=early_mean, l=late_mean):
    ''' Concatenate Normal means '''
    out = np.empty(len(dataSet))
    out[:s] = e
    out[s:] = l
    return out


datapoints = Normal('datapoints',
                    mu=rate,
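
The listing is truncated in the middle of this call. A plausible completion of the observed node (the precision is an assumption chosen to match the 0.1-sigma noise added to dataSet above; the original file may use different values):

datapoints = Normal('datapoints',
                    mu=rate,
                    tau=100,  # assumed: tau = 1 / 0.1**2
                    value=dataSet,
                    observed=True)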
Code Example #11
'''
Model:

    Claim: advertising on the tube increased the rate of conversions.

    (Conversions_t | start_t, before_rate, after_rate)  ~ Poisson(rate_t)

    start_t ~ DiscreteUniform(first_period, last_period)  //can be any period with equal probability

    rate_t = before_rate for t < start_t, after_rate for t >= start_t

'''

from pymc import DiscreteUniform, Exponential, Poisson, deterministic
import numpy as np

conversions_data = np.array(
    [1, 2, 1, 2, 3, 4, 1, 2, 3, 5, 7, 3, 8, 7, 4, 5, 7])
start_t = DiscreteUniform("start_t", lower=0, upper=len(conversions_data))

before_mean = Exponential('before_mean', beta=1.)
after_mean = Exponential('after_mean', beta=1.)


@deterministic(plot=False)
def rate(start_period=start_t,
         before_period_mean=before_mean,
         after_period_mean=after_mean):
    output = np.empty(len(conversions_data))
    output[:start_period] = before_period_mean
    output[start_period:] = after_period_mean
    return output
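
The excerpt stops before the observed node. A minimal completion of the Poisson likelihood described in the model docstring (the variable name conversions is an assumption):

conversions = Poisson('conversions',
                      mu=rate,
                      value=conversions_data,
                      observed=True)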