Example #1
    def sample(self, model, evidence):
        z = evidence['z']
        T = evidence['T']
        g = evidence['g']
        h = evidence['h']
        transition_var_g = evidence['transition_var_g']
        shot_id = evidence['shot_id']

        observation_var_g = model.known_params['observation_var_g']
        observation_var_h = model.known_params['observation_var_h']
        prior_mu_g = model.hyper_params['g']['mu']
        prior_cov_g = model.hyper_params['g']['cov']
        N = len(z)
        n = len(g)

        # Make g, h, and z vector valued to avoid ambiguity
        g = g.copy().reshape((n, 1))
        h = h.copy().reshape((n, 1))

        z_g = ma.asarray(nan + zeros((n, 1)))
        obs_cov = ma.asarray(inf + zeros((n, 1, 1)))
        for i in xrange(n):
            z_i = z[shot_id == i]
            T_i = T[shot_id == i]
            if 1 in T_i and 2 in T_i:
                # Sample mean and variance for multiple observations
                n_obs_g, n_obs_h = sum(T_i == 1), sum(T_i == 2)
                obs_cov_g, obs_cov_h = observation_var_g / n_obs_g, observation_var_h / n_obs_h
                z_g[i] = (mean(z_i[T_i == 1]) / obs_cov_g +
                          mean(z_i[T_i == 2] - h[i]) / obs_cov_h) / (
                              1 / obs_cov_g + 1 / obs_cov_h)
                obs_cov[i] = 1 / (1 / obs_cov_g + 1 / obs_cov_h)
            elif 1 in T_i:
                n_obs_g = sum(T_i == 1)
                z_g[i] = mean(z_i[T_i == 1])
                obs_cov[i] = observation_var_g / n_obs_g
            elif 2 in T_i:
                n_obs_h = sum(T_i == 2)
                z_g[i] = mean(z_i[T_i == 2] - h[i])
                obs_cov[i] = observation_var_h / n_obs_h

        z_g[isnan(z_g)] = ma.masked
        obs_cov[isinf(obs_cov)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([
            prior_mu_g[0],
        ])
        kalman.initial_state_covariance = array([
            prior_cov_g[0],
        ])
        kalman.transition_matrices = eye(1)
        kalman.transition_covariance = array([
            transition_var_g,
        ])
        kalman.observation_matrices = eye(1)
        kalman.observation_covariance = obs_cov
        sampled_g = forward_filter_backward_sample(kalman, z_g, prior_mu_g,
                                                   prior_cov_g)
        return sampled_g.reshape((n, ))
Example #2
    def sample(self, model, evidence):
        z = evidence['z']
        T, g, h, sigma_g = [evidence[var] for var in ['T', 'g', 'h', 'sigma_g']]
        sigma_z_g = model.known_params['sigma_z_g']
        sigma_z_h = model.known_params['sigma_z_h']
        prior_mu_g, prior_cov_g = [model.hyper_params[var] for var in ['prior_mu_g', 'prior_cov_g']]
        n = len(g)

        # Must be a more concise way to deal with scalar vs vector
        g = g.copy().reshape((n,1))
        h = h.copy().reshape((n,1))
        z_g = ma.asarray(z.copy().reshape((n,1)))
        obs_cov = sigma_z_g**2*ones((n,1,1))
        if sum(T == 0) > 0:
            z_g[T == 0] = nan
        if sum(T == 2) > 0:
            z_g[T == 2] -= h[T == 2]
            obs_cov[T == 2] = sigma_z_h**2
        z_g[isnan(z_g)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([prior_mu_g[0],])
        kalman.initial_state_covariance = array([prior_cov_g[0,0],])
        kalman.transition_matrices = eye(1)
        kalman.transition_covariance = array([sigma_g**2,])
        kalman.observation_matrices = eye(1)
        kalman.observation_covariance = obs_cov
        sampled_g = forward_filter_backward_sample(kalman, z_g)

        return sampled_g.reshape((n,))
Example #3
def define_model(data):
    # Builds model object
    n = len(data)
    z = data.get('z')
    variable_names = ['surfaces', 'T', 'p_type', 'sigma_g', 'sigma_h']
    known_params = {
        'sigma_z_g': sigma_z_g,
        'sigma_z_h': sigma_z_h,
        'mu_h': mu_h,
        'phi': phi
    }
    hyper_params = {
        'alpha_type': array((1., 1., 1.)),
        'prior_mu_g': -25. + zeros(n),
        'prior_cov_g': 100. * eye(n),
        'prior_mu_h': 30. + zeros(n),
        'prior_cov_h': 100. * eye(n),
        'a_g': 11,
        'b_g': .1,
        'a_h': 11,
        'b_h': 40
    }
    initials = {
        'surfaces': [-25, 30] * ones((n, 2)),
        'sigma_g': .1,
        'sigma_h': 1,
        'T': array([(0 if abs(z[i] + 25) > 1 else 1) for i in xrange(n)])
    }
    #initials = {'sigma_g': sigma_g,
    #            'sigma_h': sigma_h,
    #            'T': T[:n],
    #            'g': g[:n],
    #            'h': h[:n]}
    priors = {
        'p_type': dirichlet(hyper_params['alpha_type']),
        'sigma_g': stats.invgamma(hyper_params['a_g'],
                                  scale=hyper_params['b_g']),
        'sigma_h': stats.invgamma(hyper_params['a_h'],
                                  scale=hyper_params['b_h']),
        'T': iid_dist(categorical(hyper_params['alpha_type']), n)
    }
    FCP_samplers = {
        'p_type': p_type_step(),
        'surfaces': surfaces_step(),
        'sigma_g': sigma_ground_step(),
        'sigma_h': sigma_height_step(),
        'T': type_step()
    }

    model = Model()
    model.set_variable_names(variable_names)
    model.set_known_params(known_params)
    model.set_hyper_params(hyper_params)
    model.set_priors(priors)
    model.set_initials(initials)
    model.set_FCP_samplers(FCP_samplers)
    model.set_data(data)

    return model
Example #4
def setup():
    A = pb.matrix([[0.8, -0.4], [1, 0]])
    B = pb.matrix([[1, 0], [0, 1]])
    C = pb.matrix([[1, 0], [0, 1], [1, 1]])
    Q = 2.3 * pb.matrix(pb.eye(2))
    R = 0.2 * pb.matrix(pb.eye(3))
    x0 = pb.matrix([[1], [1]])
    return LDS.LDS(A, B, C, Q, R, x0)
Example #6
    def sample(self, model, evidence):
        z = evidence['z']
        T = evidence['T']
        g = evidence['g']
        h = evidence['h']
        transition_var_g = evidence['transition_var_g']
        shot_id = evidence['shot_id']

        observation_var_g = model.known_params['observation_var_g']
        observation_var_h = model.known_params['observation_var_h']
        prior_mu_g = model.hyper_params['g']['mu'] 
        prior_cov_g = model.hyper_params['g']['cov'] 
        N = len(z)
        n = len(g)

        ## Make g, h, and z vector valued to avoid ambiguity
        #g = g.copy().reshape((n, 1))
        #h = h.copy().reshape((n, 1))
        #
        # pdb.set_trace()  # debugging breakpoint left in the original; disabled
        z_g = ma.asarray(nan + zeros(n))
        obs_cov = ma.asarray(inf + zeros(n))
        if 1 in T:
            z_g[T==1] = z[T==1]
            obs_cov[T==1] = observation_var_g
        if 2 in T:
            z_g[T==2] = z[T==2] - h[T==2]
            obs_cov[T==2] = observation_var_h
        #for i in xrange(n):
        #    z_i = z[shot_id == i]
        #    T_i = T[shot_id == i]
        #    if 1 in T_i and 2 in T_i:
        #        # Sample mean and variance for multiple observations
        #        n_obs_g, n_obs_h = sum(T_i == 1), sum(T_i == 2)
        #        obs_cov_g, obs_cov_h = observation_var_g/n_obs_g, observation_var_h/n_obs_h
        #        z_g[i] = (mean(z_i[T_i == 1])/obs_cov_g + mean(z_i[T_i == 2] - h[i])/obs_cov_h)/(1/obs_cov_g + 1/obs_cov_h)
        #        obs_cov[i] = 1/(1/obs_cov_g + 1/obs_cov_h)
        #    elif 1 in T_i:
        #        n_obs_g = sum(T_i == 1) 
        #        z_g[i] = mean(z_i[T_i == 1])
        #        obs_cov[i] = observation_var_g/n_obs_g
        #    elif 2 in T_i:
        #        n_obs_h = sum(T_i == 2) 
        #        z_g[i] = mean(z_i[T_i == 2] - h[i])
        #        obs_cov[i] = observation_var_h/n_obs_h

        z_g[isnan(z_g)] = ma.masked
        obs_cov[isinf(obs_cov)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([prior_mu_g[0],])
        kalman.initial_state_covariance = array([prior_cov_g[0],])
        kalman.transition_matrices = eye(1)
        kalman.transition_covariance = array([transition_var_g,])
        kalman.observation_matrices = eye(1)
        kalman.observation_covariance = obs_cov
        sampled_g = forward_filter_backward_sample(kalman, z_g, prior_mu_g, prior_cov_g)
        return sampled_g.reshape((n,))
Example #7
def smooth(y, smoothBeta=smoothBeta):
    m = len(y)
    
    p = pl.diff(pl.eye(m),3).transpose()
    A = pl.eye(m)+smoothBeta*pl.dot(p.transpose(),p)
    
    smoothY = pl.solve(A, y)
    
    return smoothY
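A minimal usage sketch for this penalized least-squares smoother (it assumes `import pylab as pl` and a module-level `smoothBeta` default, as the excerpt implies); a larger `smoothBeta` gives a smoother curve:

import pylab as pl

# Usage sketch: smooth a noisy sine; the penalty weight is passed explicitly.
t = pl.linspace(0., 1., 200)
y_noisy = pl.sin(2 * pl.pi * t) + 0.1 * pl.randn(200)
y_smooth = smooth(y_noisy, smoothBeta=100.)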
Example #8
def setup_nonstationary():
    T = 100
    A_ns = [pb.matrix([[0.8, -0.4], [1, 0]]) for t in range(T)]
    B = pb.matrix([[1, 0], [0, 1]])
    C = pb.matrix([[1, 0], [0, 1], [1, 1]])
    Q = 2.3 * pb.matrix(pb.eye(2))
    R = 0.2 * pb.matrix(pb.eye(3))
    x0 = pb.matrix([[1], [1]])
    return T, LDS.LDS(A_ns, B, C, Q, R, x0)
Example #10
def define_model(data):
    # Builds model object
    m = 3
    n_points = len(data)
    n_shots = len(set(data['shot_id']))
    variable_names = ['g', 'h', 'T', 'p_type', 'sigma_g', 'sigma_h']
    known_params = {'sigma_z_g': sigma_z_g,
                    'sigma_z_h': sigma_z_h,
                    'mu_h': mu_h,
                    'phi': phi}
    hyper_params = {'alpha_type': array((0, 1., 1.)),
                    'prior_mu_g': -25.+zeros(n_shots),
                    'prior_cov_g': 100.*eye(n_shots),
                    'prior_mu_h': 30.+zeros(n_shots),
                    'prior_cov_h': 100.*eye(n_shots),
                    'a_g': 6,
                    'b_g': 1,
                    'a_h': 6,
                    'b_h': 1}
    initials = {}
    #initials = {'sigma_g': sigma_g,
    #            'sigma_h': sigma_h,
    #            'T': T[:n_shots],
    #            'g': g[:n_shots],
    #            'h': h[:n_shots]}
    priors = {'p_type': dirichlet(hyper_params['alpha_type']),
              'sigma_g': stats.invgamma(hyper_params['a_g'], scale=hyper_params['b_g']),
              'sigma_h': stats.invgamma(hyper_params['a_h'], scale=hyper_params['b_h']),
              'g': mvnorm(hyper_params['prior_mu_g'], hyper_params['prior_cov_g']),
              'h': mvnorm(hyper_params['prior_mu_h'], hyper_params['prior_cov_h']),
              'T': iid_dist(categorical(hyper_params['alpha_type']/sum(hyper_params['alpha_type'])), n_points)}
    FCP_samplers = {'p_type': p_type_step(),
                    'g': ground_height_step(),
                    'h': canopy_height_step(),
                    'sigma_g': sigma_ground_step(),
                    'sigma_h': sigma_height_step(),
                    'T': type_step()}

    model = Model()
    model.set_variable_names(variable_names)
    model.set_known_params(known_params)
    model.set_hyper_params(hyper_params)
    model.set_priors(priors)
    model.set_initials(initials)
    model.set_FCP_samplers(FCP_samplers)
    model.set_data(data)

    return model
Example #11
def all_related_equally(n, sigma):
    C = pl.eye(n)
    for ii in range(n-2):
        for jj in range(n-2):
            C[ii,jj] += 1.
    C *= sigma**2.
    return C
Example #12
def mpow(A, n):
    """ 
    Returns the n-th power of A.
    If n is not an integer, it is truncated to the next smaller integer.

    ===========
    Parameters:
    ===========
    A : *array*
        the square matrix from which the n-th power should be returned
    n : *integer*
        the power

    ========
    Returns:
    ========
    B : *array*
        B = A^n

    """
    return reduce(dot, [
        eye(A.shape[0]),
    ] * 2 + [
        A,
    ] * int(n))
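A short usage check, under the assumption that `eye` and `dot` come from numpy (or pylab) and that `reduce` is imported from functools on Python 3:

from functools import reduce  # only needed on Python 3

from numpy import allclose, array, eye
from numpy.linalg import matrix_power

# Usage sketch: mpow agrees with numpy.linalg.matrix_power for integer n,
# and n = 0 yields the identity.
A = array([[2., 1.], [0., 3.]])
assert allclose(mpow(A, 3), matrix_power(A, 3))
assert allclose(mpow(A, 0), eye(2))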
Example #13
    def __init__(self, n, x_guess, P_guess, Q, R, A, H, B):
        """
        x state (n)
        z measurement (m)
        u control (l)

        A (n*n): x_k = A*x_k-1
        B (n*l): dx = B*u
        H (m*n): z_k = H*x_k
        Q (n*n) process noise covariance
        R (m*m) measurement noise covariance
        P (n*n) state noise covariance
        K Kalman gain
        """
        super(KalmanFilter, self).__init__()

        # allocate space for arrays
        self.x = np.zeros(n)      # a posteriori estimate of x
        self.x_bel = np.zeros(n)  # a priori estimate of x
        self.P = np.zeros(n)      # a posteriori error estimate
        self.P_bel = np.zeros(n)  # a priori error estimate
        self.K = np.zeros(n)      # gain or blending factor

        self.Q = Q
        self.R = R
        self.A = A
        self.H = H
        self.B = B
        self.I = pylab.eye(1)

        self.x[0] = x_guess[0]
        self.P[0] = P_guess
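The excerpt shows only the constructor; for context, here is a minimal sketch of the textbook predict/update cycle these attributes are set up for. The method names `predict` and `update` are hypothetical, not part of the original class:

import numpy as np

# Hypothetical methods (not in the original excerpt), shown only to
# illustrate how A, B, H, Q, R, P and K interact.
def predict(self, u):
    # a priori estimate: x_bel = A x + B u,  P_bel = A P A^T + Q
    self.x_bel = self.A.dot(self.x) + self.B.dot(u)
    self.P_bel = self.A.dot(self.P).dot(self.A.T) + self.Q

def update(self, z):
    # gain: K = P_bel H^T (H P_bel H^T + R)^-1
    S = self.H.dot(self.P_bel).dot(self.H.T) + self.R
    self.K = self.P_bel.dot(self.H.T).dot(np.linalg.inv(S))
    # a posteriori estimate
    self.x = self.x_bel + self.K.dot(z - self.H.dot(self.x_bel))
    self.P = (np.eye(len(self.P_bel)) - self.K.dot(self.H)).dot(self.P_bel)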
Example #14
def define_model(data):
    # Builds model object
    n = len(data)
    variable_names = ['g', 'sigma_g']
    known_params = {'sigma_z_g': sigma_z_g,
                    'T': ones(n)}
    hyper_params = {'prior_mu_g': 0+zeros(n),
                    'prior_cov_g': 100*eye(n),
                    'a_g': 0.,
                    'b_g': 0.}
    priors = {'sigma_g': stats.invgamma(hyper_params['a_g'], scale=hyper_params['b_g']),
              'g': mvnorm(hyper_params['prior_mu_g'], hyper_params['prior_cov_g'])}
    initials = {'g': g[:n],
                'sigma_g': sigma_g}
    FCP_samplers = {'g': ground_height_step(),
                    'sigma_g': sigma_ground_step()}

    model = Model()
    model.set_variable_names(variable_names)
    model.set_known_params(known_params)
    model.set_hyper_params(hyper_params)
    model.set_priors(priors)
    model.set_initials(initials)
    model.set_FCP_samplers(FCP_samplers)
    model.set_data(data)

    return model
Example #15
def testCollisionsE8(n, d=8):
    M = pylab.eye(8,8)

    S = [0.0]*n
    C = [0]*n
    #generate distances and buckets
    for i in range(n):
        p = [random() for j in xrange(d)]
        q = [p[j] + (gauss(0,1)/(d**.5)) for j in xrange(d)]
        S[i]=distance(p,q,d)
        C[i]= int(decodeE8(dot(p,M)) == decodeE8(dot(q,M)))
    
    ranges = pylab.histogram(S,30)[1]   
    bucketsCol = [0]*len(ranges)
    bucketsDis = [0]*len(ranges)

    #fill buckets with counts 
    for i in xrange(n):
        k = len(ranges)-1
        while S[i] < ranges[k]:
            k = k - 1
        if C[i]:
            bucketsCol[k] = bucketsCol[k] + 1
        else:
            bucketsDis[k] = bucketsDis[k] + 1
    print bucketsDis
    print ranges
    pylab.plot(ranges,[float(bucketsCol[i])/(float(bucketsDis[i]+.000000000001))  for i in range(len(ranges))],color='purple') 
Example #16
 def __init__(self, poscar=None, xyz=None):
     """Initialize the data members of this class"""
     # List of the chemical symbols of the atoms
     self.atom_symbols = []
     # Lattice constant, in Ångströms
     self.lattice_constant = 1.
     # 3x3 matrix containing the basis vectors of the supercell
     # in row major format
     self.basis_vectors = m.eye(3)
     # Are the ions allowed to move?
     self.selective_dynamics = False
     # Flags for each atom describing in which cartesian coordinate
     # direction the atom is allowed to move. It is thus a natomsx3
     # size list
     self.selective_flags = []
     # Are the atomic coordinates cartesian or in direct coordinates
     # If direct, cartesian coordinates can be calculated by
     # multiplying each coordinate with the basis vector matrix
     # times the lattice constant
     self.cartesian = True
     # Coordinates of the atoms
     self.atoms = m.zeros((0, 3))
     if poscar is not None:
         self.read_poscar(poscar)
     elif xyz is not None:
         self.read_xyz(xyz)
Example #17
    def sample(self, model, evidence):
        z, T, g, h, sigma_h, phi  = [evidence[var] for var in ['z', 'T', 'g', 'h', 'sigma_h', 'phi']]
        sigma_z_h = model.known_params['sigma_z_h']
        mu_h = model.known_params['mu_h']
        prior_mu_h = model.hyper_params['prior_mu_h']
        prior_cov_h = model.hyper_params['prior_cov_h']
        n = len(h)

        g = g.copy().reshape((n,1))
        h = h.copy().reshape((n,1))
        z_h = ma.asarray(z.copy().reshape((n,1)))
        if sum(T == 0) > 0:
            z_h[T == 0] = nan
        if sum(T == 1) > 0:
            z_h[T == 1] = nan
        if sum(T == 2) > 0:
            z_h[T == 2] -= g[T == 2]
        z_h[isnan(z_h)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([prior_mu_h[0],])
        kalman.initial_state_covariance = array([prior_cov_h[0,0],])
        kalman.transition_matrices = array([phi,])
        kalman.transition_covariance = array([sigma_h**2,])
        kalman.transition_offsets = mu_h*(1-phi)*ones((n, 1))
        kalman.observation_matrices = eye(1)
        kalman.observation_covariance = array([sigma_z_h**2,])
        sampled_h = forward_filter_backward_sample(kalman, z_h)

        return sampled_h.reshape((n,))
Example #18
	def _filter(self,Y):

		## initialise
		xf=self.x0
		Pf=self.P0
		# filter quantities
		xfStore =[]
		PfStore=[]

		#calculate the weights
		Wm_i,Wc_i=self.sigma_vectors_weights()

		for y in Y:
			#calculate the sigma points matrix, each column is a sigma vector
			Xi_f_=self.sigma_vectors(xf,Pf)
			#propagate sigma vectors through the non-linearity
			Xi_f=self.state_equation(Xi_f_)
			#pointwise multiply by weights and sum along y-axis
			xf_=pb.sum(Wm_i*Xi_f,1)
			xf_=xf_.reshape(self.nx,1)
			#perturbation
			Xi_perturbation=Xi_f-xf_
			weighted_Xi_perturbation=Wc_i*Xi_perturbation
			Pf_=pb.dot(Xi_perturbation,weighted_Xi_perturbation.T)+self.Sigma_e
			#measurement update equation
			Pyy=dots(self.C,Pf_,self.C.T)+self.Sigma_varepsilon 
			Pxy=pb.dot(Pf_,self.C.T)
			K=pb.dot(Pxy,pb.inv(Pyy))
			yf_=pb.dot(self.C,xf_)
			xf=xf_+pb.dot(K,(y-yf_))
			Pf=pb.dot((pb.eye(self.nx)-pb.dot(K,self.C)),Pf_)
			xfStore.append(xf)
			PfStore.append(Pf)

		return xfStore,PfStore
Example #19
    def mcmc_fit(stoch_names):
        print '\nfitting', ' '.join(stoch_names)
        mcmc = mc.MCMC([dm.vars[key] for key in stoch_names] + [dm.vars['observed_counts'], dm.vars['rate_potential'], dm.vars['priors']])
        mcmc.use_step_method(mc.Metropolis, dm.vars['log_dispersion'],
                             proposal_sd=dm.vars['dispersion_step_sd'])
        # TODO: make a wrapper function for handling this adaptive metropolis setup
        stoch_list = [dm.vars['study_coeffs'], dm.vars['region_coeffs'], dm.vars['age_coeffs_mesh']]
        d1 = len(dm.vars['study_coeffs'].value)
        d2 = len(dm.vars['region_coeffs_step_cov'])
        d3 = len(dm.vars['age_coeffs_mesh_step_cov'])
        C = pl.eye(d1+d2+d3)
        C[d1:(d1+d2), d1:(d1+d2)] = dm.vars['region_coeffs_step_cov']
        C[(d1+d2):(d1+d2+d3), (d1+d2):(d1+d2+d3)] = dm.vars['age_coeffs_mesh_step_cov']
        C *= .01
        mcmc.use_step_method(mc.AdaptiveMetropolis, stoch_list, cov=C)

        # more step methods
        mcmc.use_step_method(mc.AdaptiveMetropolis, dm.vars['study_coeffs'])
        mcmc.use_step_method(mc.AdaptiveMetropolis, dm.vars['region_coeffs'], cov=dm.vars['region_coeffs_step_cov'])
        mcmc.use_step_method(mc.AdaptiveMetropolis, dm.vars['age_coeffs_mesh'], cov=dm.vars['age_coeffs_mesh_step_cov'])

        try:
            mcmc.sample(iter=10000, burn=5000, thin=5, verbose=verbose)
        except KeyboardInterrupt:
            debug('User halted optimization routine before optimal value found')
        sys.stdout.flush()

        # reset stoch values to sample mean
        for key in stoch_names:
            mean = dm.vars[key].stats()['mean']
            if isinstance(dm.vars[key], mc.Stochastic):
                dm.vars[key].value = mean
            print key, mean.round(2)
Example #20
def animation_to_SO3(skeleton, animation):
    # Get rotation matrices for all joints and all frames
    bone_names = ['root'] + list(skeleton.bones.keys())

    frames = []
    for frame in animation.get_frames():
        frames.append(convert_to_SO3(skeleton, frame, True))

    # Convert into numpy arrays
    channels = pl.zeros((len(bone_names), len(frames), 3, 3))

    i = 0
    for key in animation.channel_order:
        if key not in bone_names:
            continue
        for j, frame in enumerate(frames):
            channels[i, j, :, :] = frame[key][:3, :3]
        i += 1

    for i in range(channels.shape[0]):
        for j in range(channels.shape[1]):
            if pl.array_equal(channels[i, j, :, :], pl.zeros((3, 3))):
                channels[i, j, :, :] = pl.eye(3)

    return channels
Example #21
def testCollisionsE8(n, d=8):
    M = pylab.eye(8, 8)

    S = [0.0] * n
    C = [0] * n
    #generate distances and buckets
    for i in range(n):
        p = [random() for j in xrange(d)]
        q = [p[j] + (gauss(0, 1) / (d**.5)) for j in xrange(d)]
        S[i] = distance(p, q, d)
        C[i] = int(decodeE8(dot(p, M)) == decodeE8(dot(q, M)))

    ranges = pylab.histogram(S, 30)[1]
    bucketsCol = [0] * len(ranges)
    bucketsDis = [0] * len(ranges)

    #fill buckets with counts
    for i in xrange(n):
        k = len(ranges) - 1
        while S[i] < ranges[k]:
            k = k - 1
        if C[i]: bucketsCol[k] = bucketsCol[k] + 1
        else: bucketsDis[k] = bucketsDis[k] + 1
    print bucketsDis
    print ranges
    pylab.plot(ranges, [
        float(bucketsCol[i]) / (float(bucketsDis[i] + .000000000001))
        for i in range(len(ranges))
    ],
               color='purple')
Example #22
def define_model(data):
    # Builds model object
    n = len(data)
    variable_names = ['g', 'sigma_g']
    known_params = {'sigma_z_g': sigma_z_g, 'T': ones(n)}
    hyper_params = {
        'prior_mu_g': 0 + zeros(n),
        'prior_cov_g': 100 * eye(n),
        'a_g': 0.,
        'b_g': 0.
    }
    priors = {
        'sigma_g': stats.invgamma(hyper_params['a_g'],
                                  scale=hyper_params['b_g']),
        'g': mvnorm(hyper_params['prior_mu_g'], hyper_params['prior_cov_g'])
    }
    initials = {'g': g[:n], 'sigma_g': sigma_g}
    FCP_samplers = {'g': ground_height_step(), 'sigma_g': sigma_ground_step()}

    model = Model()
    model.set_variable_names(variable_names)
    model.set_known_params(known_params)
    model.set_hyper_params(hyper_params)
    model.set_priors(priors)
    model.set_initials(initials)
    model.set_FCP_samplers(FCP_samplers)
    model.set_data(data)

    return model
Example #23
 def _muaConstraints( self ):
     '''
     This function ...
 
     Arguments
     ---------
     
     Keyword arguments
     -----------------
     '''
     
     npop = self.npop
     ub = self.ub
 
     # Create linear constraints
     A = pl.eye(3*npop)
     b = pl.zeros(3*npop)
     for i in xrange(npop-1):
         # setting extra constraints on z0
         A[i, i+1] = -1
         # setting constraints on pop width
         A[i + npop, i] = 1
         A[i + npop, i + 1] = -1
         A[i + npop, i + npop + 1] = 1
         # no additional constraints on slope
     # Treat the last pop width separately
     A[2*npop-1, npop - 2] = 1
     A[2*npop-1, npop - 1] = -1
     # b is set for the rows where no constraints are present
     b[npop-1] = ub[npop-1]
     # Try this with and without the maxslopewidth        
     b[2*npop:] = ub[2*npop:]
     
     return A, b
Example #24
def define_model(data):
    # Builds model object
    n = len(data)
    variable_names = ['g', 'sigma_g', 'p_type', 'T']
    known_params = {'sigma_z_g': sigma_z_g}
    hyper_params = {'prior_mu_g': 0*ones(n),
                    'prior_cov_g': 100*eye(n),
                    'alpha_type': (1., 1.),
                    'a_g': 3.,
                    'b_g': 1.}
    priors = {'sigma_g': stats.invgamma(hyper_params['a_g'], scale=hyper_params['b_g']),
              'p_type': dirichlet(hyper_params['alpha_type']),
              'T': iid_dist(categorical((1., 1.)), n),
              'g': mvnorm(hyper_params['prior_mu_g'], hyper_params['prior_cov_g'])}
    #initials = {'g': g[:n],
    #            'sigma_g': sigma_g}
    FCP_samplers = {'g': ground_height_step(),
                    'p_type': p_type_step(),
                    'T': type_step(),
                    'sigma_g': sigma_ground_step()}

    model = Model()
    model.set_variable_names(variable_names)
    model.set_known_params(known_params)
    model.set_hyper_params(hyper_params)
    model.set_priors(priors)
    #model.set_initials(initials)
    model.set_FCP_samplers(FCP_samplers)
    model.set_data(data)

    return model
Example #25
def define_model(data):
    # Builds model object
    n = len(data)
    z = data.get('z')
    variable_names = ['surfaces', 'T', 'p_type', 'sigma_g', 'sigma_h']
    known_params = {'sigma_z_g': sigma_z_g,
                    'sigma_z_h': sigma_z_h,
                    'mu_h': mu_h,
                    'phi': phi}
    hyper_params = {'alpha_type': array((1., 1., 1.)),
                    'prior_mu_g': -25.+zeros(n),
                    'prior_cov_g': 100.*eye(n),
                    'prior_mu_h': 30.+zeros(n),
                    'prior_cov_h': 100.*eye(n),
                    'a_g': 11,
                    'b_g': .1,
                    'a_h': 11,
                    'b_h': 40}
    initials = {'surfaces': [-25, 30]*ones((n,2)),
                'sigma_g': .1,
                'sigma_h': 1,
                'T': array([(0 if abs(z[i]+25)>1 else 1) for i in xrange(n)])}
    #initials = {'sigma_g': sigma_g,
    #            'sigma_h': sigma_h,
    #            'T': T[:n],
    #            'g': g[:n],
    #            'h': h[:n]}
    priors = {'p_type': dirichlet(hyper_params['alpha_type']),
              'sigma_g': stats.invgamma(hyper_params['a_g'], scale=hyper_params['b_g']),
              'sigma_h': stats.invgamma(hyper_params['a_h'], scale=hyper_params['b_h']),
              'T': iid_dist(categorical(hyper_params['alpha_type']), n)}
    FCP_samplers = {'p_type': p_type_step(),
                    'surfaces': surfaces_step(),
                    'sigma_g': sigma_ground_step(),
                    'sigma_h': sigma_height_step(),
                    'T': type_step()}

    model = Model()
    model.set_variable_names(variable_names)
    model.set_known_params(known_params)
    model.set_hyper_params(hyper_params)
    model.set_priors(priors)
    model.set_initials(initials)
    model.set_FCP_samplers(FCP_samplers)
    model.set_data(data)

    return model
Example #26
def randcov(n):
    """
    
    Generates a random, but valid, covariance matrix (size nxn)

    """
    L = pylab.randn(n, n)
    D = pylab.eye(n)
    return L.T.dot(D).dot(L)
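A quick sanity check: since `D = eye(n)`, the product `L.T D L = L.T L` is symmetric positive semidefinite by construction, so every eigenvalue is non-negative:

import numpy as np

# Sanity-check sketch: the result is a valid covariance (symmetric, PSD).
C = randcov(4)
assert np.allclose(C, C.T)
assert (np.linalg.eigvalsh(C) >= -1e-10).all()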
Example #27
def project_perp(A):
    """
    Creates a projection matrix onto the space perpendicular to the
    rowspace of A.
    """
    A = pl.matrix(A)
    I = pl.matrix(pl.eye(A.shape[1]))
    P = project(A)
    return  I - P
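`project` is referenced but not included in the excerpt; a plausible sketch of it, assuming it returns the orthogonal projector onto the rowspace of A (this is not the original helper):

import numpy as np
import pylab as pl

# Hypothetical helper (not from the original source): the orthogonal
# projector onto the rowspace of A, P = A^T (A A^T)^+ A; the pseudo-inverse
# guards against a rank-deficient A.
def project(A):
    A = pl.matrix(A)
    return A.T * pl.matrix(np.linalg.pinv(A * A.T)) * A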
Example #28
def project_perp(A):
    """
    Creates a projection matrix onto the space perpendicular to the
    rowspace of A.
    """
    A = np.matrix(A)
    I = np.matrix(pl.eye(A.shape[1]))
    P = project(A)
    return I - P
Example #29
def randcov (n):
    """
    
    Generates a random, but valid, covariance matrix (size nxn)

    """
    L = pylab.randn (n, n)
    D = pylab.eye (n)
    return L.T.dot (D).dot (L)
Example #30
    def sample(self, model, evidence):
        z = evidence['z']
        g = evidence['g']
        h = evidence['h']
        T = evidence['T']
        phi = evidence['phi']
        transition_var_h = evidence['transition_var_h']
        shot_id = evidence['shot_id']

        observation_var_h = model.known_params['observation_var_h']
        mu_h = model.known_params['mu_h']
        prior_mu_h = model.hyper_params['h']['mu']
        prior_cov_h = model.hyper_params['h']['cov']
        n = len(h)
        N = len(z)

        # Making g, h, and z vector valued to avoid ambiguity
        g = g.copy().reshape((n, 1))
        h = h.copy().reshape((n, 1))

        z_h = ma.asarray(nan + zeros((n, 1)))
        obs_cov = ma.asarray(inf + zeros((n, 1, 1)))
        for i in xrange(n):
            z_i = z[shot_id == i]
            T_i = T[shot_id == i]
            if 2 in T_i:
                # Sample mean and variance for multiple observations
                n_obs = sum(T_i == 2)
                z_h[i] = mean(z_i[T_i == 2])
                obs_cov[i] = observation_var_h / n_obs

        z_h[isnan(z_h)] = ma.masked
        obs_cov[isinf(obs_cov)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([
            prior_mu_h[0],
        ])
        kalman.initial_state_covariance = array([
            prior_cov_h[0],
        ])
        kalman.transition_matrices = array([
            phi,
        ])
        kalman.transition_covariance = array([
            transition_var_h,
        ])
        kalman.transition_offsets = mu_h * (1 - phi) * ones((n, 1))
        kalman.observation_matrices = eye(1)
        kalman.observation_offsets = g
        kalman.observation_covariance = obs_cov
        sampled_h = forward_filter_backward_sample(kalman, z_h, prior_mu_h,
                                                   prior_cov_h)

        return sampled_h.reshape((n, ))
Example #31
    def sample(self, model, evidence):
        z = evidence['z']
        T, g, h, sigma_g = [
            evidence[var] for var in ['T', 'g', 'h', 'sigma_g']
        ]
        sigma_z_g = model.known_params['sigma_z_g']
        sigma_z_h = model.known_params['sigma_z_h']
        prior_mu_g, prior_cov_g = [
            model.hyper_params[var] for var in ['prior_mu_g', 'prior_cov_g']
        ]
        n = len(g)

        # Must be a more concise way to deal with scalar vs vector
        g = g.copy().reshape((n, 1))
        h = h.copy().reshape((n, 1))
        z_g = ma.asarray(z.copy().reshape((n, 1)))
        obs_cov = sigma_z_g**2 * ones((n, 1, 1))
        if sum(T == 0) > 0:
            z_g[T == 0] = nan
        if sum(T == 2) > 0:
            z_g[T == 2] -= h[T == 2]
            obs_cov[T == 2] = sigma_z_h**2
        z_g[isnan(z_g)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([
            prior_mu_g[0],
        ])
        kalman.initial_state_covariance = array([
            prior_cov_g[0, 0],
        ])
        kalman.transition_matrices = eye(1)
        kalman.transition_covariance = array([
            sigma_g**2,
        ])
        kalman.observation_matrices = eye(1)
        kalman.observation_covariance = obs_cov
        sampled_g = forward_filter_backward_sample(kalman, z_g)

        return sampled_g.reshape((n, ))
Example #32
def scale_to_h(img, target_height, order=1, dtype=dtype('f'), cval=0):
    h, w = img.shape
    scale = target_height * 1.0 / h
    target_width = int(scale * w)
    output = interpolation.affine_transform(1.0 * img,
                                            eye(2) / scale,
                                            order=order,
                                            output_shape=(target_height,
                                                          target_width),
                                            mode='constant',
                                            cval=cval)
    output = array(output, dtype=dtype)
    return output
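A usage sketch (assuming `from scipy.ndimage import interpolation` and the numpy names used by the excerpt are in scope): scaling a 64-pixel-high image to height 32 halves its width as well, since the aspect ratio is preserved:

import numpy as np

# Usage sketch: rescale a 64x100 image to height 32, preserving aspect ratio.
img = np.random.rand(64, 100)
out = scale_to_h(img, 32)
assert out.shape == (32, 50)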
Example #33
def train_readout(states, targets, reg_fact=0):
    # train readout with linear regression
    # states... numpy array with states[i,j] the state of neuron j in example i
    # targets.. the targets for training/testing. targets[i] is target of example i
    # reg_fact..regularization factor. If set to 0, no regularization is performed
    # returns:
    #    w...weight vector
    if reg_fact == 0:
        w = np.linalg.lstsq(states, targets)[0]
    else:
        w = np.dot(np.dot(pylab.inv(reg_fact * pylab.eye(np.size(states, 1)) + np.dot(states.T, states)), states.T),
                   targets)
    return w
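For reg_fact > 0 this is the closed-form ridge solution w = (lambda*I + X^T X)^-1 X^T y; a small usage sketch with synthetic data (assumes numpy and pylab are imported as in the excerpt):

import numpy as np

# Usage sketch: recover a known readout weight vector from noisy activity.
rng = np.random.RandomState(0)
states = rng.randn(200, 5)                  # 200 examples, 5 neurons
w_true = np.array([1., -2., 0.5, 0., 3.])
targets = states.dot(w_true) + 0.01 * rng.randn(200)
w_hat = train_readout(states, targets, reg_fact=1e-3)
assert np.allclose(w_hat, w_true, atol=0.1)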
Example #34
    def convert(node):
        if node == 'root':
            transf = [pl.deg2rad(x) for x in frame['root']]
            return pl.dot(Rz(transf[5]), pl.dot(Ry(transf[4]), Rx(transf[3])))

        cbone = skeleton.bones[node]
        #Motion matrix
        M = pl.eye(4)
        try:
            for dof, val in zip(cbone.dof, frame[node]):
                val = pl.deg2rad(val)
                R = pl.eye(4)
                if dof == 'rx':
                    R = Rx(val)
                elif dof == 'ry':
                    R = Ry(val)
                elif dof == 'rz':
                    R = Rz(val)

                M = pl.dot(R, M)
        except:  #We might not have dof data for the current bone
            pass
        return M
Example #35
def lfilter_zi(b,a):
    #compute the zi state from the filter parameters. see [Gust96].

    #Based on:
    # [Gust96] Fredrik Gustafsson, Determining the initial states in forward-backward 
    # filtering, IEEE Transactions on Signal Processing, pp. 988--992, April 1996, 
    # Volume 44, Issue 4

    n=max(len(a),len(b))

    zin = (  pylab.eye(n-1) - pylab.hstack( (-a[1:n,pylab.newaxis],
                                 pylab.vstack((pylab.eye(n-2), pylab.zeros(n-2))))))

    zid=  b[1:n] - a[1:n]*b[0]

    zi_matrix=pylab.linalg.inv(zin)*(pylab.matrix(zid).transpose())
    zi_return=[]

    #convert the result into a regular array (not a matrix)
    for i in range(len(zi_matrix)):
      zi_return.append(float(zi_matrix[i][0]))

    return pylab.array(zi_return)
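SciPy ships an equivalent `scipy.signal.lfilter_zi` built on the same Gustafsson construction (identity minus the transposed companion matrix of `a`), so the two should agree; a cross-check sketch, assuming SciPy is available:

import numpy as np
from scipy import signal

# Cross-check sketch against scipy.signal.lfilter_zi on a Butterworth filter.
b, a = signal.butter(3, 0.1)
assert np.allclose(lfilter_zi(b, a), signal.lfilter_zi(b, a))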
Example #36
def adjointVectors(ksi):
    ''' Return the adjoint vectors of ksi, where ksi is composed of m vectors of size > 1
    '''
    from pylab import matrix, eye, dot, zeros
    A = matrix(dot(ksi, ksi.T))
    A = A + 1e-1 * eye(A.shape[0])
    try:
        #A = matrix( dot(ksi, ksi.T) ).I
        A = A.I
    except:
        A = zeros(dot(ksi, ksi.T).shape)
        print(
            'Non inversible dot(ksi, ksi.T) building adjoint vectors where ksi are the patterns'
        )
    return dot(A, ksi)
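A usage sketch: the returned vectors are approximately biorthogonal to the patterns, so `dot(ksi, adj.T)` is close to the identity, softened by the 1e-1 ridge term added before inversion:

import numpy as np

# Usage sketch: dot(ksi, adjointVectors(ksi).T) ~ identity.
ksi = np.random.randn(3, 10)
adj = adjointVectors(ksi)
print(np.dot(ksi, adj.T))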
Example #37
def lfilter_zi(b, a):
    #compute the zi state from the filter parameters. see [Gust96].

    #Based on:
    # [Gust96] Fredrik Gustafsson, Determining the initial states in forward-backward
    # filtering, IEEE Transactions on Signal Processing, pp. 988--992, April 1996,
    # Volume 44, Issue 4

    n = max(len(a), len(b))

    zin = (pylab.eye(n - 1) - pylab.hstack(
        (-a[1:n, pylab.newaxis],
         pylab.vstack((pylab.eye(n - 2), pylab.zeros(n - 2))))))

    zid = b[1:n] - a[1:n] * b[0]

    zi_matrix = pylab.linalg.inv(zin) * (pylab.matrix(zid).transpose())
    zi_return = []

    #convert the result into a regular array (not a matrix)
    for i in range(len(zi_matrix)):
        zi_return.append(float(zi_matrix[i][0]))

    return pylab.array(zi_return)
Example #38
 def compute_readout_weights(states, targets, reg_fact=0):
     """
     Train readout with linear regression
     :param states: numpy array with states[i, j], the state of neuron j in example i
     :param targets: numpy array with targets[i], while target i corresponds to example i
     :param reg_fact: regularization factor; 0 results in no regularization
     :return: numpy array with weights[j]
     """
     if reg_fact == 0:
         w = np.linalg.lstsq(states, targets)[0]
     else:
         w = np.dot(
             np.dot(
                 pylab.inv(reg_fact * pylab.eye(np.size(states, 1)) +
                           np.dot(states.T, states)), states.T), targets)
     return w
Example #39
    def calc_k_matrix(self):
        '''Calculate the K-matrix used to calculate the E-matrices'''
        el_len = self.coord_electrode.size
        # expanding electrode grid
        z_js = pl.zeros(el_len+2)
        z_js[1:-1] = self.coord_electrode
        z_js[-1] = self.coord_electrode[-1] + \
            pl.diff(self.coord_electrode).mean()
        
        c_vec = 1./pl.diff(z_js)
        # Define transformation matrices
        c_jm1 = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        c_j0 = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        c_jall = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        c_mat3 = pl.matrix(pl.zeros((el_len+1, el_len+1)))
        
        for i in xrange(el_len+1):
            for j in xrange(el_len+1):
                if i == j:
                    c_jm1[i+1, j+1] = c_vec[i]
                    c_j0[i, j] = c_jm1[i+1, j+1]
                    c_mat3[i, j] = c_vec[i]
        
        c_jm1[-1, -1] = 0
        
        c_jall = c_j0
        c_jall[0, 0] = 1
        c_jall[-1, -1] = 1
        
        c_j0 = 0
        
        tjp1 = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        tjm1 = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        tj0 = pl.matrix(pl.eye(el_len+2))
        tj0[0, 0] = 0
        tj0[-1, -1] = 0

        for i in xrange(1, el_len+2):
            for j in xrange(el_len+2):
                if i == j-1:
                    tjp1[i, j] = 1
                elif i == j+1:
                    tjm1[i, j] = 1
        
        # Defining K-matrix used to calculate e_mat1-3
        return (c_jm1*tjm1 + 2*c_jm1*tj0 + 2*c_jall + c_j0*tjp1)**-1 * 3 * \
            (c_jm1**2 * tj0 - c_jm1**2 * tjm1 + c_j0**2 * tjp1 - c_j0**2 * tj0)
Example #40
    def sample(self, model, evidence):
        z = evidence['z']
        g = evidence['g']
        h = evidence['h']
        T = evidence['T']
        phi  = evidence['phi']
        transition_var_h = evidence['transition_var_h']
        shot_id = evidence['shot_id']

        observation_var_h = model.known_params['observation_var_h']
        mu_h = model.known_params['mu_h']
        prior_mu_h = model.hyper_params['h']['mu']
        prior_cov_h = model.hyper_params['h']['cov']
        n = len(h)
        N = len(z)

        # Making g, h, and z vector valued to avoid ambiguity
        z_h = ma.asarray(nan + zeros(n))
        obs_cov = ma.asarray(inf + zeros(n))
        if 2 in T:
            z_h[T==2] = z[T==2]
            obs_cov[T==2] = observation_var_h
        # pdb.set_trace()  # debugging breakpoint left in the original; disabled
        #for i in xrange(n):
        #    z_i = z[shot_id == i]
        #    T_i = T[shot_id == i]
        #    if 2 in T_i:
        #        # Sample mean and variance for multiple observations
        #        n_obs = sum(T_i == 2)
        #        z_h[i] = mean(z_i[T_i == 2])
        #        obs_cov[i] = observation_var_h/n_obs

        z_h[isnan(z_h)] = ma.masked
        obs_cov[isinf(obs_cov)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([prior_mu_h[0],])
        kalman.initial_state_covariance = array([prior_cov_h[0],])
        kalman.transition_matrices = array([phi,])
        kalman.transition_covariance = array([transition_var_h,])
        kalman.transition_offsets = mu_h*(1-phi)*ones((n, 1))
        kalman.observation_matrices = eye(1)
        kalman.observation_offsets = g
        kalman.observation_covariance = obs_cov
        sampled_h = forward_filter_backward_sample(kalman, z_h, prior_mu_h, prior_cov_h)

        return sampled_h.reshape((n,))
Example #41
def regions_nested_in_superregions(n, sigma):
    C = pl.eye(n)
    for ii in range(n-2):
        for jj in range(n-2):
            C[ii,jj] += .1
    for S in superregions:
        for ii in S:
            for jj in S:
                C[ii,jj] += 10.

    C[n-2,n-2] = 11.
    C[n-1,n-1] = 11.

    #print "making prior for sex effect very uninformative"
    #C[n-1,n-1] = 1001.
    
    C *= sigma**2.

    return C
Example #42
def inverse(A):
    def LUdecomp(A):
        # use Crout's algorithm to perform LU decomposition of A
        n = len(A)
        L = pylab.zeros(A.shape)
        U = pylab.zeros(A.shape)
        for i in range(n):
            L[i, i] = 1.0
        for j in range(n):
            for i in range(j + 1):
                U[i, j] = A[i, j]
                for k in range(i):
                    U[i, j] -= L[i, k] * U[k, j]
            for i in range(j + 1, n):
                L[i, j] = A[i, j]
                for k in range(j):
                    L[i, j] -= L[i, k] * U[k, j]
                L[i, j] /= U[j, j]
        return L, U

    def solve(A, b):
        # solves the linear system A.x = b for x
        n = len(A)
        L, U = LUdecomp(A)
        x = pylab.zeros(b.shape)
        y = pylab.zeros(b.shape)
        # forward substitute to solve equation L.y = b for y
        for i in range(n):
            y[i] = b[i]
            for j in range(i):
                y[i] -= L[i, j] * y[j]
            y[i] /= L[i, i]
        # back substitute to solve equation U.x = y for x
        for i in reversed(range(n)):
            x[i] = y[i]
            for j in range(i + 1, n):
                x[i] -= U[i, j] * x[j]
            x[i] /= U[i, i]
        return x

    # when b is a matrix, solve(A,b) gives inverse of A
    B = pylab.eye(len(A))
    return solve(A, B)
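A quick usage check against numpy; note that Crout's algorithm as written does no pivoting, so it assumes the leading pivots U[j, j] are nonzero:

import numpy as np

# Usage sketch: the LU-based inverse matches numpy.linalg.inv for
# well-conditioned matrices with nonzero leading pivots.
A = np.array([[4., 2., 1.],
              [2., 5., 3.],
              [1., 3., 6.]])
assert np.allclose(inverse(A), np.linalg.inv(A))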
Example #43
    def __init__(self, A=None, IMAGES=None, gamma=0.01, lbda=0.3, eta=0.1,
                 adapt=0.96, iter=150, soft=1, display_every=1, run_gd=0,
                 run_cg=1, eta_gd=0.01, cg_maxiter=20, cg_epsilon=0.00000001,
                 cg_gtol=0.0001, L=64, M=64):
        if A is None:
            # init basis functions, normalize
            self.A = matrix(rand(L,M)-0.5)
            self.A *= matrix(eye(M)*1/sqrt(sum(self.A.A**2)))
        else:
            self.A = A

        if IMAGES is None:
            self.IMAGES = pickle.load(open(images_filename))
        else:
            self.IMAGES = IMAGES

        self.update = 0

        self.run_gd = run_gd
        self.run_cg = run_cg

        # gd params
        self.eta_gd = eta_gd

        # cg params
        self.cg_maxiter = cg_maxiter
        self.cg_epsilon = cg_epsilon
        self.cg_gtol = cg_gtol

        self.gamma = gamma

        self.display_every = display_every
        self.display_bf = 1
        self.display_coef = 0
        self.display_norm = 1
        self.display_recon = 0

        # threshold circuit params
        self.lbda = float(lbda)
        self.eta = eta
        self.adapt = adapt
        self.iter = iter
        self.soft = soft
Example #44
    def sample(self, model, evidence):
        z, T, g, h, sigma_h, phi = [
            evidence[var] for var in ['z', 'T', 'g', 'h', 'sigma_h', 'phi']
        ]
        sigma_z_h = model.known_params['sigma_z_h']
        mu_h = model.known_params['mu_h']
        prior_mu_h = model.hyper_params['prior_mu_h']
        prior_cov_h = model.hyper_params['prior_cov_h']
        n = len(h)

        g = g.copy().reshape((n, 1))
        h = h.copy().reshape((n, 1))
        z_h = ma.asarray(z.copy().reshape((n, 1)))
        if sum(T == 0) > 0:
            z_h[T == 0] = nan
        if sum(T == 1) > 0:
            z_h[T == 1] = nan
        if sum(T == 2) > 0:
            z_h[T == 2] -= g[T == 2]
        z_h[isnan(z_h)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([
            prior_mu_h[0],
        ])
        kalman.initial_state_covariance = array([
            prior_cov_h[0, 0],
        ])
        kalman.transition_matrices = array([
            phi,
        ])
        kalman.transition_covariance = array([
            sigma_h**2,
        ])
        kalman.transition_offsets = mu_h * (1 - phi) * ones((n, 1))
        kalman.observation_matrices = eye(1)
        kalman.observation_covariance = array([
            sigma_z_h**2,
        ])
        sampled_h = forward_filter_backward_sample(kalman, z_h)

        return sampled_h.reshape((n, ))
Example #45
def mpow(A, n):
    """ 
    Returns the n-th power of A.
    If n is not an integer, it is truncated to the next smaller integer.

    ===========
    Parameters:
    ===========
    A : *array*
        the square matrix from which the n-th power should be returned
    n : *integer*
        the power

    ========
    Returns:
    ========
    B : *array*
        B = A^n

    """
    return reduce(dot, [eye(A.shape[0]), ] * 2 + [A, ] * int(n))
Example #46
def ridgeFit(X, Y, phi, params, verbose=False):
    #phi = designMatrix(X, params['order'], includeConstantTerm=False)
    l = params['lambda']
    phi_avg = sum(phi)*1.0 / len(phi)
    Z = phi - phi_avg
    Y_avg = sum(Y)*1.0 / len(Y)
    Yc = Y - Y_avg
    if verbose:
        print('phi', phi)
        print('phi_avg', phi_avg)
        print('Z', Z)
        print('Yc', Yc)

    a = pl.dot(Z.T, Z) + l * pl.eye(len(Z.T))
    b = np.linalg.inv(a).dot(Z.T)
    W = b.dot(Yc)
    W_0 = np.array([Y_avg - W.T.dot(phi_avg)])
    if verbose:
        print('W_0', W_0)
        print('W', W)
    return np.hstack((W_0, W.T)).T
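A usage sketch (assuming the module imports pylab as `pl` and numpy as `np`, as the excerpt implies): fitting a line through noisy data; the returned vector stacks the intercept first:

import numpy as np

# Usage sketch: fit y = w0 + w1*x with a small ridge penalty.
x = np.linspace(0., 1., 50)
phi = x.reshape(-1, 1)                      # one-feature design matrix
y = 2.0 + 3.0 * x + 0.01 * np.random.randn(50)
w = ridgeFit(x, y, phi, {'lambda': 1e-6})
# w[0] ~ 2.0 (intercept), w[1] ~ 3.0 (slope)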
Example #47
def define_model(data):
    # Builds model object
    n = len(data)
    variable_names = ['g', 'sigma_g', 'p_type', 'T']
    known_params = {'sigma_z_g': sigma_z_g}
    hyper_params = {
        'prior_mu_g': 0 * ones(n),
        'prior_cov_g': 100 * eye(n),
        'alpha_type': (1., 1.),
        'a_g': 3.,
        'b_g': 1.
    }
    priors = {
        'sigma_g': stats.invgamma(hyper_params['a_g'],
                                  scale=hyper_params['b_g']),
        'p_type': dirichlet(hyper_params['alpha_type']),
        'T': iid_dist(categorical((1., 1.)), n),
        'g': mvnorm(hyper_params['prior_mu_g'], hyper_params['prior_cov_g'])
    }
    #initials = {'g': g[:n],
    #            'sigma_g': sigma_g}
    FCP_samplers = {
        'g': ground_height_step(),
        'p_type': p_type_step(),
        'T': type_step(),
        'sigma_g': sigma_ground_step()
    }

    model = Model()
    model.set_variable_names(variable_names)
    model.set_known_params(known_params)
    model.set_hyper_params(hyper_params)
    model.set_priors(priors)
    #model.set_initials(initials)
    model.set_FCP_samplers(FCP_samplers)
    model.set_data(data)

    return model
Example #48
def bad_eta_prior_complex_sim(n=[12, 13, 9, 17, 11, 11, 13, 8, 15], n_mis=2):
    # generate full data
    d = data.complex_hierarchical_data(n)
    
    # make some data missing
    y_w_missing = [y_j.copy() for y_j in d['y']]
    for k in range(n_mis):
        j = random.choice(range(d['J']))
        i = random.choice(range(d['n'][j]))
        y_w_missing[j][i] = pl.nan

    # generate model
    m = models.complex_hierarchical_model(y_w_missing, d['X'], d['t'])

    # replace eta prior with wrong covariance
    m['eta'].parents['C'] = pl.eye(4)

    # fit model with Normal Approx
    na = mc.NormApprox(m)
    na.fit(method='fmin_powell', verbose=True)
    na.sample(5000)

    return d, m
Example #49
 def compute_readout_weights(states, targets, reg_fact=0):
     """
     Train readout with linear regression
     :param states: numpy array with states[i, j], the state of neuron j in example i
     :param targets: numpy array with targets[i], while target i corresponds to example i
     :param reg_fact: regularization factor; 0 results in no regularization
     :return: numpy array with weights[j]
     """
     if reg_fact == 0:
         # lstsq solves the equation Xw = b for the best w 
         w = np.linalg.lstsq(states, targets)[0]
     else:
         # pylab.inv -> inverse 
         # pylab.eye -> identity matrix
          # Note that the inverse of k*I_n is (1/k)*I_n for a scalar k.

          # This is the ridge-regularized form of the normal equations,
          # A^T A x = A^T b,
          # for vectors x and b.
         w = np.dot(np.dot(pylab.inv(reg_fact * pylab.eye(np.size(states, 1)) + np.dot(states.T, states)),
                           states.T),
                    targets)
     return w
Example #50
def check_actuated_torques(hrp, q, qd, qdd, tau, J, F_ext):
    """Check actuated torques `tau` computed against the external wrench F_ext
    using Equation (8) from (Mistry, Buchli and Schaal, 2010).

    robot -- robot object
    q -- full-body configuration
    qd -- full-body velocity
    qdd -- full-body acceleration
    tau -- active joint torques
    J -- contact Jacobian
    F_ext -- contact wrench

    """
    with hrp.rave:
        hrp.rave.SetDOFValues(q)
        hrp.rave.SetDOFVelocities(qd)
        _, tc, tg = hrp.rave.ComputeInverseDynamics(qdd, returncomponents=True)
    M = hrp.compute_inertia_matrix(hrp.q)
    S = hstack([eye(50), zeros((50, 6))])
    SMS_inv = inv(dot(S, dot(inv(M), S.T)))
    S_bar = dot(SMS_inv, dot(S, inv(M))).T
    v = dot(S_bar.T, (tc + tg - dot(J.T, F_ext)))
    tau_check = dot(SMS_inv, qdd[:50]) + v
    return norm(tau - tau_check) < 1e-5
Example #51
def pseudoSpect(A,
                npts=200,
                s=2.,
                gridPointSelect=100,
                verbose=True,
                lstSqSolve=True):
    """ 
    original code from http://www.cs.ox.ac.uk/projects/pseudospectra/psa.m
    % psa.m - Simple code for 2-norm pseudospectra of given matrix A.
    %         Typically about N/4 times faster than the obvious SVD method.
    %         Comes with no guarantees!   - L. N. Trefethen, March 1999.
    
    parameter: A: the matrix to analyze
               npts: number of points at the grid
               s: axis limits (-s ... +s)
               gridPointSelect: ???
               verbose: prints progress messages
               lstSqSolve: if true, use least squares in algorithm where
                  solve could be used (probably) instead. (replacement for
                  ldivide in MatLab)
    """

    from scipy.linalg import schur, triu
    from pylab import (meshgrid, norm, dot, zeros, eye, diag, find, linspace,
                       arange, isreal, inf, ones, lstsq, solve, sqrt, randn,
                       eig, all)

    ldiv = ((lambda M1, M2: lstsq(M1, M2)[0]) if lstSqSolve else
            (lambda M1, M2: solve(M1, M2)))

    def planerot(x):
        '''
        return (G,y)
        with a matrix G such that y = G*x with y[1] = 0    
        '''
        G = zeros((2, 2))
        xn = x / norm(x)
        G[0, 0] = xn[0]
        G[1, 0] = -xn[1]
        G[0, 1] = xn[1]
        G[1, 1] = xn[0]
        return G, dot(G, x)

    xmin = -s
    xmax = s
    ymin = -s
    ymax = s
    x = linspace(xmin, xmax, npts, endpoint=False)
    y = linspace(ymin, ymax, npts, endpoint=False)
    xx, yy = meshgrid(x, y)
    zz = xx + 1j * yy

    #% Compute Schur form and plot eigenvalues:
    T, Z = schur(A, output='complex')

    T = triu(T)
    eigA = diag(T)

    # Reorder Schur decomposition and compress to interesting subspace:
    select = find(eigA.real > -250)  # % <- ALTER SUBSPACE SELECTION
    n = len(select)
    for i in arange(n):
        for k in arange(select[i] - 1, i, -1):  #:-1:i
            G = planerot([T[k, k + 1],
                          T[k, k] - T[k + 1, k + 1]])[0].T[::-1, ::-1]
            J = slice(k, k + 2)
            T[:, J] = dot(T[:, J], G)
            T[J, :] = dot(G.T, T[J, :])

    T = triu(T[:n, :n])
    I = eye(n)

    # Compute resolvent norms by inverse Lanczos iteration and plot contours:
    sigmin = inf * ones((len(y), len(x)))
    #A = eye(5)
    niter = 0
    for i in arange(len(y)):  # 1:length(y)
        if all(isreal(A)) and (ymax == -ymin) and (i > len(y) / 2):
            sigmin[i, :] = sigmin[len(y) - i, :]
        else:
            for jj in arange(len(x)):
                z = zz[i, jj]
                T1 = z * I - T
                T2 = T1.conj().T
                if z.real < gridPointSelect:  # <- ALTER GRID POINT SELECTION
                    sigold = 0
                    qold = zeros((n, 1))
                    beta = 0
                    H = zeros((100, 100))
                    q = randn(n, 1) + 1j * randn(n, 1)
                    while norm(q) < 1e-8:
                        q = randn(n, 1) + 1j * randn(n, 1)
                    q = q / norm(q)
                    for k in arange(99):
                        v = ldiv(T1, (ldiv(T2, q))) - dot(beta, qold)
                        #stop
                        alpha = dot(q.conj().T, v).real
                        v = v - alpha * q
                        beta = norm(v)
                        qold = q
                        q = v / beta
                        H[k + 1, k] = beta
                        H[k, k + 1] = beta
                        H[k, k] = alpha
                        if (alpha > 1e100):
                            sig = alpha
                        else:
                            sig = max(abs(eig(H[:k + 1, :k + 1])[0]))
                        if (abs(sigold / sig - 1) < .001) or (sig < 3
                                                              and k > 2):
                            break
                        sigold = sig
                        niter += 1
                        #print 'niter = ', niter

                #%text(x(jj),y(i),num2str(k))         % <- SHOW ITERATION COUNTS
                    sigmin[i, jj] = 1. / sqrt(sig)
                #end
                #  end
        if verbose:
            print 'finished line ', str(i), ' out of ', str(len(y))

    return x, y, sigmin
Example #52
def pseudoSpect(A, npts=200, s=2., gridPointSelect=100, verbose=True,
                lstSqSolve=True):
    """ 
    original code from http://www.cs.ox.ac.uk/projects/pseudospectra/psa.m
    % psa.m - Simple code for 2-norm pseudospectra of given matrix A.
    %         Typically about N/4 times faster than the obvious SVD method.
    %         Comes with no guarantees!   - L. N. Trefethen, March 1999.
    
    parameter: A: the matrix to analyze
               npts: number of points at the grid
               s: axis limits (-s ... +s)
               gridPointSelect: ???
               verbose: prints progress messages
               lstSqSolve: if true, use least squares in algorithm where
                  solve could be used (probably) instead. (replacement for
                  ldivide in MatLab)
    """
    
    from scipy.linalg import schur, triu
    from pylab import (meshgrid, norm, dot, zeros, eye, diag, find,  linspace,                       
                       arange, isreal, inf, ones, lstsq, solve, sqrt, randn,
                       eig, all)

    ldiv = (lambda M1,M2: lstsq(M1,M2)[0]) if lstSqSolve else (lambda M1,M2: solve(M1,M2))

    def planerot(x):
        '''
        return (G,y)
        with a matrix G such that y = G*x with y[1] = 0    
        '''
        G = zeros((2,2))
        xn = x / norm(x)
        G[0,0] = xn[0]
        G[1,0] = -xn[1]
        G[0,1] = xn[1]
        G[1,1] = xn[0]
        return G, dot(G,x)

    xmin = -s
    xmax = s
    ymin = -s
    ymax = s;  
    x = linspace(xmin,xmax,npts,endpoint=False)
    y = linspace(ymin,ymax,npts,endpoint=False)
    xx,yy = meshgrid(x,y)
    zz = xx + 1j*yy
     
    #% Compute Schur form and plot eigenvalues:
    T,Z = schur(A,output='complex');
        
    T = triu(T)
    eigA = diag(T)
    
    # Reorder Schur decomposition and compress to interesting subspace:
    select = find( eigA.real > -250)           # % <- ALTER SUBSPACE SELECTION
    n = len(select)
    for i in arange(n):
        for k in arange(select[i]-1,i,-1): #:-1:i
            G = planerot([T[k,k+1],T[k,k]-T[k+1,k+1]] )[0].T[::-1,::-1]
            J = slice(k,k+2)
            T[:,J] = dot(T[:,J],G)
            T[J,:] = dot(G.T,T[J,:])
          
    T = triu(T[:n,:n])
    I = eye(n);
    
    # Compute resolvent norms by inverse Lanczos iteration and plot contours:
    sigmin = inf*ones((len(y),len(x)));
    #A = eye(5)
    niter = 0
    for i in arange(len(y)): # 1:length(y)        
        if all(isreal(A)) and (ymax == -ymin) and (i > len(y)/2):
            sigmin[i,:] = sigmin[len(y) - i,:]
        else:
            for jj in arange(len(x)):
                z = zz[i,jj]
                T1 = z * I - T 
                T2 = T1.conj().T
                if z.real < gridPointSelect:    # <- ALTER GRID POINT SELECTION
                    sigold = 0
                    qold = zeros((n,1))
                    beta = 0
                    H = zeros((100,100))                
                    q = randn(n,1) + 1j*randn(n,1)                
                    while norm(q) < 1e-8:
                        q = randn(n,1) + 1j*randn(n,1)                
                    q = q/norm(q)
                    for k in arange(99):
                        v = ldiv(T1,(ldiv(T2,q))) - dot(beta,qold)
                        #stop
                        alpha = dot(q.conj().T, v).real
                        v = v - alpha*q
                        beta = norm(v)
                        qold = q
                        q = v/beta
                        H[k+1,k] = beta
                        H[k,k+1] = beta
                        H[k,k] = alpha
                        if (alpha > 1e100):
                            sig = alpha 
                        else:
                            sig = max(abs(eig(H[:k+1,:k+1])[0]))
                        if (abs(sigold/sig-1) < .001) or (sig < 3 and k > 2):
                            break
                        sigold = sig
                        niter += 1
                        #print 'niter = ', niter
                
                  #%text(x(jj),y(i),num2str(k))         % <- SHOW ITERATION COUNTS
                    sigmin[i,jj] = 1./sqrt(sig);
                #end
                #  end
        if verbose:
            print 'finished line ', str(i), ' out of ', str(len(y))
    
    return x,y,sigmin
Example #53
def uninformative(n, sigma):
    print 'using uninformative regional similarity prior'
    C = pl.eye(n) * 11. * sigma**2.
    return C
Example #54
def T(t):
    M = pl.eye(4)
    M[:3, 3] = t
    return M
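A usage sketch: `T(t)` builds a 4x4 homogeneous transform whose action on a homogeneous point is a translation by `t`:

import pylab as pl

# Usage sketch: translate the origin by t = (1, 2, 3).
M = T([1., 2., 3.])
p = pl.array([0., 0., 0., 1.])      # homogeneous point at the origin
assert pl.allclose(pl.dot(M, p), [1., 2., 3., 1.])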
Example #55
#Define sensor geometry
sensor_center=pb.matrix([[0],[0]])
sensor_width=0.9**2 
sensor_kernel=basis(sensor_center,sensor_width,dimension)

#Define sigmoidal activation function and inverse synaptic time constant
fmax=10
v0=2
varsigma=0.8
act_fun=ActivationFunction(v0,fmax,varsigma)
#inverse synaptic time constant
zeta=100
#define field initialisation and number of iterations for estimation
mean=[0]*len(Phi)
P0=10*pb.eye(len(mean))
x0=pb.matrix(pb.multivariate_normal(mean,P0,[1])).T
number_of_iterations=10
#ignore first 100 observations allowing the model's initial transients to die out
First_n_observations=100

#populate the model
NF_model=NF(NF_Connectivity_kernel,sensor_kernel,obs_locns,observation_locs_mm,gamma,gamma_weight,Sigma_varepsilon,act_fun,zeta,Ts,field_space,spacestep)
IDE_model=IDE(IDE_Connectivity_kernel,IDE_field,sensor_kernel,obs_locns,gamma,gamma_weight,Sigma_varepsilon,act_fun,x0,P0,zeta,Ts,field_space,spacestep)
#generate the Neural Field model
NF_model.gen_ssmodel()
V,Y=NF_model.simulate(T)
#generate the reduced model (state space model)
IDE_model.gen_ssmodel()
#estimate the states, the connectivity kernel parameters and the synaptic dynamics
ps_estimate=para_state_estimation(IDE_model)