def arc_distance_theano_broadcast_prepare(dtype='float64'):
    """
    Calculates the pairwise arc distance between all points in vector a and b.
    """
    a = tensor.matrix(dtype=str(dtype))
    b = tensor.matrix(dtype=str(dtype))

    theta_1 = a[:, 0][:, None]
    theta_2 = b[:, 0][None, :]
    phi_1 = a[:, 1][:, None]
    phi_2 = b[:, 1][None, :]

    temp = (tensor.sin((theta_2 - theta_1) / 2)**2
            +
            tensor.cos(theta_1) * tensor.cos(theta_2)
            * tensor.sin((phi_2 - phi_1) / 2)**2)
    distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
                                          tensor.sqrt(1 - temp)))
    name = "arc_distance_theano_broadcast"
    rval = theano.function([a, b],
                           distance_matrix,
                           name=name)
    rval.__name__ = name

    return rval
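# Usage sketch (an addition, not part of the original snippet; it assumes the
# module-level `import theano` / `from theano import tensor` this file relies on):
# compile the graph above and feed it plain numpy arrays of (theta, phi) pairs.
import numpy as np

arc_broadcast = arc_distance_theano_broadcast_prepare('float64')
pts_a = np.random.uniform(0, np.pi, size=(100, 2))
pts_b = np.random.uniform(0, np.pi, size=(80, 2))
print(arc_broadcast(pts_a, pts_b).shape)  # (100, 80): one arc distance per (a, b) pair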
Example #2
def poseDiff2D(startingPose, endingPose):
    x1, y1, theta1 = endingPose[0], endingPose[1], endingPose[2]
    x2, y2, theta2 = startingPose[0], startingPose[1], startingPose[2]
    dx = (x1 - x2)*T.cos(theta2) + (y1 - y2)*T.sin(theta2)
    dy = -(x1 - x2)*T.sin(theta2) + (y1 - y2)*T.cos(theta2)
    dtheta = normalizeAngle(theta1 - theta2)
    return dx, dy, dtheta
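# For intuition, the same relative-pose computation with concrete numpy values
# (an added sketch; normalizeAngle is assumed to wrap angles into (-pi, pi],
# which arctan2(sin, cos) does below):
import numpy as np

start = np.array([1.0, 2.0, np.pi / 2])  # x, y, theta of the starting pose
end = np.array([1.0, 3.0, np.pi])        # x, y, theta of the ending pose
dx = (end[0] - start[0]) * np.cos(start[2]) + (end[1] - start[1]) * np.sin(start[2])
dy = -(end[0] - start[0]) * np.sin(start[2]) + (end[1] - start[1]) * np.cos(start[2])
dtheta = np.arctan2(np.sin(end[2] - start[2]), np.cos(end[2] - start[2]))
print(dx, dy, dtheta)  # approximately (1.0, 0.0, pi/2): the end pose in the start pose's frame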
def arc_distance_theano_alloc_prepare(dtype='float64'):
    """
    Calculates the pairwise arc distance between all points in vector a and b.
    """
    a = tensor.matrix(dtype=str(dtype))
    b = tensor.matrix(dtype=str(dtype))
    # Theano doesn't implement every case of tile, so we build the equivalent with alloc.
    #theta_1 = tensor.tile(a[:, 0], (b.shape[0], 1)).T
    theta_1 = tensor.alloc(a[:, 0], b.shape[0], b.shape[0]).T
    phi_1 = tensor.alloc(a[:, 1], b.shape[0], b.shape[0]).T

    theta_2 = tensor.alloc(b[:, 0], a.shape[0], a.shape[0])
    phi_2 = tensor.alloc(b[:, 1], a.shape[0], a.shape[0])

    temp = (tensor.sin((theta_2 - theta_1) / 2)**2
            +
            tensor.cos(theta_1) * tensor.cos(theta_2)
            * tensor.sin((phi_2 - phi_1) / 2)**2)
    distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
                                          tensor.sqrt(1 - temp)))
    name = "arc_distance_theano_alloc"
    rval = theano.function([a, b],
                           distance_matrix,
                           name=name)
    rval.__name__ = name

    return rval
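# Usage sketch (an addition): unlike the broadcast version, the alloc shapes above
# tie `a` and `b` to the same number of rows.
import numpy as np

arc_alloc = arc_distance_theano_alloc_prepare('float64')
pts_a = np.random.uniform(0, np.pi, size=(64, 2))
pts_b = np.random.uniform(0, np.pi, size=(64, 2))
print(arc_alloc(pts_a, pts_b).shape)  # (64, 64)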
Example #4
    def get_celerite_matrices(self, x, diag):
        x = tt.as_tensor_variable(x)
        diag = tt.as_tensor_variable(diag)
        ar, cr, ac, bc, cc, dc = self.coefficients
        a = diag + tt.sum(ar) + tt.sum(ac)
        U = tt.concatenate((
            ar[None, :] + tt.zeros_like(x)[:, None],
            ac[None, :] * tt.cos(dc[None, :] * x[:, None])
            + bc[None, :] * tt.sin(dc[None, :] * x[:, None]),
            ac[None, :] * tt.sin(dc[None, :] * x[:, None])
            - bc[None, :] * tt.cos(dc[None, :] * x[:, None]),
        ), axis=1)

        V = tt.concatenate((
            tt.zeros_like(ar)[None, :] + tt.ones_like(x)[:, None],
            tt.cos(dc[None, :] * x[:, None]),
            tt.sin(dc[None, :] * x[:, None]),
        ), axis=1)

        dx = x[1:] - x[:-1]
        P = tt.concatenate((
            tt.exp(-cr[None, :] * dx[:, None]),
            tt.exp(-cc[None, :] * dx[:, None]),
            tt.exp(-cc[None, :] * dx[:, None]),
        ), axis=1)

        return a, U, V, P
Example #5
def get_uhs_operator(uhs, depth, n_hidden, rhos):
    """

    :param uhs:
    :param depth:
    :param n_hidden:
    :param rhos: can be shared variable or constant of shape (depth, )!!
    :return:
    """
    # Will use a Fourier matrix (will be O(n^2)...)
    # Doesn't seem to slow things down much though!
    exp_phases = [T.cos(uhs), T.sin(uhs)]
    neg_exp_phases = [T.cos(uhs[:, ::-1]), -T.sin(uhs[:, ::-1])]
    ones_ = [T.ones((depth, 1), dtype=theano.config.floatX), T.zeros((depth, 1), dtype=theano.config.floatX)]

    rhos_reshaped = T.reshape(rhos, (depth, 1), ndim=2)
    rhos_reshaped = T.addbroadcast(rhos_reshaped, 1)

    eigvals_re = rhos_reshaped * T.concatenate((ones_[0], exp_phases[0], -ones_[0], neg_exp_phases[0]), axis=1)
    eigvals_im = rhos_reshaped * T.concatenate((ones_[1], exp_phases[1], -ones_[1], neg_exp_phases[1]), axis=1)
    phase_array = -2 * np.pi * np.outer(np.arange(n_hidden), np.arange(n_hidden)) / n_hidden
    f_array_re_val = np.cos(phase_array) / n_hidden
    f_array_im_val = np.sin(phase_array) / n_hidden
    f_array_re = theano.shared(f_array_re_val.astype(theano.config.floatX), name="f_arr_re")
    f_array_im = theano.shared(f_array_im_val.astype(theano.config.floatX), name="f_arr_im")

    a_k = T.dot(eigvals_re, f_array_re) + T.dot(eigvals_im, f_array_im)
    uhs_op = rep_vec(a_k, n_hidden, n_hidden)  # shape (depth, 2 * n_hidden - 1)

    return uhs_op
Example #6
  def get_output_for(self, inputs, **kwargs):
    # see eq. (1) and sec 3.1 in [1]
    input, para = inputs
    num_batch, channels, height, width = input.shape
    _w = T.cast(width, dtype = self.dtype)
    _h = T.cast(height, dtype = self.dtype)
    mat = T.zeros((num_batch, 3, 3), dtype = self.dtype)
    mat = T.set_subtensor(mat[:, 0, 0], const(1.0))
    mat = T.set_subtensor(mat[:, 1, 1], const(1.0))
    mat = T.set_subtensor(mat[:, 2, 2], const(1.0))

    if self.method == 'perspective':
      mat = T.set_subtensor(mat[:, 2, 0], (para[:, 0] / 1e4 - 1e-3) * _w)
      mat = T.set_subtensor(mat[:, 2, 1], (para[:, 1] / 1e4 - 1e-3) * _h)
    elif self.method == 'angle':
      angle = T.cast(T.argmax(para, axis = 1), dtype = self.dtype) * np.pi / 90 - np.pi / 3.0
      # ss = np.sqrt(2.0)
      mat = T.set_subtensor(mat[:, :, :], T.stacklists([
        [T.cos(angle), T.sin(angle), -(T.cos(angle) * _w + T.sin(angle) * _h - _w) / (2.0 * _w)],
        [-T.sin(angle), T.cos(angle), -(-T.sin(angle) * _w + T.cos(angle) * _h - _h) / (2.0 * _h)],
        [constv(0, num_batch, self.dtype), constv(0, num_batch, self.dtype), constv(1, num_batch, self.dtype)]]).dimshuffle(2, 0, 1))
      # return [mat, _w, _h]
    elif self.method == 'all':
      mat = T.reshape(para, [-1, 3, 3])
      mat = T.set_subtensor(mat[:, 0, 2], mat[:, 0, 2] / T.cast(width, self.dtype))
      mat = T.set_subtensor(mat[:, 1, 2], mat[:, 1, 2] / T.cast(height, self.dtype))
      mat = T.set_subtensor(mat[:, 2, 0], mat[:, 2, 0] * T.cast(width, self.dtype))
      mat = T.set_subtensor(mat[:, 2, 1], mat[:, 2, 1] * T.cast(height, self.dtype))
    else:
      raise Exception('method not understood.')
    return transform_affine(mat, input, self.method, scale_factor = self.scale_factor)
Example #7
    def grad(self, inputs, gradients):
        M, e = inputs
        E, f = self(M, e)

        bM = tt.zeros_like(M)
        be = tt.zeros_like(M)
        ecosE = e * tt.cos(E)

        if not isinstance(gradients[0].type, theano.gradient.DisconnectedType):
            # Backpropagate E_bar
            bM = gradients[0] / (1 - ecosE)
            be = tt.sin(E) * bM

        if not isinstance(gradients[1].type, theano.gradient.DisconnectedType):
            # Backpropagate f_bar
            sinf2 = tt.sin(0.5*f)
            cosf2 = tt.cos(0.5*f)
            tanf2 = sinf2 / cosf2
            e2 = e**2
            ome2 = 1 - e2
            ome = 1 - e
            ope = 1 + e
            cosf22 = cosf2**2
            twoecosf22 = 2 * e * cosf22
            factor = tt.sqrt(ope/ome)
            inner = (twoecosf22+ome) * tt.as_tensor_variable(gradients[1])

            bM += factor*(ome*tanf2**2+ope)*inner*cosf22/(ope*ome2)
            be += -2*cosf22*tanf2/ome2**2*inner*(ecosE-2+e2)

        return [bM, be]
Example #8
 def _getrz(self,d):
     r=theano.shared(np.zeros((3,3),dtype='float32'))
     r=T.set_subtensor(r[2,2],1.0)
     r=T.set_subtensor(r[0,0],T.cos(d))
     r=T.set_subtensor(r[0,1],-T.sin(d))
     r=T.set_subtensor(r[1,0],T.sin(d))
     r=T.set_subtensor(r[1,1],T.cos(d))        
     return r
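# Standalone sketch of the same idea (an addition): build the Z-axis rotation
# matrix symbolically and evaluate it for a concrete angle.
import numpy as np
import theano
import theano.tensor as T

d = T.dscalar('d')
rz = T.zeros((3, 3), dtype='float64')
rz = T.set_subtensor(rz[2, 2], 1.0)
rz = T.set_subtensor(rz[0, 0], T.cos(d))
rz = T.set_subtensor(rz[0, 1], -T.sin(d))
rz = T.set_subtensor(rz[1, 0], T.sin(d))
rz = T.set_subtensor(rz[1, 1], T.cos(d))
rotate_z = theano.function([d], rz)
print(rotate_z(np.pi / 2))  # approximately [[0, -1, 0], [1, 0, 0], [0, 0, 1]]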
Example #9
def xyz(R1, theta, R2, phi):
    theta_ = T.scalar('theta')
    phi_ = T.scalar('phi')

    x = R1 * T.cos(theta_) * R2 * T.cos(phi_)
    y = R1 * T.sin(theta_) * R2 * T.cos(phi_)
    z = R2 * T.sin(phi_)
    func = function([theta_, phi_], [x, y, z], allow_input_downcast=True)
    vals = func(theta, phi)
    return vals[0].flatten(), vals[1].flatten(), vals[2].flatten()
Example #10
def xyz2(theta, a, b, m, n1, n2, n3, rho, a2, b2, m2, n4, n5, n6):
    theta_ = T.scalar('theta')
    rho_ = T.scalar('rho')
    R1 = R(theta, a, b, m, n1, n2, n3)
    R2 = R(rho, a2, b2, m2, n4, n5, n6)

    x = R1 * T.cos(theta_) * R2 * T.cos(rho_)
    y = R1 * T.sin(theta_) * R2 * T.cos(rho_)
    z = R2 * T.sin(rho_)
    func = function([theta_, rho_], [x, y, z], allow_input_downcast=True)
    vals = func(theta, rho)
    return vals[0].flatten(), vals[1].flatten(), vals[2].flatten()
Example #11
 def get_output(self, train=False):
     rnorm = self.omega / np.sqrt(2*np.pi)*self.kappa
     val = - self.omega**2 / (8 * self.kappa**2)
     dir1 = 4 * (self._outter(self.x, tensor.cos(self.theta)) +
                 self._outter(self.y, tensor.sin(self.theta)))**2
     dir2 = (-self._outter(self.x, tensor.sin(self.theta)) +
             self._outter(self.y, tensor.cos(self.theta)))**2
     ex = 1j * (self.omega * self._outter(tensor.cos(self.theta), self.x) +
                self.omega * self._outter(tensor.sin(self.theta), self.y))
     output = rnorm * tensor.exp(val * (dir1 + dir2)) * (tensor.exp(ex)
                                                         - tensor.exp(-self.kappa**2 / 2))
     return output
Example #12
    def theano_dynamics(self, x, u):
        G, L1, L2, M1, M2 = self.extract_constants()
        # TODO: this is just an approximation

        dydx = T.alloc(0.0, 4)

        dydx = T.set_subtensor(dydx[0], x[1])

        del_ = x[2]-x[0]

        den1 = (M1+M2)*L1 - M2*L1*T.cos(del_)*T.cos(del_)

        dydx = T.set_subtensor(dydx[1],
            (  M2*L1      *  x[1] * x[1] * T.sin(del_) * T.cos(del_)
               + M2*G       *  T.sin(x[2]) * T.cos(del_) +
                 M2*L2      *  x[3] * x[3] * T.sin(del_)
               - (M1+M2)*G  *  T.sin(x[0]))/den1 )


        dydx = T.set_subtensor(dydx[2], x[3])

        den2 = (L2/L1)*den1
        dydx = T.set_subtensor(dydx[3], (-M2*L2        *   x[3]*x[3]*T.sin(del_)*T.cos(del_)
                   + (M1+M2)*G   *   T.sin(x[0])*T.cos(del_)
                   - (M1+M2)*L1  *   x[1]*x[1]*T.sin(del_)
                   - (M1+M2)*G   *   T.sin(x[2]))/den2  + u )
        return x + dydx * self.dt
Example #13
def get_theano_func():
    a = tt.dvector("a")
    b = tt.dvector("b")  # assumption: baseline term, left undefined in the original snippet
    v = tt.dvector("v")
    freq = tt.dscalar("freq")
    t = tt.dscalar("t")
    dt = tt.dscalar("dt")
    tau = tt.dscalar("tau")
    return theano.function(
        [a, b, v, freq, t, dt, tau],
        a * tt.sin(2.0 * freq * pi * t)
        + b
        + v * tt.exp(-dt / tau)
        + (-a * tt.sin(2.0 * freq * pi * t) - b) * tt.exp(-dt / tau),
    )
Example #14
 def __init__(self, target, initial_phi, profile_s=None, A0=1.0):
     self.target = target
     self.n_pixels = int(target.shape[0] / 2)   # target should be 512x512, but SLM pattern calculated should be 256x256.
     self.intensity_calc = None
     
     self.cost = None   # placeholder for cost function.
     
     if profile_s is None:
         profile_s = np.ones((self.n_pixels, self.n_pixels))
         
     assert profile_s.shape == (self.n_pixels, self.n_pixels), 'profile_s is wrong shape, should be ({n},{n})'.format(n=self.n_pixels)
     self.profile_s_r = profile_s.real.astype('float64')
     self.profile_s_i = profile_s.imag.astype('float64')
     
     assert initial_phi.shape == (self.n_pixels**2,), "initial_phi must be a vector of phases of size N^2 (not (N,N)).  Shape is " + str(initial_phi.shape)
     
     self.A0 = A0
     
     # Set zeros matrix:
     self.zero_frame = np.zeros((2*self.n_pixels, 2*self.n_pixels), dtype='float64')
     
     # Phi and its momentum for use in gradient descent with momentum:
     self.phi    = theano.shared(value=initial_phi.astype('float64'),
                                 name='phi')
     self.phi_rate = theano.shared(value=np.zeros_like(initial_phi).astype('float64'),
                                   name='phi_rate')
     
     self.S_r = theano.shared(value=self.profile_s_r,
                              name='s_r')
     self.S_i = theano.shared(value=self.profile_s_i,
                              name='s_i')
     self.zero_matrix = theano.shared(value=self.zero_frame,
                                      name='zero_matrix')
     
     # E_in: (n_pixels**2)
     phi_reshaped = self.phi.reshape((self.n_pixels, self.n_pixels))
     self.E_in_r = self.A0 * (self.S_r*T.cos(phi_reshaped) - self.S_i*T.sin(phi_reshaped))
     self.E_in_i = self.A0 * (self.S_i*T.cos(phi_reshaped) + self.S_r*T.sin(phi_reshaped))
     
     # E_in padded: (4n_pixels**2)
     idx_0, idx_1 = get_centre_range(self.n_pixels)
     self.E_in_r_pad = T.set_subtensor(self.zero_matrix[idx_0:idx_1,idx_0:idx_1], self.E_in_r)
     self.E_in_i_pad = T.set_subtensor(self.zero_matrix[idx_0:idx_1,idx_0:idx_1], self.E_in_i)
     
     # E_out:
     self.E_out_r, self.E_out_i = fft(self.E_in_r_pad, self.E_in_i_pad)        
     
     # finally, the output intensity:
     self.E_out_2 = T.add(T.pow(self.E_out_r, 2), T.pow(self.E_out_i, 2))
Example #15
def hdist(a, b):
    lat1 = a[:, 0] * deg2rad
    lon1 = a[:, 1] * deg2rad
    lat2 = b[:, 0] * deg2rad
    lon2 = b[:, 1] * deg2rad

    dlat = abs(lat1-lat2)
    dlon = abs(lon1-lon2)

    al = tensor.sin(dlat/2)**2  + tensor.cos(lat1) * tensor.cos(lat2) * (tensor.sin(dlon/2)**2)
    d = tensor.arctan2(tensor.sqrt(al), tensor.sqrt(const(1)-al))

    hd = const(2) * rearth * d

    return tensor.switch(tensor.isnan(hd), (a-b).norm(2, axis=1), hd)
Example #16
    def symbolic_call(self,x,u):

        u = TT.clip(u, -self.max_force, self.max_force) #pylint: disable=E1111

        dt = self.dt

        z = TT.take(x,0,axis=x.ndim-1)
        zdot = TT.take(x,1,axis=x.ndim-1)    
        th = TT.take(x,2,axis=x.ndim-1)
        thdot = TT.take(x,3,axis=x.ndim-1)
        u0 = TT.take(u,0,axis=u.ndim-1)

        th1 = np.pi - th

        g = 10.
        mc = 1. # mass of cart
        mp = .1 # mass of pole
        muc = .0005 # coeff friction of cart
        mup = .000002 # coeff friction of pole
        l = 1. # length of pole

        def sign(x):
            return TT.switch(x>0, 1, -1)

        thddot = -(-g*TT.sin(th1)
         + TT.cos(th1) * (-u0 - mp * l *thdot**2 * TT.sin(th1) + muc*sign(zdot))/(mc+mp)
          - mup*thdot / (mp*l))  \
        / (l*(4/3. - mp*TT.cos(th1)**2 / (mc + mp)))
        zddot = (u0 + mp*l*(thdot**2 * TT.sin(th1) - thddot * TT.cos(th1)) - muc*sign(zdot))  \
            / (mc+mp)

        newzdot = zdot + dt*zddot
        newz = z + dt*newzdot
        newthdot = thdot + dt*thddot
        newth = th + dt*newthdot

        done = (z > self.max_cart_pos) | (z < -self.max_cart_pos) | (th > self.max_pole_angle) | (th < -self.max_pole_angle) 

        ucost = 1e-5*(u**2).sum(axis=u.ndim-1)
        xcost = 1-TT.cos(th)
        # notdone = TT.neg(done) #pylint: disable=W0612,E1111
        notdone = 1-done
        costs = TT.stack((done-1)*10., notdone*xcost, notdone*ucost).T #pylint: disable=E1103


        newx = TT.stack(newz, newzdot, newth, newthdot).T #pylint: disable=E1103

        return [newx,newx,costs,done]
Example #17
def rotate(angle, axis):
    """Returns a transform to represent a rotation"""

    angle = T.as_tensor_variable(angle)
    axis = T.as_tensor_variable(axis)
    a = axis

    radians = angle*np.pi/180.0
    s = T.sin(radians)
    c = T.cos(radians)

    m = T.alloc(0., 4, 4)

    m = T.set_subtensor(m[0,0], a[0] * a[0] + (1. - a[0] * a[0]) * c)
    m = T.set_subtensor(m[0,1], a[0] * a[1] * (1. - c) - a[2] * s)
    m = T.set_subtensor(m[0,2], a[0] * a[2] * (1. - c) + a[1] * s)

    m = T.set_subtensor(m[1,0], a[0] * a[1] * (1. - c) + a[2] * s)
    m = T.set_subtensor(m[1,1], a[1] * a[1] + (1. - a[1] * a[1]) * c)
    m = T.set_subtensor(m[1,2], a[1] * a[2] * (1. - c) - a[0] * s)

    m = T.set_subtensor(m[2,0], a[0] * a[2] * (1. - c) - a[1] * s)
    m = T.set_subtensor(m[2,1], a[1] * a[2] * (1. - c) + a[0] * s)
    m = T.set_subtensor(m[2,2], a[2] * a[2] + (1. - a[2] * a[2]) * c)

    m = T.set_subtensor(m[3,3], 1)

    return Transform(m, m.T)
 def get_output_for(self, input, **kwargs):
     cosT = T.cos(self.Degree * 3.1415926 / 180.0)
     sinT = T.sin(self.Degree * 3.1415926 / 180.0)
     zeros = T.zeros_like(cosT)
     # zeros = self.Translation
     theta = T.stack([cosT, sinT, zeros, -sinT, cosT, zeros], axis = 1)
     return transform_affine(theta, input)
Example #19
	def _step(self,xg_t, xo_t, xc_t, mask_tm1,h_tm1, c_tm1, u_g, u_o, u_c):

		h_mask_tm1 = mask_tm1 * h_tm1
		c_mask_tm1 = mask_tm1 * c_tm1
		act = T.tensordot( xg_t + h_mask_tm1, u_g , [[1],[2]])
		gate = T.nnet.softmax(act.reshape((-1, act.shape[-1]))).reshape(act.shape)

		c_tilda = self.activation(xc_t + T.dot(h_mask_tm1, u_c))

		sigma_se = self.k_parameters[0]
		sigma_per = self.k_parameters[1]
		sigma_b_lin = self.k_parameters[2]
		sigma_v_lin = self.k_parameters[3]
		sigma_rq = self.k_parameters[4]

		l_se = self.k_parameters[5]
		l_per = self.k_parameters[6]
		l_lin = self.k_parameters[7]
		l_rq = self.k_parameters[8]

		alpha_rq = self.k_parameters[9]
		p_per = self.k_parameters[10]

		k_se = T.pow(sigma_se,2) * T.exp( -T.pow(c_mask_tm1 - c_tilda,2) / (2* T.pow(l_se,2) + self.EPS))
		k_per = T.pow(sigma_per,2) * T.exp( -2*T.pow(T.sin( math.pi*(c_mask_tm1 - c_tilda)/ (p_per + self.EPS) ),2)	 / ( T.pow(l_per,2) + self.EPS ))
		k_lin = T.pow(sigma_b_lin,2) + T.pow(sigma_v_lin,2)	 * (c_mask_tm1 - l_lin) * (c_tilda - l_lin )
		k_rq = T.pow(sigma_rq,2) * T.pow( 1 + T.pow( (c_mask_tm1 - c_tilda),2)	/ ( 2 * alpha_rq * T.pow(l_rq,2) + self.EPS), -alpha_rq)

		ops = [c_mask_tm1,c_tilda,k_se, k_per, k_lin,k_rq]
		yshuff = T.as_tensor_variable( ops, name='yshuff').dimshuffle(1,2,0)
		c_t = (gate.reshape((-1,gate.shape[-1])) * yshuff.reshape((-1,yshuff.shape[-1]))).sum(axis = 1).reshape(gate.shape[:2])
		o_t = self.inner_activation(xo_t + T.dot(h_mask_tm1, u_o))
		h_t = o_t * self.activation(c_t)
		return h_t, c_t
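# For reference (an addition), the four kernel terms above written with plain
# numpy scalars; the sigmas, length-scales, alpha_rq, p_per and EPS are
# placeholder values, not the layer's learned parameters.
import numpy as np

EPS = 1e-6
c_prev, c_new = 0.3, 0.8
sigma_se = sigma_per = sigma_b_lin = sigma_v_lin = sigma_rq = 1.0
l_se = l_per = l_lin = l_rq = 1.0
alpha_rq, p_per = 1.0, 1.0
k_se = sigma_se**2 * np.exp(-(c_prev - c_new)**2 / (2 * l_se**2 + EPS))
k_per = sigma_per**2 * np.exp(-2 * np.sin(np.pi * (c_prev - c_new) / (p_per + EPS))**2 / (l_per**2 + EPS))
k_lin = sigma_b_lin**2 + sigma_v_lin**2 * (c_prev - l_lin) * (c_new - l_lin)
k_rq = sigma_rq**2 * (1 + (c_prev - c_new)**2 / (2 * alpha_rq * l_rq**2 + EPS))**(-alpha_rq)
print(k_se, k_per, k_lin, k_rq)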
 def get_output_for(self, input, **kwargs):
     cosT = T.cos(self.Degree * 3.1415926 / 180.0)
     sinT = T.sin(self.Degree * 3.1415926 / 180.0)
     zeros = T.zeros_like(cosT)
     # zeros = self.Translation
     finalResult = T.stack([cosT, sinT, zeros, -sinT, cosT, zeros], axis = 1)
     return finalResult
 def get_output_for(self, input, **kwargs):
     cosT = T.cos(self.Degree[:, 0] * 3.1415926 / 180.0)
     sinT = T.sin(self.Degree[:, 0] * 3.1415926 / 180.0)
     ones = self.scaling[:, 0]
     zeros = T.zeros_like(self.translation[:,0])
     theta = T.stack([ones * cosT, sinT, self.translation[:,0], -sinT, ones * cosT, self.translation[:,1]], axis = 1)
     return transform_affine(theta, input)
Example #22
    def create_transport_gradient(self):
        HL2 = T.sqrt((self.search_direction ** 2).sum(axis=0,keepdims=True)) + np.spacing(np.single(1))

        transported = self.last_G - (self.last_G*self.search_direction).sum(axis=0,keepdims=True)/(HL2**2) * \
                               (self.X*HL2 * T.sin(self.t_prev*HL2) + self.search_direction * (1.0-T.cos(self.t_prev*HL2)))
        f = theano.function([],[],updates=[(self.last_G,transported)])
        return f
Example #23
def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
    # from a bug in gpu normal sampling
    _a = numpy.asarray([1, 2, 3, 4], dtype='float32')
    _b = numpy.asarray([5, 6, 7, 8], dtype='float32')
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)

    a_prime = tensor.cos(a)
    b_prime = tensor.sin(b)

    c = tensor.join(0, a_prime, b_prime)

    d = c[:-1]

    f = theano.function([], d, mode=mode_with_gpu)

    graph_nodes = f.maker.fgraph.toposort()

    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuSubtensor)
    assert isinstance(graph_nodes[-3].op, cuda.GpuJoin)

    concat = numpy.concatenate([numpy.cos(_a), numpy.sin(_b)], axis=0)
    concat = concat[:-1]

    assert numpy.allclose(numpy.asarray(f()), concat)
Example #24
 def For_MMD_Sub_class(self,target,data,omega,num_FF,Xlabel):
     
     Num=T.sum(Xlabel,0)
     D_num=Xlabel.shape[1]
     N=data.shape[0]
     
     F_times_Omega = T.dot(data, omega)#minibatch_size*n_rff
     Phi = (self.sf2**0.5 /num_FF**0.5 ) * T.concatenate([T.cos(F_times_Omega), T.sin(F_times_Omega)],1)
     
     # each RFF feature is a column vector of length 2*N_rff
     Phi_total=T.sum(Phi.T,-1)/N
     
     # matrix of shape (num_domains, 2*N_rff)
     Phi_each_domain, updates = theano.scan(fn=lambda a,b: T.switch(T.neq(b,0), Phi.T*a/b, 0),
                           sequences=[Xlabel.T,Num])
     each_Phi=T.sum(Phi_each_domain,-1)
     # first, each domain's inner product with itself; the result is a D-dimensional vector
     each_domain_sum=T.sum(each_Phi*each_Phi,-1)
     
     # inner product of the overall mean feature with itself
     tot_sum=T.dot(Phi_total,Phi_total)
     
     # cross inner products between the overall mean and each domain
     tot_domain_sum, updates=theano.scan(fn=lambda a: a*Phi_total,
                           sequences=[each_Phi])
     
     # compute the MMD
     MMD_central=T.sum(each_domain_sum)+D_num*tot_sum-2*T.sum(tot_domain_sum)
     
     return MMD_central     
    def get_output_for(self, input, **kwargs):
        #self.translation = T.zeros((n, 2))
        #ones = T.exp(self.scaling[:, 0])
        ones = T.ones((self.n,))
        zeros = T.zeros((self.n,))
        cosTx = T.cos(self.DegreeX * 3.1415926 / 180.0)
        sinTx = T.sin(self.DegreeX * 3.1415926 / 180.0)
        cosTy = T.cos(self.DegreeY * 3.1415926 / 180.0)
        sinTy = T.sin(self.DegreeY * 3.1415926 / 180.0)
        cosTz = T.cos(self.DegreeZ * 3.1415926 / 180.0)
        sinTz = T.sin(self.DegreeZ * 3.1415926 / 180.0)
        thetaX = T.stack([ones, zeros, zeros, zeros, zeros, cosTx, -sinTx, zeros, zeros, sinTx, cosTx, zeros, zeros, zeros, zeros, ones], axis = 1)
        thetaY = T.stack([cosTy, zeros, sinTy, zeros, zeros, ones, zeros, zeros, -sinTy, zeros, cosTy, zeros, zeros, zeros, zeros, ones], axis = 1)
        thetaZ = T.stack([cosTz, -sinTz, zeros, zeros, sinTz, cosTz, zeros, zeros, zeros, zeros, ones, zeros, zeros, zeros, zeros, ones], axis = 1)

        return transform_affine(thetaX, thetaY, thetaZ, input)
Example #26
def beta_noise(rng, input):

    srng = theano.tensor.shared_randomstreams.RandomStreams(rng.randint(999999))
    
    # Beta(.5,.5) multiplicative noise                       
    mask = T.cast(T.sin( (np.pi / 2.0) * srng.uniform(size=input.shape, low=0.0, high=1.0) )**2, theano.config.floatX)
    return mask * input
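# Quick numpy check (an addition) that sin^2(pi*U/2) with U ~ Uniform(0, 1)
# really follows the arcsine law, i.e. Beta(0.5, 0.5):
import numpy as np

u = np.random.uniform(0.0, 1.0, size=200000)
samples = np.sin(0.5 * np.pi * u)**2
print(samples.mean())  # close to 0.5, the Beta(0.5, 0.5) mean
print(samples.var())   # close to 0.125, the Beta(0.5, 0.5) variance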
Example #27
 def _get_position(self, a, t):
     f = self._get_true_anomaly(t)
     cosf = tt.cos(f)
     if self.ecc is None:
         r = a
     else:
         r = a * (1.0 - self.ecc**2) / (1 + self.ecc * cosf)
     return self._rotate_vector(r * cosf, r * tt.sin(f))
Example #28
    def xyzt_2_param(xyzt):
        # get individual xyz
        dx = xyzt[:, 0]  # x and y are already between -1 and 1
        dy = xyzt[:, 1]  # x and y are already between -1 and 1
        z = xyzt[:, 2]
        t = xyzt[:, 3]
        # compute the resize from the largest scale image
        dr = (np.cast[floatX](reduc_ratio) * np.cast[floatX]
              (2.0)**z / np.cast[floatX](max_scale))

        # dimshuffle before concatenate
        params = [dr * T.cos(t), -dr * T.sin(t), dx, dr * T.sin(t),
                  dr * T.cos(t), dy]
        params = [_p.flatten().dimshuffle(0, 'x') for _p in params]

        # concatenate to have (1 0 0 0 1 0) when identity transform
        return T.concatenate(params, axis=1)
 def get_output_for(self, inputs, **kwargs):
     input, degree = inputs
     cosT = T.cos(degree)
     sinT = T.sin(degree)
     zeros = T.zeros_like(cosT)
     # zeros = self.Translation
     theta = T.stack([cosT, sinT, zeros, -sinT, cosT, zeros], axis = 1)
     return transform_affine(theta, input)
Example #30
 def alpha_perfect(self, w, u):
     alpha = sharedX(1.5)
     beta = sharedX(0.5)
     S_var = self.S(alpha,beta)
     B_var = self.B(alpha,beta)
     first = T.sin(alpha*(u+B_var)/(T.cos(u)**(1/alpha)))
     second = T.cos(u-alpha*(u+B_var))/w
     return S_var*first*(second**((1-alpha)/alpha))
Example #31
File: compare.py  Project: dfm/rvhmc
def build_model(peaks, t, y=None, yerr=None, model=None):
    model = pm.modelcontext(model)

    n_planets = len(peaks)

    if yerr is None:
        yerr = np.random.uniform(0.01, 0.1, len(t))
    if y is None:
        y = yerr * np.random.randn(len(t))

    trend = PolynomialTrend("trend", order=3)
    logs = pm.Normal("logs", mu=-5.0, sd=5.0, testval=-5.0)
    meanrv = pm.Normal("meanrv", mu=0.0, sd=10.0, testval=0.0)
    dataset = RVDataset("data",
                        t,
                        y,
                        yerr,
                        logs=logs,
                        trend=trend,
                        meanrv=meanrv)

    logamps = pm.Uniform("logamps",
                         lower=np.log(min_amp),
                         upper=np.log(max_amp),
                         shape=n_planets,
                         testval=np.log([
                             np.clip(peak["amp"], min_amp + 1e-2,
                                     max_amp - 1e-2) for peak in peaks
                         ]))

    planets = []
    for i, (peak, name) in enumerate(zip(peaks, string.ascii_lowercase[1:])):
        logP = pm.Uniform(name + ":logP",
                          lower=np.log(min_period),
                          upper=np.log(max_period),
                          testval=np.log(peak["period"]))
        logK = pm.Deterministic(name + ":logK", logamps[i])

        eccen = pm.Beta(name + ":eccen",
                        alpha=0.867,
                        beta=3.03,
                        testval=peak["eccen"])
        omegabase = pm.Uniform(name + ":omegabase",
                               -2 * np.pi,
                               2 * np.pi,
                               testval=peak["omega"])
        omegavec = pm.Deterministic(
            name + ":omegavec",
            tt.stack([tt.cos(omegabase), tt.sin(omegabase)]))

        phibase = pm.Uniform(name + ":phibase",
                             -2 * np.pi,
                             2 * np.pi,
                             testval=peak["phase"])
        phivec = pm.Deterministic(name + ":phivec",
                                  tt.stack([tt.cos(phibase),
                                            tt.sin(phibase)]))
        planets.append(
            RVPlanet(name,
                     logP,
                     logK,
                     phivec=phivec,
                     eccen=eccen,
                     omegavec=omegavec))

    rvmodel = RVModel("rv", dataset, planets)
    pm.Deterministic("logp", model.logpt)

    return rvmodel
Example #32
 def backward(self, y):
     return tt.arctan2(tt.sin(y), tt.cos(y))
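# The same wrapping trick with plain numpy (an added illustration):
# arctan2(sin(y), cos(y)) maps any angle back into [-pi, pi].
import numpy as np

y = np.array([0.0, 3.5, -7.0, 2 * np.pi])
print(np.arctan2(np.sin(y), np.cos(y)))  # [0.0, 3.5 - 2*pi, -7.0 + 2*pi, ~0.0]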
Example #33
 def _get_true_anomaly(self, t, _pad=True):
     M = (self._warp_times(t, _pad=_pad) - self.tref) * self.n
     if self.ecc is None:
         return tt.sin(M), tt.cos(M)
     sinf, cosf = kepler(M, self.ecc + tt.zeros_like(M))
     return sinf, cosf
Example #34
 def f(x, u):
     return tt.stacklists([
         x[3] * tt.cos(x[2]), x[3] * tt.sin(x[2]), x[3] * u[0],
         u[1] - x[3] * friction
     ])
Example #35
 def _get_true_anomaly(self, t):
     M = (self._warp_times(t) - self.tref) * self.n
     if self.ecc is None:
         return tt.sin(M), tt.cos(M)
     sinf, cosf = self.kepler_op(M, self.ecc + tt.zeros_like(M))
     return sinf, cosf
Example #36
def week_modulation(
        new_cases_inferred,
        week_modulation_type="abs_sine",
        pr_mean_weekend_factor=0.7,
        pr_sigma_weekend_factor=0.2,
        week_end_days=(6, 7),
        model=None,
        save_in_trace=True,
):
    """

    Parameters
    ----------
    new_cases_inferred
    week_modulation_type
    pr_mean_weekend_factor
    pr_sigma_weekend_factor
    week_end_days
    model

    Returns
    -------

    """
    model = modelcontext(model)
    shape_modulation = list(model.shape_sim)
    diff_data_sim = model.diff_data_sim
    shape_modulation[0] -= diff_data_sim
    date_begin_sim = model.date_begin_sim

    len_L2 = () if model.ndim_sim == 1 else model.shape_sim[1]

    week_end_factor, _ = hierarchical_normal(
        "weekend_factor",
        "sigma_weekend_factor",
        pr_mean=pr_mean_weekend_factor,
        pr_sigma=pr_sigma_weekend_factor,
        len_L2=len_L2,
    )
    if week_modulation_type == "step":
        modulation = np.zeros(shape_modulation[0])
        for i in range(shape_modulation[0]):
            date_curr = date_begin_sim + datetime.timedelta(days=i +
                                                            diff_data_sim + 1)
            if date_curr.isoweekday() in week_end_days:
                modulation[i] = 1
    elif week_modulation_type == "abs_sine":
        offset_rad = pm.VonMises("offset_modulation_rad", mu=0, kappa=0.01)
        offset = pm.Deterministic("offset_modulation",
                                  offset_rad / (2 * np.pi) * 7)
        t = np.arange(shape_modulation[0])
        date_begin = date_begin_sim + datetime.timedelta(days=diff_data_sim +
                                                         1)
        weekday_begin = date_begin.weekday()
        t -= weekday_begin  # Sunday is zero
        modulation = 1 - tt.abs_(tt.sin(t / 7 * np.pi + offset_rad / 2))

    if model.ndim_sim == 2:
        modulation = tt.shape_padaxis(modulation, axis=-1)

    multiplication_vec = np.ones(
        shape_modulation) - (1 - week_end_factor) * modulation
    new_cases_inferred_eff = new_cases_inferred * multiplication_vec
    if save_in_trace:
        pm.Deterministic("new_cases", new_cases_inferred_eff)
    return new_cases_inferred_eff
Example #37
# Differentiate y with respect to x
gy2 = T.grad(cost=y, wrt=x)

# Define a function that evaluates the derivative
f = theano.function(inputs=[x], outputs=gy2)
print(theano.pp(f.maker.fgraph.outputs[0]))

print(f(2))
print(f(3))
print(f(4))

# Differentiate y = sin(x)

# Define the symbolic expression to differentiate
y = T.sin(x)

# Differentiate y with respect to x
gy = T.grad(cost=y, wrt=x)

# Define a function that evaluates the derivative
f = theano.function(inputs=[x], outputs=gy)
print(theano.pp(f.maker.fgraph.outputs[0]))

print(f(0))
print(f(np.pi / 2))
print(f(np.pi))

# Differentiate y = (x - 4)(x^2 + 6)
y = (x - 4) * (x**2 + 6)
gy = T.grad(cost=y, wrt=x)
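# Added continuation in the same style as the steps above (the analytic
# derivative is 3x^2 - 8x + 6):
f = theano.function(inputs=[x], outputs=gy)
print(theano.pp(f.maker.fgraph.outputs[0]))

print(f(0))  # 6.0
print(f(2))  # 2.0
print(f(4))  # 22.0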
Example #38
 def forward(self, x):
     return tt.concatenate((
         tt.shape_padleft(tt.sin(x)),
         tt.shape_padleft(tt.cos(x))
     ), axis=0)
Example #39
 def get_apodization(self, nyquist):
     x = (np.pi * self.f) / (2 * nyquist)
     return tt.sqr((tt.sin(x)/x))
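# Side note (an addition): the apodization above is just a squared sinc, since
# sin(x)/x == numpy.sinc(x/pi). A quick numpy check with placeholder values:
import numpy as np

f_val = np.linspace(1.0, 100.0, 5)
nyquist = 283.0  # placeholder Nyquist frequency, same units as f_val
x = (np.pi * f_val) / (2 * nyquist)
print(np.allclose((np.sin(x) / x)**2, np.sinc(x / np.pi)**2))  # True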
Example #40
def gTrig2(m, v, angi, D=None):
    '''
    Replaces angle dimensions with their complex representation.
    i.e. if x[i] is an angle ( i in angi ), then x[i] will be replaced
    with cos[x[i]] and sin[x[i]].
    Since the input is a gaussian distribution, the output mean and covariance
    are computed via moment matching
    '''
    if D is None:
        D = m.shape[0]
    if isinstance(angi, list) or isinstance(angi, tuple):
        angi = np.array(angi, dtype=np.int32)

    idx = tt.arange(D)
    na_dims = (1 - tt.eq(idx, angi[:, None])).prod(0).nonzero()[0]
    Da = 2 * angi.size
    Dna = na_dims.size
    Ma = tt.zeros((Da, ))
    Va = tt.zeros((Da, Da))
    Ca = tt.zeros((D, Da))

    # compute the mean
    mi = m[angi]
    vi = v[angi, :][:, angi]
    vii = v[angi, angi]
    exp_vii_h = tt.exp(-vii / 2)

    Ma = tt.set_subtensor(Ma[::2], exp_vii_h * tt.sin(mi))
    Ma = tt.set_subtensor(Ma[1::2], exp_vii_h * tt.cos(mi))

    # compute the entries in the augmented covariance matrix
    vii_c = vii.dimshuffle(0, 'x')
    vii_r = vii.dimshuffle('x', 0)
    lq = -0.5 * (vii_c + vii_r)
    q = tt.exp(lq)
    exp_lq_p_vi = tt.exp(lq + vi)
    exp_lq_m_vi = tt.exp(lq - vi)
    mi_c = mi.dimshuffle(0, 'x')
    mi_r = mi.dimshuffle('x', 0)
    U1 = (exp_lq_p_vi - q) * (tt.sin(mi_c - mi_r))
    U2 = (exp_lq_m_vi - q) * (tt.sin(mi_c + mi_r))
    U3 = (exp_lq_p_vi - q) * (tt.cos(mi_c - mi_r))
    U4 = (exp_lq_m_vi - q) * (tt.cos(mi_c + mi_r))

    Va = tt.set_subtensor(Va[::2, ::2], U3 - U4)
    Va = tt.set_subtensor(Va[1::2, 1::2], U3 + U4)
    U12 = U1 + U2
    Va = tt.set_subtensor(Va[::2, 1::2], U12)
    Va = tt.set_subtensor(Va[1::2, ::2], U12.T)
    Va = 0.5 * Va

    # inv times input output covariance
    Is = 2 * tt.arange(angi.size)
    Ic = Is + 1
    Ca = tt.set_subtensor(Ca[angi, Is], Ma[1::2])
    Ca = tt.set_subtensor(Ca[angi, Ic], -Ma[::2])

    # construct mean vectors ( non angle dimensions come first,
    # then angle dimensions)
    Mna = m[na_dims]
    M = tt.concatenate([Mna, Ma])

    # construct the corresponding covariance matrices
    # just the blocks for the non angle dimensions and the angle dimensions
    # separately
    V = tt.zeros((Dna + Da, Dna + Da))
    Vna = v[na_dims, :][:, na_dims]
    V = tt.set_subtensor(V[:Dna, :Dna], Vna)
    V = tt.set_subtensor(V[Dna:, Dna:], Va)

    # fill in the cross covariances
    q = v.dot(Ca)[na_dims, :]
    V = tt.set_subtensor(V[:Dna, Dna:], q)
    V = tt.set_subtensor(V[Dna:, :Dna], q.T)

    return [M, V, Ca]
Example #41
        mu=rv2_dupont,
        observed=d.dupont2[1],
        sd=get_err(d.dupont2[2], logjit_dupont),
    )

    # get the astrometric predictions
    # since there is only one measurement no jitter
    rho_model, theta_model = orbit.get_relative_angles(d.anthonioz[0],
                                                       parallax)  # arcsec

    # evaluate the astrometric likelihood functions
    pm.Normal("obs_rho",
              mu=rho_model,
              observed=d.anthonioz[1],
              sd=d.anthonioz[2])
    theta_diff = tt.arctan2(tt.sin(theta_model - d.anthonioz[3]),
                            tt.cos(theta_model - d.anthonioz[3]))  # wrap-safe
    pm.Normal("obs_theta", mu=theta_diff, observed=0.0, sd=d.anthonioz[4])

# iterate through the list of free_RVs in the model to get things like
# ['logKAa_interval__', etc...] then use a regex to strip away
# the transformations (in this case, _interval__ and _angle__)
# \S corresponds to any character that is not whitespace
# https://docs.python.org/3/library/re.html
sample_vars = [re.sub(r"_\S*__", "", var.name) for var in model.free_RVs]

all_vars = [
    var.name for var in model.unobserved_RVs
    if ("_interval__" not in var.name) and ("_angle__" not in var.name) and (
        "_lowerbound__" not in var.name)
]
Example #42
import theano
import theano.tensor as tt

import numpy

x = tt.matrix('x')
y = tt.matrix('y')
z = tt.matrix('z')

output = (z.T * (tt.cos(x)**2 + tt.sin(x)**2)).sum(1)

f = theano.function([x, y, z], output)

theano.printing.debugprint(f)

xx = numpy.ones((5, 5), dtype='float64')
print(f(xx, xx, xx))
Example #43
 def logp(self, value):
     upper = self.upper
     lower = self.lower
     return pymc3.distributions.dist_math.bound(
         tt.log(tt.sin(value) / self.norm),
         lower <= value, value <= upper)
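# Presumably self.norm is the integral of sin over [lower, upper], i.e.
# cos(lower) - cos(upper); a quick numpy check of that identity (an addition):
import numpy as np

lower, upper = 0.2, 2.5
grid = np.linspace(lower, upper, 200001)
print(np.isclose(np.trapz(np.sin(grid), grid), np.cos(lower) - np.cos(upper)))  # True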
Example #44
 def forward(self, inputtensor):
     inputimage = inputtensor[0]
     return (T.concatenate(
         [T.sin(inputimage), T.cos(inputimage)], axis=1), )
Example #45
                C2 = 0.0
                sin_omc = T.zeros(len(fixed_ephem)) 
                
            elif local_trend_type[npl][ng-1] == "quadratic":
                C2 = pm.Normal("C2", mu=0.0, sd=2*ttv_rms_amp[npl])
                sin_omc = T.zeros(len(fixed_ephem)) 

            elif local_trend_type[npl][ng-1] == "sinusoid":
                C2 = 0.0
                
                freq = pm.Normal("freq", mu=sin_priors[npl][0], sd=0.1*sin_priors[npl][0])
                A = pm.Normal("A", mu=0, sd=ttv_rms_amp[npl], testval=sin_priors[npl][1])
                B = pm.Normal("B", mu=0, sd=ttv_rms_amp[npl], testval=sin_priors[npl][2])
                
                sin_omc = pm.Deterministic("sin_omc", 
                                           A*T.sin(2*pi*freq*fixed_ephem) + 
                                           B*T.cos(2*pi*freq*fixed_ephem))
                
                
            else:
                raise ValueError("local_trend_type must be 'linear', 'quadratic', or 'sinusoid'")
                
                
            # hierarchical (hyper)parameters
            if USE_HBM:
                log_pop_var = pm.Normal('log_pop_var', mu=2*np.log(ttv_rms_amp[npl]), 
                                        sd=np.log(4))
            
                pop_sd = pm.Deterministic('pop_sd', T.sqrt(T.exp(log_pop_var)))
                
            else:
Example #46
 def forward(self, inputtensor):
     inputimage = inputtensor[0]
     return (T.sin(self.a[0] * inputimage), )
Example #47
    def __init__(self,
                 period=None,
                 a=None,
                 t0=None,
                 t_periastron=None,
                 incl=None,
                 b=None,
                 duration=None,
                 ecc=None,
                 omega=None,
                 sin_omega=None,
                 cos_omega=None,
                 Omega=None,
                 m_planet=0.0,
                 m_star=None,
                 r_star=None,
                 rho_star=None,
                 ror=None,
                 m_planet_units=None,
                 rho_star_units=None,
                 model=None,
                 contact_points_kwargs=None,
                 **kwargs):
        add_citations_to_model(self.__citations__, model=model)

        self.jacobians = defaultdict(lambda: defaultdict(None))

        daordtau = None
        if ecc is None and duration is not None:
            if r_star is None:
                r_star = as_tensor_variable(1.0)
            if b is None:
                raise ValueError(
                    "'b' must be provided for a circular orbit with a "
                    "'duration'")
            aor, daordtau = get_aor_from_transit_duration(duration,
                                                          period,
                                                          b,
                                                          ror=ror)
            a = r_star * aor
            duration = None

        # Parameters
        if m_planet_units is not None:
            warnings.warn(
                "The `m_planet_units` argument has been deprecated. "
                "Use `with_unit` instead.",
                DeprecationWarning,
            )
            m_planet = with_unit(m_planet, m_planet_units)
        if rho_star_units is not None:
            warnings.warn(
                "The `rho_star_units` argument has been deprecated. "
                "Use `with_unit` instead.",
                DeprecationWarning,
            )
            rho_star = with_unit(rho_star, rho_star_units)
        inputs = _get_consistent_inputs(a, period, rho_star, r_star, m_star,
                                        m_planet)
        (
            self.a,
            self.period,
            self.rho_star,
            self.r_star,
            self.m_star,
            self.m_planet,
        ) = inputs
        self.m_total = self.m_star + self.m_planet

        self.n = 2 * np.pi / self.period
        self.a_star = self.a * self.m_planet / self.m_total
        self.a_planet = -self.a * self.m_star / self.m_total

        # Track the Jacobian between the duration and a
        if daordtau is not None:
            dadtau = self.r_star * daordtau
            self.jacobians["duration"]["a"] = dadtau
            self.jacobians["duration"]["a_star"] = (dadtau * self.m_planet /
                                                    self.m_total)
            self.jacobians["duration"]["a_planet"] = (-dadtau * self.m_star /
                                                      self.m_total)

            # rho = 3 * pi * (a/R)**3 / (G * P**2)
            # -> drho / d(a/R) = 9 * pi * (a/R)**2 / (G * P**2)
            self.jacobians["duration"]["rho_star"] = (
                9 * np.pi * (self.a / self.r_star)**2 * daordtau *
                gcc_per_sun / (G_grav * self.period**2))

        self.K0 = self.n * self.a / self.m_total

        # Set up the contact points calculation
        if contact_points_kwargs is None:
            self.contact_points = contact_points
        else:
            self.contact_points = ContactPoints(**contact_points_kwargs)

        if Omega is None:
            self.Omega = None
        else:
            self.Omega = as_tensor_variable(Omega)
            self.cos_Omega = tt.cos(self.Omega)
            self.sin_Omega = tt.sin(self.Omega)

        # Eccentricity
        if ecc is None:
            self.ecc = None
            self.M0 = 0.5 * np.pi + tt.zeros_like(self.n)
            incl_factor = 1
        else:
            self.ecc = as_tensor_variable(ecc)
            if omega is not None:
                if sin_omega is not None and cos_omega is not None:
                    raise ValueError(
                        "either 'omega' or 'sin_omega' and 'cos_omega' can be "
                        "provided")
                self.omega = as_tensor_variable(omega)
                self.cos_omega = tt.cos(self.omega)
                self.sin_omega = tt.sin(self.omega)
            elif sin_omega is not None and cos_omega is not None:
                self.cos_omega = as_tensor_variable(cos_omega)
                self.sin_omega = as_tensor_variable(sin_omega)
                self.omega = tt.arctan2(self.sin_omega, self.cos_omega)

            else:
                raise ValueError("both e and omega must be provided")

            opsw = 1 + self.sin_omega
            E0 = 2 * tt.arctan2(
                tt.sqrt(1 - self.ecc) * self.cos_omega,
                tt.sqrt(1 + self.ecc) * opsw,
            )
            self.M0 = E0 - self.ecc * tt.sin(E0)

            ome2 = 1 - self.ecc**2
            self.K0 /= tt.sqrt(ome2)
            incl_factor = (1 + self.ecc * self.sin_omega) / ome2

        # The Jacobian for the transform cos(i) -> b
        self.dcosidb = self.jacobians["b"]["cos_incl"] = (incl_factor *
                                                          self.r_star / self.a)

        if b is not None:
            if incl is not None or duration is not None:
                raise ValueError(
                    "only one of 'incl', 'b', and 'duration' can be given")
            self.b = as_tensor_variable(b)
            self.cos_incl = self.dcosidb * self.b
            self.incl = tt.arccos(self.cos_incl)
        elif incl is not None:
            if duration is not None:
                raise ValueError(
                    "only one of 'incl', 'b', and 'duration' can be given")
            self.incl = as_tensor_variable(incl)
            self.cos_incl = tt.cos(self.incl)
            self.b = self.cos_incl / self.dcosidb
        elif duration is not None:
            if self.ecc is None:
                raise ValueError(
                    "fitting with duration only works for eccentric orbits")
            self.duration = as_tensor_variable(to_unit(duration, u.day))
            c = tt.sin(np.pi * self.duration * incl_factor / self.period)
            c2 = c * c
            aor = self.a_planet / self.r_star
            esinw = self.ecc * self.sin_omega
            self.b = tt.sqrt(
                (aor**2 * c2 - 1) / (c2 * esinw**2 + 2 * c2 * esinw + c2 -
                                     self.ecc**4 + 2 * self.ecc**2 - 1))
            self.b *= 1 - self.ecc**2
            self.cos_incl = self.dcosidb * self.b
            self.incl = tt.arccos(self.cos_incl)
        else:
            zla = tt.zeros_like(self.a)
            self.incl = 0.5 * np.pi + zla
            self.cos_incl = zla
            self.b = zla

        if t0 is not None and t_periastron is not None:
            raise ValueError("you can't define both t0 and t_periastron")
        if t0 is None and t_periastron is None:
            t0 = tt.zeros_like(self.period)

        if t0 is None:
            self.t_periastron = as_tensor_variable(t_periastron)
            self.t0 = self.t_periastron + self.M0 / self.n
        else:
            self.t0 = as_tensor_variable(t0)
            self.t_periastron = self.t0 - self.M0 / self.n

        self.tref = self.t_periastron - self.t0

        self.sin_incl = tt.sin(self.incl)
Example #48
# -*- coding: utf-8 -*-
"""3.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1usas99e-lvo-jD_7TDeg6fCoZ4qklog6
"""

import numpy
import theano
import theano.tensor as T

t = T.dscalar("t")
x1 = 5 * T.cos(2 * t) - 2
y1 = 5 * T.sin(2 * t)
x2 = 10 * T.cos(t) - 2
y2 = 10 * T.sin(t)

# Heron's formula
a = T.sqrt(x1**2 + y1**2)
b = T.sqrt(x2**2 + y2**2)
c = T.sqrt((x1 - x2)**2 + (y1 - y2)**2)
s = (a + b + c) / 2
S = T.sqrt(s * (s - a) * (s - b) * (s - c))

# Differentiate with respect to t
dS = T.grad(S, t)

f = theano.function([t], dS)
g = theano.function([t], S)
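# Example evaluation (an addition): area of the triangle and its rate of change at t = 0.5.
print(g(0.5))  # S(t) at t = 0.5
print(f(0.5))  # dS/dt at t = 0.5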
Example #49
    def normal(self,
               size,
               avg=0.0,
               std=1.0,
               ndim=None,
               dtype=None,
               nstreams=None):
        """
        :param size: Can be a list of integers or Theano variables (ex: the
        shape of another Theano Variable)

        :param dtype: The output data type. If dtype is not specified, it will
        be inferred from the dtype of low and high, but will be at least as
        precise as floatX.

        :param nstreams: Number of streams.
        """
        # We need an even number of ]0,1[ samples. Then we split them
        # in two halves. First half becomes our U1's for Box-Muller,
        # second half our U2's. See Wikipedia page:
        # http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
        avg = as_tensor_variable(avg)
        std = as_tensor_variable(std)

        if dtype is None:
            dtype = scal.upcast(config.floatX, avg.dtype, std.dtype)

        avg = cast(avg, dtype)
        std = cast(std, dtype)

        evened = False
        constant = False
        if isinstance(size, tuple) and all(
            [isinstance(i, (numpy.integer, int)) for i in size]):
            constant = True
            # Force dtype because it defaults to float when size is empty
            n_samples = numpy.prod(size, dtype='int64')

            if n_samples % 2 == 1:
                n_samples += 1
                evened = True
        else:
            #if even, don't change, if odd, +1
            n_samples = prod(size) + (prod(size) % 2)
        flattened = self.uniform(size=(n_samples, ),
                                 dtype=dtype,
                                 nstreams=nstreams)

        if constant:
            U1 = flattened[:n_samples // 2]
            U2 = flattened[n_samples // 2:]
        else:
            U1 = flattened[:prod(flattened.shape) // 2]
            U2 = flattened[prod(flattened.shape) // 2:]

        #normal_samples = zeros_like(flattened)
        sqrt_ln_U1 = sqrt(-2.0 * log(U1))
        # TypeError: 'TensorVariable' object does not support item assignment
        # so this doesn't work...
        #normal_samples[:n_samples/2] = sqrt_ln_U1 * cos(2.0*numpy.pi*U2)
        #normal_samples[n_samples/2:] = sqrt_ln_U1 * sin(2.0*numpy.pi*U2)

        # so trying this instead
        first_half = sqrt_ln_U1 * cos(
            numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
        second_half = sqrt_ln_U1 * sin(
            numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
        normal_samples = join(0, first_half, second_half)

        final_samples = None
        if evened:
            final_samples = normal_samples[:-1]
        elif constant:
            final_samples = normal_samples
        else:
            final_samples = normal_samples[:prod(size)]

        if not size:
            # Force the dtype to be int64, otherwise reshape complains
            size = tensor.constant(size, dtype='int64')
        final_samples = final_samples.reshape(size)

        final_samples = avg + std * final_samples

        assert final_samples.dtype == dtype
        return final_samples
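# For reference (an addition), the same Box-Muller transform in plain numpy:
import numpy as np

u1 = np.random.uniform(1e-12, 1.0, size=100000)
u2 = np.random.uniform(0.0, 1.0, size=100000)
r = np.sqrt(-2.0 * np.log(u1))
z = np.concatenate([r * np.cos(2.0 * np.pi * u2), r * np.sin(2.0 * np.pi * u2)])
print(z.mean(), z.std())  # approximately 0 and 1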
#### LINEAR REGRESSION USING THEANO

import theano
import theano.tensor as T
import theano.tensor.nnet as nnet
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import colors

y_train = y2

X_train = x2

x = T.dscalar()
fx = T.exp(T.sin(x**2))
f = theano.function(inputs=[x], outputs=[fx])
fp = T.grad(fx, wrt=x)
fprime = theano.function([x], fp)
x = T.dvector()
y = T.dscalar()


def layer(x, w):
    b = np.array([1], dtype=theano.config.floatX)
    new_x = T.concatenate([x, b])
    m = T.dot(w.T, new_x)  #theta1: 3x3 * x: 3x1 = 3x1 ;;; theta2: 1x4 * 4x1
    h = nnet.sigmoid(m)
    return h

Example #51
    def __init__(self,
                 period=None,
                 a=None,
                 t0=None,
                 t_periastron=None,
                 incl=None,
                 b=None,
                 duration=None,
                 ecc=None,
                 omega=None,
                 Omega=None,
                 m_planet=0.0,
                 m_star=None,
                 r_star=None,
                 rho_star=None,
                 m_planet_units=None,
                 rho_star_units=None,
                 model=None,
                 contact_points_kwargs=None,
                 **kwargs):
        add_citations_to_model(self.__citations__, model=model)

        self.gcc_to_sun = ((constants.M_sun / constants.R_sun**3).to(
            u.g / u.cm**3).value)
        self.au_to_R_sun = (constants.au / constants.R_sun).value
        self.G_grav = constants.G.to(u.R_sun**3 / u.M_sun / u.day**2).value

        self.kepler_op = KeplerOp(**kwargs)

        # Parameters
        # self.period = tt.as_tensor_variable(period)
        self.m_planet = tt.as_tensor_variable(m_planet)
        if m_planet_units is not None:
            self.m_planet *= (1 * m_planet_units).to(u.M_sun).value

        self.a, self.period, self.rho_star, self.r_star, self.m_star = self._get_consistent_inputs(
            a, period, rho_star, r_star, m_star, rho_star_units)
        self.m_total = self.m_star + self.m_planet

        self.n = 2 * np.pi / self.period
        self.a_star = self.a * self.m_planet / self.m_total
        self.a_planet = -self.a * self.m_star / self.m_total

        self.K0 = self.n * self.a / self.m_total

        # Set up the contact points calculation
        if contact_points_kwargs is None:
            contact_points_kwargs = dict()

        if Omega is None:
            self.Omega = None
        else:
            self.Omega = tt.as_tensor_variable(Omega)
            self.cos_Omega = tt.cos(self.Omega)
            self.sin_Omega = tt.sin(self.Omega)

        # Eccentricity
        self.contact_points_op = ContactPointsOp(**contact_points_kwargs)
        if ecc is None:
            self.ecc = None
            self.M0 = 0.5 * np.pi + tt.zeros_like(self.n)
            incl_factor = 1
        else:
            self.ecc = tt.as_tensor_variable(ecc)
            if omega is None:
                raise ValueError("both e and omega must be provided")
            self.omega = tt.as_tensor_variable(omega)

            self.cos_omega = tt.cos(self.omega)
            self.sin_omega = tt.sin(self.omega)

            opsw = 1 + self.sin_omega
            E0 = 2 * tt.arctan2(
                tt.sqrt(1 - self.ecc) * self.cos_omega,
                tt.sqrt(1 + self.ecc) * opsw,
            )
            self.M0 = E0 - self.ecc * tt.sin(E0)

            ome2 = 1 - self.ecc**2
            self.K0 /= tt.sqrt(ome2)
            incl_factor = (1 + self.ecc * self.sin_omega) / ome2

        if b is not None:
            if incl is not None or duration is not None:
                raise ValueError("only one of 'incl', 'b', and 'duration' can "
                                 "be given")
            self.b = tt.as_tensor_variable(b)
            self.cos_incl = incl_factor * self.b * self.r_star / self.a_planet
            self.incl = tt.arccos(self.cos_incl)
        elif incl is not None:
            if duration is not None:
                raise ValueError("only one of 'incl', 'b', and 'duration' can "
                                 "be given")
            self.incl = tt.as_tensor_variable(incl)
            self.cos_incl = tt.cos(self.incl)
            self.b = (self.a_planet * self.cos_incl /
                      (incl_factor * self.r_star))
        elif duration is not None:
            if self.ecc is None:
                raise ValueError("fitting with duration only works for "
                                 "eccentric orbits")
            self.duration = tt.as_tensor_variable(duration)
            c = tt.sin(np.pi * self.duration * incl_factor / self.period)
            c2 = c * c
            aor = self.a_planet / self.r_star
            esinw = self.ecc * self.sin_omega
            self.b = tt.sqrt(
                (aor**2 * c2 - 1) / (c2 * esinw**2 + 2 * c2 * esinw + c2 -
                                     self.ecc**4 + 2 * self.ecc**2 - 1))
            self.b *= 1 - self.ecc**2
            self.cos_incl = incl_factor * self.b * self.r_star / self.a_planet
            self.incl = tt.arccos(self.cos_incl)
        else:
            zla = tt.zeros_like(self.a)
            self.incl = 0.5 * np.pi + zla
            self.cos_incl = zla
            self.b = zla

        if t0 is not None and t_periastron is not None:
            raise ValueError("you can't define both t0 and t_periastron")
        if t0 is None and t_periastron is None:
            t0 = 0.0

        if t0 is None:
            self.t_periastron = tt.as_tensor_variable(t_periastron)
            self.t0 = self.t_periastron + self.M0 / self.n
        else:
            self.t0 = tt.as_tensor_variable(t0)
            self.t_periastron = self.t0 - self.M0 / self.n

        self.tref = self.t_periastron

        self.sin_incl = tt.sin(self.incl)
Example #52
def bengio_RNN(n_input,
               n_hidden,
               n_output,
               input_type='real',
               out_every_t=False,
               loss_function='CE'):
    #Initialize states
    #This is the matrix from input -> hidden
    V = initialize_matrix(n_input, 2 * n_hidden, 'V')
    print(V)
    print(V.get_value())
    #Matrix from hidden -> out
    U = initialize_matrix(2 * n_hidden, n_output, "U")
    print(U)
    #Now the output bias. No input bias?
    U_bias_values = np.zeros((n_output, 1), dtype=theano.config.floatX)
    U_bias = theano.shared(U_bias_values, name='U Bias')

    #Now the hidden state
    hidden_bias_mean = 0
    #The hidden_to_hidden matrix is a list of the different weight matrices
    hidden_bias, h_0, hidden_to_hidden_matrix = initialize_complex_RNN_layer(
        n_hidden, 0, "H")

    #Don't actually understand what this is for
    swap_re_im = np.concatenate((np.arange(n_hidden,
                                           2 * n_hidden), np.arange(n_hidden)))
    print(swap_re_im)

    theta = hidden_to_hidden_matrix[0]
    reflection = hidden_to_hidden_matrix[1]
    index_permute_long = hidden_to_hidden_matrix[2]

    #This is the set of all params. h_0 is the initial parameters for h_0.

    parameters = [V, U, hidden_bias, reflection, U_bias, theta, h_0]

    #for i_layer in xrange(2,n_layers+1):
    #    Wvparams = initialize_unitary
    #I am not doing multiple layers right now
    x, y = initialize_data_nodes(loss_function, input_type, out_every_t)

    #These are structued to be sequences, non-sequences
    def recurrence(x_t, y_t, ymask_t, h_prev, cost_prev, acc_prev, V,
                   hidden_bias, out_bias, U, *argv):

        # h_prev is of size n_batch x n_layers*2*n_hidden
        Wparams = argv[0:3]
        argv = argv[3:]
        print("Testing recurrence...")
        print(n_hidden)
        # print(h_prev.get_value())
        h_prev_layer1 = h_prev[:, 0:2 * n_hidden]
        hidden_lin_output = times_unitary(h_prev_layer1, n_hidden, swap_re_im,
                                          Wparams)
        msg = theano.printing.Print('T')(x_t)

        #if (input_type=='categorical'):
        data_lin_output = V[T.cast(x_t, 'int32')]
        #else:
        #    data_lin_output = T.dot(x_t,V)

        lin_output = data_lin_output

        #Non linearity

        modulus = T.sqrt(1e-5 + lin_output**2 + lin_output[:, swap_re_im]**2)
        firstval = modulus + T.tile(hidden_bias, [2]).dimshuffle('x', 0)
        rescale = T.maximum(firstval, 0.) / (modulus + 1e-5)
        h_t = lin_output * rescale

        if (out_every_t):
            lin_output = T.dot(h_t, U) + out_bias.dimshuffle('x', 0)
            cost_t, acc_t = compute_cost_t(lin_output, loss_function, y_t)
        else:
            cost_t = theano.shared(np.float32(0.0))
            acc_t = theano.shared(np.float32(0.0))

        return h_t, cost_t, acc_t  # one output per outputs_info entry; `msg` is debug-only

    h_0_batch = T.tile(h_0, [x.shape[1], 1])

    non_sequences = [V, hidden_bias, U_bias, U] + hidden_to_hidden_matrix

    if (out_every_t):
        print "My x: ", x
        print x.shape
        sequences = [
            x, y,
            T.tile(theano.shared(np.ones((1, 1), dtype=theano.config.floatX)),
                   [x.shape[0], 1, 1])
        ]
    else:
        sequences = [
            x,
            T.tile(theano.shared(np.zeros((1, 1), dtype=theano.config.floatX)),
                   [x.shape[0], 1, 1]),
            T.tile(theano.shared(np.ones((1, 1), dtype=theano.config.floatX)),
                   [x.shape[0], 1, 1])
        ]

    #outputs_info = [h_0_batch,theano.shared(np.float32(0,0)),theano.shared(np.float32(0.0))]
    outputs_info = [
        h_0_batch,
        theano.shared(np.float32(0.0)),
        theano.shared(np.float32(0.0))
    ]

    print "Ready for scan:"
    print recurrence
    print sequences
    print non_sequences
    print outputs_info

    print "N Hidden: ", n_hidden
    print "H prev: ", non_sequences[1].get_value()

    [hidden_states, cost_steps,
     acc_steps], updates = theano.scan(fn=recurrence,
                                       sequences=sequences,
                                       non_sequences=non_sequences,
                                       outputs_info=outputs_info)

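    # NOTE: `cost_transform`, `cost_weight`, `ymask` and `lin_output` used below are
    # not defined in this snippet; presumably they are arguments or enclosing-scope
    # values in the original code.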
    if (cost_transform == 'magTimesPhase'):
        cosPhase = T.cos(lin_output)
        sinPhase = T.sin(lin_output)
        linMag = np.sqrt(10**(x / 10.0) - 1e-5)
        yest_real = linMag * cosPhase
        yest_imag = linMag * sinPhase
        yest = T.concatenate([yest_real, yest_imag], axis=2)
        mse = (yest - y)**2
        cost_steps = T.mean(mse * ymask[:, :, 0].dimshuffle(0, 1, 'x'), axis=2)
    elif cost_transform is not None:
        # assume that cost_transform is an inverse DFT followed by synthesis windowing
        lin_output_real = lin_output[:, :, :n_output]
        lin_output_imag = lin_output[:, :, n_output:]
        lin_output_sym_real = T.concatenate(
            [lin_output_real, lin_output_real[:, :, n_output - 2:0:-1]],
            axis=2)
        lin_output_sym_imag = T.concatenate(
            [-lin_output_imag, lin_output_imag[:, :, n_output - 2:0:-1]],
            axis=2)
        lin_output_sym = T.concatenate(
            [lin_output_sym_real, lin_output_sym_imag], axis=2)
        yest_xform = T.dot(lin_output_sym, cost_transform)
        # apply synthesis window
        yest_xform = yest_xform * cost_weight.dimshuffle('x', 'x', 0)
        y_real = y[:, :, :n_output]
        y_imag = y[:, :, n_output:]
        y_sym_real = T.concatenate([y_real, y_real[:, :, n_output - 2:0:-1]],
                                   axis=2)
        y_sym_imag = T.concatenate([-y_imag, y_imag[:, :, n_output - 2:0:-1]],
                                   axis=2)
        y_sym = T.concatenate([y_sym_real, y_sym_imag], axis=2)
        y_xform = T.dot(y_sym, cost_transform)
        # apply synthesis window
        y_xform = y_xform * cost_weight.dimshuffle('x', 'x', 0)
        mse = (y_xform - yest_xform)**2
        cost_steps = T.mean(mse * ymask[:, :, 0].dimshuffle(0, 1, 'x'), axis=2)
    cost = cost_steps.mean()
    accuracy = acc_steps.mean()

    costs = [cost, accuracy]
    return [x, y], parameters, costs
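# The nonlinearity inside recurrence() above is the modulus rescaling (modReLU)
# used in the unitary-RNN literature: shift the complex modulus of each hidden
# unit by a learned bias, rectify it, and keep the phase. A NumPy sketch of the
# same computation (illustrative only; hidden state stored as [real | imag]):
import numpy as np

def modrelu(lin_output, hidden_bias, n_hidden, eps=1e-5):
    # column permutation that pairs each real component with its imaginary part
    swap_re_im = np.concatenate([np.arange(n_hidden, 2 * n_hidden), np.arange(n_hidden)])
    modulus = np.sqrt(eps + lin_output**2 + lin_output[:, swap_re_im]**2)
    rescale = np.maximum(modulus + np.tile(hidden_bias, 2)[None, :], 0.0) / (modulus + eps)
    return lin_output * rescale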
Ejemplo n.º 53
def build_graph(x, depth=5):
    z = x
    for d in range(depth):
        z = tensor.sin(-z + 1)
    return z
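# A minimal usage sketch (not in the source): compile the nested-sin graph and
# evaluate it, assuming `theano` and `theano.tensor as tensor` are imported as
# in the other examples.
x = tensor.dvector('x')
f = theano.function([x], build_graph(x, depth=5))
print(f([0.0, 1.0, 2.0]))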
Ejemplo n.º 54
def passage(self, F, omega, W, num_FF):
    F_times_Omega = T.dot(F, omega)  # minibatch_size x n_rff
    Phi = (self.sf2**0.5 / num_FF**0.5) * T.concatenate(
        [T.cos(F_times_Omega), T.sin(F_times_Omega)], 1)
    F_next = T.dot(Phi, W)

    return F_next
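# passage() above is a random Fourier feature map: Phi = sqrt(sf2 / num_FF) *
# [cos(F @ Omega), sin(F @ Omega)], followed by a linear layer W. A NumPy sketch
# of the same idea (illustrative; it assumes Omega is drawn for an RBF kernel
# with lengthscale `ls`, which is not shown in the source):
import numpy as np

def rff_passage(F, W, sf2=1.0, ls=1.0, rng=np.random):
    num_FF = W.shape[0] // 2                      # W maps 2*num_FF features onward
    omega = rng.randn(F.shape[1], num_FF) / ls    # spectral frequencies
    F_times_omega = F @ omega                     # minibatch_size x num_FF
    phi = (sf2**0.5 / num_FF**0.5) * np.concatenate(
        [np.cos(F_times_omega), np.sin(F_times_omega)], axis=1)
    return phi @ W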
Ejemplo n.º 55
def SIR_with_change_points(new_cases_obs,
                           change_points_list,
                           date_begin_simulation,
                           num_days_sim,
                           diff_data_sim,
                           N,
                           priors_dict=None,
                           weekends_modulated=False,
                           weekend_modulation_type='step',
                           student_nu=4):
    """
        Parameters
        ----------
        new_cases_obs : list or array
            Timeseries (day over day) of newly reported cases (not the total number)

        change_points_list : list of dicts
            List of dictionaries, each corresponding to one change point.

            Each dict can have the following key-value pairs. If a pair is not provided,
            the respective default is used.
                * pr_mean_date_begin_transient :     datetime.datetime, NO default
                * pr_median_lambda :                 number, same as default priors, below
                * pr_sigma_lambda :                  number, same as default priors, below
                * pr_sigma_date_begin_transient :    number, 3
                * pr_median_transient_len :          number, 3
                * pr_sigma_transient_len :           number, 0.3

        date_begin_simulation : datetime.datetime
            The date at which the simulation begins

        num_days_sim : integer
            Total number of days to simulate; must be at least
            len(new_cases_obs) + diff_data_sim (data plus forecast horizon)

        diff_data_sim : integer
            Number of days that the simulation-begin predates the first data point in
            `new_cases_obs`. This is necessary so the model can fit the reporting delay.
            Set this parameter to a value larger than what you expect to find
            for the reporting delay.

        N : number
            The population size. For Germany, we used 83e6

        priors_dict : dict
            Dictionary of the prior assumptions

            Possible key-value pairs (and default values) are:
                * pr_beta_I_begin :        number, default = 100
                * pr_median_lambda_0 :     number, default = 0.4
                * pr_sigma_lambda_0 :      number, default = 0.5
                * pr_median_mu :           number, default = 1/8
                * pr_sigma_mu :            number, default = 0.2
                * pr_median_delay :        number, default = 8
                * pr_sigma_delay :         number, default = 0.2
                * pr_beta_sigma_obs :      number, default = 10
                * week_end_days :          tuple,  default = (6,7)
                * pr_mean_weekend_factor : number, default = 0.7
                * pr_sigma_weekend_factor : number, default = 0.17

        weekends_modulated : bool
            Whether to add the prior that cases are reported less on weekends. If True, the new case numbers
            on weekends are multiplied by a factor between 0 and 1 drawn from a beta distribution parametrised
            by pr_mean_weekend_factor and pr_sigma_weekend_factor.
        weekend_modulation_type : 'step' or 'abs_sine'
            Whether the weekends are modulated by a step function, which multiplies only the days given by
            week_end_days by the weekend factor, or whether the whole week is modulated by an abs(sin(x))
            function with an offset that has a flat prior.
        Returns
        -------
        : pymc3.Model
            Returns an instance of pymc3 model with the change points

    """
    if priors_dict is None:
        priors_dict = dict()

    default_priors = dict(pr_beta_I_begin=100,
                          pr_median_lambda_0=0.4,
                          pr_sigma_lambda_0=0.5,
                          pr_median_mu=1 / 8,
                          pr_sigma_mu=0.2,
                          pr_median_delay=8,
                          pr_sigma_delay=0.2,
                          pr_beta_sigma_obs=10,
                          week_end_days=(6, 7),
                          pr_mean_weekend_factor=0.7,
                          pr_sigma_weekend_factor=0.17)
    default_priors_change_points = dict(
        pr_median_lambda=default_priors["pr_median_lambda_0"],
        pr_sigma_lambda=default_priors["pr_sigma_lambda_0"],
        pr_sigma_date_begin_transient=3,
        pr_median_transient_len=3,
        pr_sigma_transient_len=0.3,
        pr_mean_date_begin_transient=None,
    )

    if not weekends_modulated:
        del default_priors['week_end_days']
        del default_priors['pr_mean_weekend_factor']
        del default_priors['pr_sigma_weekend_factor']

    for prior_name in priors_dict.keys():
        if prior_name not in default_priors:
            raise RuntimeError(f"Prior with name {prior_name} not known")
    for change_point in change_points_list:
        for prior_name in change_point.keys():
            if prior_name not in default_priors_change_points:
                raise RuntimeError(f"Prior with name {prior_name} not known")

    for prior_name, value in default_priors.items():
        if prior_name not in priors_dict:
            priors_dict[prior_name] = value
            print(f"{prior_name} was set to default value {value}")
    for prior_name, value in default_priors_change_points.items():
        for i_cp, change_point in enumerate(change_points_list):
            if prior_name not in change_point:
                change_point[prior_name] = value
                print(
                    f"{prior_name} of change point {i_cp} was set to default value {value}"
                )

    if (diff_data_sim < priors_dict["pr_median_delay"] + 3 *
            priors_dict["pr_median_delay"] * priors_dict["pr_sigma_delay"]):
        print(
            "WARNING: diff_data_sim could be to small compared to the prior delay"
        )
    if num_days_sim < len(new_cases_obs) + diff_data_sim:
        raise RuntimeError(
            "Simulation ends before the end of the data. Increase num_days_sim."
        )

    # ------------------------------------------------------------------------------ #
    # Model and prior implementation
    # ------------------------------------------------------------------------------ #

    with pm.Model() as model:
        # all pm functions now apply on the model instance
        # true number of infections at the beginning of the loaded data (the exact value is unknown, hence the prior)
        I_begin = pm.HalfCauchy(name="I_begin",
                                beta=priors_dict["pr_beta_I_begin"])

        # fraction of people that are newly infected each day
        lambda_list = []
        lambda_list.append(
            pm.Lognormal(
                name="lambda_0",
                mu=np.log(priors_dict["pr_median_lambda_0"]),
                sigma=priors_dict["pr_sigma_lambda_0"],
            ))
        for i, cp in enumerate(change_points_list):
            lambda_list.append(
                pm.Lognormal(
                    name=f"lambda_{i + 1}",
                    mu=np.log(cp["pr_median_lambda"]),
                    sigma=cp["pr_sigma_lambda"],
                ))

        # list of start dates of the transient periods of the change points
        tr_begin_list = []
        dt_before = date_begin_simulation
        for i, cp in enumerate(change_points_list):
            dt_begin_transient = cp["pr_mean_date_begin_transient"]
            if dt_before is not None and dt_before > dt_begin_transient:
                raise RuntimeError(
                    "Dates of change points are not temporally ordered")

            prior_mean = (
                dt_begin_transient - date_begin_simulation
            ).days - 1  # convert the provided date format (argument) into days (a number)

            tr_begin = pm.Normal(
                name=f"transient_begin_{i}",
                mu=prior_mean,
                sigma=cp["pr_sigma_date_begin_transient"],
            )
            tr_begin_list.append(tr_begin)
            dt_before = dt_begin_transient

        # same for transient times
        tr_len_list = []
        for i, cp in enumerate(change_points_list):
            tr_len = pm.Lognormal(
                name=f"transient_len_{i}",
                mu=np.log(cp["pr_median_transient_len"]),
                sigma=cp["pr_sigma_transient_len"],
            )
            tr_len_list.append(tr_len)

        # build the time-dependent spreading rate
        lambda_t_list = [lambda_list[0] * tt.ones(num_days_sim)]
        lambda_before = lambda_list[0]

        for tr_begin, tr_len, lambda_after in zip(tr_begin_list, tr_len_list,
                                                  lambda_list[1:]):
            lambda_t = smooth_step_function(
                start_val=0,
                end_val=1,
                t_begin=tr_begin,
                t_end=tr_begin + tr_len,
                t_total=num_days_sim,
            ) * (lambda_after - lambda_before)
            lambda_before = lambda_after
            lambda_t_list.append(lambda_t)
        lambda_t = sum(lambda_t_list)

        # fraction of people that recover each day, recovery rate mu
        mu = pm.Lognormal(
            name="mu",
            mu=np.log(priors_dict["pr_median_mu"]),
            sigma=priors_dict["pr_sigma_mu"],
        )

        # delay in days between contracting the disease and being recorded
        delay = pm.Lognormal(
            name="delay",
            mu=np.log(priors_dict["pr_median_delay"]),
            sigma=priors_dict["pr_sigma_delay"],
        )

        # prior of the error of observed cases
        sigma_obs = pm.HalfCauchy("sigma_obs",
                                  beta=priors_dict["pr_beta_sigma_obs"])

        # -------------------------------------------------------------------------- #
        # training the model with loaded data provided as argument
        # -------------------------------------------------------------------------- #

        S_begin = N - I_begin
        S, I, new_I = _SIR_model(lambda_t=lambda_t,
                                 mu=mu,
                                 S_begin=S_begin,
                                 I_begin=I_begin,
                                 N=N)

        new_cases_inferred = delay_cases(
            new_I_t=new_I,
            len_new_I_t=num_days_sim,
            len_out=num_days_sim - diff_data_sim,
            delay=delay,
            delay_diff=diff_data_sim,
        )

        if weekends_modulated:
            week_end_factor = pm.Beta(
                'weekend_factor',
                mu=priors_dict['pr_mean_weekend_factor'],
                sigma=priors_dict['pr_sigma_weekend_factor'])
            if weekend_modulation_type == 'step':
                modulation = np.zeros(num_days_sim - diff_data_sim)
                for i in range(num_days_sim - diff_data_sim):
                    date_curr = date_begin_simulation + datetime.timedelta(
                        days=i + diff_data_sim + 1)
                    if date_curr.isoweekday() in priors_dict['week_end_days']:
                        modulation[i] = 1
            elif weekend_modulation_type == 'abs_sine':
                offset_rad = pm.VonMises('offset_modulation_rad',
                                         mu=0,
                                         kappa=0.01)
                offset = pm.Deterministic('offset_modulation',
                                          offset_rad / (2 * np.pi) * 7)
                t = np.arange(num_days_sim - diff_data_sim)
                date_begin = date_begin_simulation + datetime.timedelta(
                    days=diff_data_sim + 1)
                weekday_begin = date_begin.weekday()
                t -= weekday_begin  # Sunday is zero
                modulation = 1 - tt.abs_(
                    tt.sin(t / 7 * np.pi + offset_rad / 2))
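                # period-7 modulation: 1 - |sin(pi*t/7 + offset/2)| peaks once per
                # week; offset_rad (nearly flat prior via the small-kappa VonMises)
                # shifts which day of the week is suppressed the most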

            multiplication_vec = np.ones(num_days_sim - diff_data_sim) - (
                1 - week_end_factor) * modulation
            new_cases_inferred_eff = new_cases_inferred * multiplication_vec
        else:
            new_cases_inferred_eff = new_cases_inferred

        # likelihood of the model:
        # observed cases are distributed following studentT around the model.
        # we want to approximate a Poisson distribution of new cases.
        # we choose nu=4 to get heavy tails and robustness to outliers.
        # https://www.jstor.org/stable/2290063
        num_days_data = new_cases_obs.shape[-1]
        pm.StudentT(
            name="_new_cases_studentT",
            nu=student_nu,
            mu=new_cases_inferred_eff[:num_days_data],
            sigma=tt.abs_(new_cases_inferred[:num_days_data] + 1)**0.5 *
            sigma_obs,  # +1 and tt.abs to avoid nans
            observed=new_cases_obs,
        )

        # add these observables to the model so we can extract a time series of them
        # later via e.g. `model.trace['lambda_t']`
        pm.Deterministic("lambda_t", lambda_t)
        pm.Deterministic("new_cases", new_cases_inferred_eff)
        pm.Deterministic("new_cases_raw", new_cases_inferred)
    return model
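# A hypothetical call sketch (values are illustrative, not from the source),
# following the docstring above: one change point around 2020-03-09, simulation
# starting 16 days before the first data point, and N = 83e6 for Germany.
import datetime
import numpy as np

new_cases = np.loadtxt("new_cases.txt")   # placeholder for the observed series
diff_data_sim = 16
change_points = [dict(pr_mean_date_begin_transient=datetime.datetime(2020, 3, 9),
                      pr_median_lambda=0.2)]
model = SIR_with_change_points(
    new_cases_obs=new_cases,
    change_points_list=change_points,
    date_begin_simulation=datetime.datetime(2020, 3, 1),
    num_days_sim=len(new_cases) + diff_data_sim + 14,   # data + offset + forecast
    diff_data_sim=diff_data_sim,
    N=83e6,
)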
Ejemplo n.º 56
def sin(x):
    return T.sin(x)
Ejemplo n.º 57
def sin(var, name=None):
    """Implement element-wise sin"""
    _tensor = T.sin(var.unwrap())
    return Tensor(tensor=_tensor, shape=var.shape, name=name)
Ejemplo n.º 58
# -*- coding=utf8 -*-
"""
    a simple example to show how to use theano
"""
import theano.tensor as T
from theano import function

x = T.dscalar('x')
y = 0.5 * x * x + x * T.sin(x)
dy = T.grad(y, x)

func = function(inputs=[x], outputs=dy)

start = 5.0
iter_num = 10000
rate = 0.01
for i in xrange(iter_num):
    d = func(start)
    if d < 0.005:
        break
    start -= rate * d

print start
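# A quick check (not in the original script): the gradient compiled above should
# match the analytic derivative dy/dx = x + sin(x) + x*cos(x).
import numpy as np

for v in (0.5, 1.0, 5.0):
    assert np.isclose(func(v), v + np.sin(v) + v * np.cos(v))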
Ejemplo n.º 59
	def __init__(self, model):
		
		#### theano params 
		learnrate=T.scalar('lr')
		imgin=T.vector('imgin')
		ang=T.vector('orient')
		
		
		#### load model
		sz=self.boxsz=model["boxsz"]
		sym=str(model["sym"]).lower()
		balls_pos=np.asarray(model["center"],dtype=theano.config.floatX)
		balls_wt=np.asarray(model["weight"],dtype=theano.config.floatX)
		
		
		ballzero = theano.shared(value=balls_pos, name='balls', borrow=True)
		weight = theano.shared(value=balls_wt, name='wts', borrow=True)
		wts=T.nnet.sigmoid(weight)
		nballs=len(balls_pos)
		
		### motion vector of the balls
		mov_vec= np.zeros((nballs,3),dtype=theano.config.floatX)
		movvec=theano.shared(value=mov_vec, name="move_vec", borrow=True)
		
		
		cfval=np.array(0,dtype=theano.config.floatX)
		conf=theano.shared(value=cfval, name="conf", borrow=True)
		
		ball=ballzero+conf*movvec
		
		
		### deal with symmetry.. painfully...
		nsym=Transform.get_nsym(sym)
		if sym!="c1":
			
			if sym.startswith('d'):
				asym=[]
				for i in range(nsym/2):	
					a=6.28/(nsym/2)*i
					asym.append(T.stacklists(
						[ball[:,0]*T.cos(a)-ball[:,1]*T.sin(a),
						 ball[:,0]*T.sin(a)+ball[:,1]*T.cos(a),
						 ball[:,2]]))
					asym.append(T.stacklists(
						[ball[:,0]*T.cos(a)-ball[:,1]*T.sin(a),
						 -(ball[:,0]*T.sin(a)+ball[:,1]*T.cos(a)),
						 -ball[:,2]]))
				#print asym[0].T.eval()
				balls=T.concatenate(asym,axis=1).T
				nballs*=nsym
			
			if sym.startswith('c'):
				asym=[]
				for i in range(nsym):	
					a=6.28/(nsym)*i
					asym.append(T.stacklists(
						[ball[:,0]*T.cos(a)-ball[:,1]*T.sin(a),
						 ball[:,0]*T.sin(a)+ball[:,1]*T.cos(a),
						 ball[:,2]]))
				#print asym[0].T.eval()
				balls=T.concatenate(asym,axis=1).T
				nballs*=nsym
			ww=[wts for i in range(nsym)]
			wtss=T.concatenate(ww,axis=0)
		else:
			balls=ball
			wtss=wts
		print(balls.shape.eval())
		print(wtss.shape.eval())
		#numpy2pdb(balls.eval(), "tmp.pdb")
		#exit()

		### get the 3d density map for initial tuning
		ind_np=np.indices((sz,sz,sz)).astype(theano.config.floatX)
		ind_np=np.transpose(ind_np,axes=(3,2,1,0))
		ind=theano.shared(value=ind_np,borrow=True)
		def make_3d(p,w,den,ind):
			d=(ind-p)**2
			v=w*T.exp(-T.sum(d,axis=3)/(model["width"]))
			den+=v
			return den
		
		map_3d_all,update=theano.scan(fn=make_3d,
				outputs_info=T.zeros((sz,sz,sz)),
				sequences=[balls+sz/2,wtss],
				non_sequences=ind,
				)
		map_3d=map_3d_all[-1]
		#map_3d=map_3d.dimshuffle([2,1,0])

		self.target_map=T.tensor3('tar_map')
		self.map_err=T.sum((self.target_map-map_3d)**2)
		map_grad_w=T.grad(self.map_err, weight)
		self.map_update_w=[(weight,weight-map_grad_w*learnrate)]
		map_grad_p=T.grad(self.map_err, ballzero)
		self.map_update_p=[(ballzero,ballzero-map_grad_p*learnrate)]
		
		### make rotation matrix
		azp=ang[2]+3.14/2
		altp=3.14-ang[1]
		phip=6.28-ang[0]

		matrix=[(T.cos(phip)*T.cos(azp) - T.cos(altp)*T.sin(azp)*T.sin(phip)),
		(T.cos(phip)*T.sin(azp) + T.cos(altp)*T.cos(azp)*T.sin(phip)),
		(T.sin(altp)*T.sin(phip)),
		
		(-T.sin(phip)*T.cos(azp) - T.cos(altp)*T.sin(azp)*T.cos(phip)),
		(-T.sin(phip)*T.sin(azp) + T.cos(altp)*T.cos(azp)*T.cos(phip)),
		(T.sin(altp)*T.cos(phip)),
		
		(T.sin(altp)*T.sin(azp)),
		(-T.sin(altp)*T.cos(azp)),
		T.cos(altp)]

		mat=T.stacklists(matrix).T.reshape((3,3))
		newpos=T.dot(balls,mat)#+sz/2

		tx=ang[5]
		ty=ang[4]

		newpos=T.inc_subtensor(newpos[:,0],tx)
		newpos=T.inc_subtensor(newpos[:,1],ty)

		mirror=ang[3]
		newpos=T.set_subtensor(newpos[:,1], newpos[:,1]*mirror)
		newpos=newpos+sz/2
		#newpos[:,1]+=ty
		
		
		### grid for 2d images
		grid_x =T.arange(sz).astype(theano.config.floatX)
		grid_x=grid_x.repeat(sz,axis=0).reshape((sz,sz))
		grid_y=grid_x.copy().T
		
		### now make 2d projections
		
		iy=T.arange(nballs)

		def make_img(iy,r,x,y,pos, wt):
			ret=r+ wt[iy] *T.exp((-(x-pos[iy,0])**2 -(y-pos[iy,1])**2)/(15))
			return ret
			
		img,update=theano.scan(fn=make_img,
			outputs_info=T.zeros((sz,sz)),
			sequences=[iy],
			non_sequences=[grid_x,grid_y,newpos,wtss],
			)
		out=img[-1]#[:-1]
		
		
		
		xx=out.flatten()
		zz=imgin.flatten()
		L = T.sum(xx*zz)/T.sqrt(T.sum(xx**2))/T.sqrt(T.sum(zz**2))
		cost=-L#T.mean(L)
		grad_conf=T.grad(cost, conf)
		self.update_conf=[(conf, conf-learnrate*grad_conf)]

		self.grad_ballpos=T.grad(cost, ballzero)
		self.imgin=imgin
		self.orientin=ang
		self.balls=balls
		self.cost=cost
		self.learnrate=learnrate
		self.map3d=map_3d
		self.conf=conf
		#self.updates=updates
		#self.updates_ang=updates_ang
		self.out=out
		
		self.weight=weight
		self.ballzero=ballzero
		self.movvec=movvec
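# The cost minimized above is the negative cosine similarity between the
# flattened 2D projection and the input image. A NumPy sketch of the same
# quantity (illustrative only):
import numpy as np

def ncc_cost(proj, img):
    x = proj.ravel()
    z = img.ravel()
    return -np.sum(x * z) / (np.linalg.norm(x) * np.linalg.norm(z))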
Ejemplo n.º 60
    Da2 = pm.Normal('Da2',0, 1, shape=len(f2_))

    a0 = pm.Deterministic('a0', sigmaA * Da0 + mod.A0(f0_, [numax, w, A, V1, V2]))
    a1 = pm.Deterministic('a1', sigmaA * Da1 + mod.A1(f1_, [numax, w, A, V1, V2]))
    a2 = pm.Deterministic('a2', sigmaA * Da2 + mod.A2(f2_, [numax, w, A, V1, V2]))

    h0 = pm.Deterministic('h0', 2*tt.sqr(a0)/np.pi/g0)
    h1 = pm.Deterministic('h1', 2*tt.sqr(a1)/np.pi/g1)
    h2 = pm.Deterministic('h2', 2*tt.sqr(a2)/np.pi/g2)

    # Mode splitting
    xsplit = pm.HalfNormal('xsplit', sigma=2.0, testval = init['xsplit'])
    cosi = pm.Uniform('cosi', 0., 1., testval = init['cosi'])

    i = pm.Deterministic('i', tt.arccos(cosi))
    split = pm.Deterministic('split', xsplit/tt.sin(i))

    # Background treatment
    aphi = pm.MvNormal('aphi', mu=aphi_, chol=aphi_cholesky, testval=aphi_, shape=len(aphi_))
    bphi = pm.Normal('bphi', mu=bphi_, sigma=bphi_sigma, testval=bphi_, shape=len(bphi_))

    # Construct model
    fit = mod.model([f0, f1, f2, g0, g1, g2, h0, h1, h2, split, i, aphi, bphi])

    like = pm.Gamma('like', alpha=1., beta=1./fit, observed=p)


for RV in pm_model.basic_RVs: