def build_model(self, name='cot_uniform_model'):
        """Build the PyMC3 model of galaxy spiral-arm pitch angles.

        The model is stored on ``self.model`` and every random variable is
        also kept as an attribute for later inspection.

        Parameters
        ----------
        name : str
            Name (prefix) given to the PyMC3 model.
        """
        # Define Stochastic variables
        with pm.Model(name=name) as self.model:
            # Uniform prior on cot(phi) per galaxy; cot in [1, 4]
            # corresponds to pitch angles of roughly 14-45 degrees
            self.cot_phi = pm.Uniform('cot_phi_gal',
                                      lower=1,
                                      upper=4,
                                      shape=len(self.galaxies))
            # galaxy pitch angle in degrees, derived from cot(phi)
            self.phi_gal = pm.Deterministic(
                'phi_gal', 180 / np.pi * tt.arctan(1 / self.cot_phi))
            # note we don't model inter-galaxy dispersion here
            # intra-galaxy dispersion: spread of arm pitch angles
            # within a single galaxy
            self.sigma_gal = pm.InverseGamma('sigma_gal',
                                             alpha=2,
                                             beta=20,
                                             testval=5)
            # arm offset parameter (log-spiral intercept), one per arm;
            # heavy-tailed Cauchy prior centred on zero
            self.c = pm.Cauchy('c',
                               alpha=0,
                               beta=10,
                               shape=self.n_arms,
                               testval=np.tile(0, self.n_arms))

            # radial noise
            self.sigma_r = pm.InverseGamma('sigma_r', alpha=2, beta=0.5)

            # Define Dependent variables
            # per-arm pitch angle, centred on the parent galaxy's pitch
            # angle (via self.gal_arm_map) and truncated to (0, 90) degrees
            self.phi_arm = pm.TruncatedNormal(
                'phi_arm',
                mu=self.phi_gal[self.gal_arm_map],
                sd=self.sigma_gal,
                lower=0,
                upper=90,
                shape=self.n_arms)

            # convert to a gradient for a linear fit (log-spiral slope)
            self.b = tt.tan(np.pi / 180 * self.phi_arm)
            # predicted radius of each data point: r = exp(b*theta + c),
            # indexed by the arm each point belongs to
            r = pm.Deterministic(
                'r',
                tt.exp(self.b[self.data['arm_index'].values] *
                       self.data['theta'] +
                       self.c[self.data['arm_index'].values]))

            # likelihood function: Gaussian noise on the observed radius
            self.likelihood = pm.Normal(
                'Likelihood',
                mu=r,
                sigma=self.sigma_r,
                observed=self.data['r'],
            )
# Example #2
    def logp(self, value):
        """Calculate the log-probability of the King distribution at *value*.

        Parameters
        ----------
        value : numeric
            Value(s) for which the log-probability is calculated. If the
            log-probabilities of multiple values are desired, the values
            must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        # dimensionless radius relative to location/scale
        r = (value - self.location) / self.scale
        v = 1.0 / tt.sqrt(1. + r**2)
        u = 1.0 / tt.sqrt(1. + self.rt**2)

        # presumably the normalising constant of the (truncated) King
        # profile over |r| < rt — TODO confirm against the King profile
        cte = 2 * self.scale * (self.rt / (1 + self.rt**2) + tt.arctan(
            self.rt) - 2. * tt.arcsinh(self.rt) / np.sqrt(1. + self.rt**2))

        log_d = 2. * tt.log(v - u) - tt.log(cte)

        # outside the truncation radius the density is zero; a large
        # negative constant is returned instead of -inf
        return tt.switch(tt.abs_(r) < self.rt, log_d,
                         -1e20)  #avoids inf in advi
 def SQUASH(self, sample):
     """Perform the boxing operation symbolically (see .squash).

     Maps the arctan range (-pi/2, pi/2) onto the unit interval (0, 1).
     """
     scaled = tns.arctan(sample) / np.pi
     return 0.5 + scaled
# Example #4
    def _model_setup(self):
        """Define the PyMC3 model for the supernova-cosmology fit.

        Sets up the cosmological parameters, the Phillips-relation
        (standardisation) parameters and the measurement-error
        likelihoods inside ``self._model``.
        """
        with self._model:
            # COSMOLOGY

            # matter density parameter
            omega_m = pm.Uniform("OmegaM", lower=0, upper=1.)

            # Dark energy EOS
            w = pm.Normal("w", mu=-1, sd=1)

            # My custom distance mod. function to enable
            # ADVI and HMC sampling.
            dm = distmod_w_flat(omega_m, self._h0, w, self._zcmb)

            # PHILLIPS PARAMETERS

            # M0 is the location parameter for the distribution
            # sys_scat is the scale parameter for the M0 distribution
            # rather than "unexplained variance"
            M0 = pm.Uniform("M0", lower=-20., upper=-18.)
            sys_scat = pm.HalfCauchy('sys_scat', beta=2.5)  # Gelman recommendation for variance parameter
            M_true = pm.Normal('M_true', M0, sys_scat, shape=self._n_SN)

            # following Rubin's Unity model... best idea? not sure
            # sample the arctangent of the slopes uniformly, then
            # transform, which avoids the infinite-slope pathology
            taninv_alpha = pm.Uniform("taninv_alpha", lower=-.2, upper=.3)
            taninv_beta = pm.Uniform("taninv_beta", lower=-1.4, upper=1.4)

            # Transform variables
            alpha = pm.Deterministic('alpha', T.tan(taninv_alpha))
            beta = pm.Deterministic('beta', T.tan(taninv_beta))

            # Z correction parameters: extra colour slope delta_beta
            # switched on smoothly around transition redshift zt
            delta_beta = pm.Uniform('delta_beta', lower=-1.5, upper=1.5)
            zt = pm.Uniform('zt', lower=0.2, upper=1)

            # Again using Rubin's Unity model.
            # After discussion with Rubin, the idea is that
            # these parameters are ideally sampled from a Gaussian,
            # but we know they are not entirely correct. So instead,
            # the Cauchy is less informative around the mean, while
            # still having informative tails.

            xm = pm.Cauchy('xm', alpha=0, beta=1)
            cm = pm.Cauchy('cm', alpha=0, beta=1)

            Rx_log = pm.Uniform('Rx_log', lower=-0.5, upper=0.5)
            Rc_log = pm.Uniform('Rc_log', lower=-1.5, upper=1.5)

            # Transformed variables: population widths on a log10 scale
            Rx = pm.Deterministic("Rx", T.pow(10., Rx_log))
            Rc = pm.Deterministic("Rc", T.pow(10., Rc_log))

            # latent (true) stretch and colour per supernova
            x_true = pm.Normal('x_true', mu=xm, sd=Rx, shape=self._n_SN)
            c_true = pm.Normal('c_true', mu=cm, sd=Rc, shape=self._n_SN)

            # Do the correction: apparent magnitude from the Phillips
            # relation plus the smooth (arctan step) redshift correction
            mb = pm.Deterministic("mb", M_true + dm - alpha * x_true + beta * c_true + delta_beta * (
                0.5 + 1. / np.pi * T.arctan((self._zcmb - zt) / 0.01)) * c_true)

            # Likelihood and measurement error

            obsc = pm.Normal("obsc", mu=c_true, sd=self._dcolor, observed=self._color)
            obsx = pm.Normal("obsx", mu=x_true, sd=self._dx1, observed=self._x1)
            obsm = pm.Normal("obsm", mu=mb, sd=self._dmb_obs, observed=self._mb_obs)
# Example #5
    def recurrence(x_t, h_prev, theta, V_re, V_im, hidden_bias, scale):
        """One recurrence step of a unitary-RNN-style cell on a complex state.

        The complex hidden state is stored as one real tensor: columns
        [:n_hidden] hold the real part and columns [n_hidden:] the
        imaginary part.  The hidden-to-hidden transform is a product of
        diagonal phase rotations, Householder-style reflections, a fixed
        permutation and a real diagonal scaling.

        NOTE(review): ``n_hidden``, ``reflection``, ``index_permute`` and
        ``phase_params`` are free variables taken from the enclosing
        scope, not parameters — confirm they are defined there.
        """
        def scale_diag(input, n_hidden, diag):
            """Multiply real and imaginary parts by the real diagonal ``diag``."""
            input_re = input[:, :n_hidden]
            input_im = input[:, n_hidden:]
            Diag = T.nlinalg.AllocDiag()(diag)
            input_re_times_Diag = T.dot(input_re, Diag)
            input_im_times_Diag = T.dot(input_im, Diag)

            return T.concatenate([input_re_times_Diag, input_im_times_Diag], axis=1)

        def times_diag(input, n_hidden, diag):
            """Complex multiply by diag(exp(i*diag)): rotate each unit's phase."""
            input_re = input[:, :n_hidden]
            input_im = input[:, n_hidden:]
            Re = T.nlinalg.AllocDiag()(T.cos(diag))
            Im = T.nlinalg.AllocDiag()(T.sin(diag))
            input_re_times_Re = T.dot(input_re, Re)
            input_re_times_Im = T.dot(input_re, Im)
            input_im_times_Re = T.dot(input_im, Re)
            input_im_times_Im = T.dot(input_im, Im)

            # complex product: (a+bi)(cos+isin)
            return T.concatenate([input_re_times_Re - input_im_times_Im,
                                  input_re_times_Im + input_im_times_Re], axis=1)

        def vec_permutation(input, n_hidden, index_permute):
            """Apply the same column permutation to real and imaginary parts."""
            re = input[:, :n_hidden]
            im = input[:, n_hidden:]
            re_permute = re[:, index_permute]
            im_permute = im[:, index_permute]

            return T.concatenate([re_permute, im_permute], axis=1)      
        
        def times_reflection(input, n_hidden, reflection):
            """Complex Householder-style reflection given by ``reflection``.

            NOTE(review): the real part is taken from reflection[n_hidden:]
            and the imaginary part from [:n_hidden] — verify this ordering
            matches how ``reflection`` is initialised.
            """
            input_re = input[:, :n_hidden]
            input_im = input[:, n_hidden:]
            reflect_re = reflection[n_hidden:]
            reflect_im = reflection[:n_hidden]
            
            # squared norm of the reflection vector
            vstarv = (reflect_re**2 + reflect_im**2).sum()
            input_re_reflect = input_re - 2 / vstarv * (T.outer(T.dot(input_re, reflect_re), reflect_re) +
                                                        T.outer(T.dot(input_im, reflect_im), reflect_im))
            input_im_reflect = input_im - 2 / vstarv * (-T.outer(T.dot(input_re, reflect_im), reflect_im) +
                                                        T.outer(T.dot(input_im, reflect_re), reflect_re))

            return T.concatenate([input_re_reflect, input_im_reflect], axis=1)      


        # Compute hidden linear transform: chain of diag / reflection /
        # permutation / scaling operations applied to the previous state
        step1 = times_diag(h_prev, n_hidden, theta[0,:])
        step2 = times_reflection(step1, n_hidden, reflection[0,:])
        step3 = vec_permutation(step2, n_hidden, index_permute)
        step4 = times_diag(step3, n_hidden, theta[1,:])
        step5 = times_reflection(step4, n_hidden, reflection[1,:])
        step6 = times_diag(step5, n_hidden, theta[2,:])     
        step7 = scale_diag(step6, n_hidden, scale)
        
        hidden_lin_output = step7
        
        # Compute data linear transform (input projected to re/im parts)
        data_lin_output_re = T.dot(x_t, V_re)
        data_lin_output_im = T.dot(x_t, V_im)
        data_lin_output = T.concatenate([data_lin_output_re, data_lin_output_im], axis=1)
        
        # Total linear output        
        lin_output = hidden_lin_output + data_lin_output
        lin_output_re = lin_output[:, :n_hidden]
        lin_output_im = lin_output[:, n_hidden:] 


        # Apply non-linearity ----------------------------

        # nonlinear mod and phase operations; the small constants avoid
        # division by zero
        lin_output_mod = T.sqrt(lin_output_re ** 2 + lin_output_im ** 2)
        lin_output_phase = T.arctan(lin_output_im / (lin_output_re + 1e-5))
        # modReLU-style magnitude nonlinearity: shift modulus by a bias,
        # clip at zero, then rescale back to a unit factor
        nonlin_output_mod = T.maximum(lin_output_mod + hidden_bias.dimshuffle('x',0), 0.) \
            / (lin_output_mod + 1e-5)
        
        # learned piecewise-linear warp of the phase: two line segments
        # meeting at (warp_phase_x, warp_phase_y), pinned at +-pi/2
        warp_phase_params = T.tanh(phase_params) 
        warp_phase_x = warp_phase_params[:n_hidden] * np.pi / 2
        warp_phase_y = warp_phase_params[n_hidden:] * np.pi / 2
        m1 = (warp_phase_y - 0.5 * np.pi) / (warp_phase_x - 0.5 * np.pi + 1e-5)
        m2 = (warp_phase_y + 0.5 * np.pi) / (warp_phase_x + 0.5 * np.pi + 1e-5)
        lin1 = m1 * (lin_output_phase - 0.5 * np.pi) + 0.5 * np.pi
        lin2 = m2 * (lin_output_phase + 0.5 * np.pi) - 0.5 * np.pi
        nonlin_output_phase = T.switch(T.lt(lin_output_phase, warp_phase_x), lin1, lin2)

        # recombine modulus and phase into re/im parts
        nonlin_output_re = nonlin_output_mod * T.cos(nonlin_output_phase)
        nonlin_output_im = nonlin_output_mod * T.sin(nonlin_output_phase)


        # scale RELU nonlinearity (alternative, kept for reference)
#        modulus = T.sqrt(lin_output_re ** 2 + lin_output_im ** 2)
#        rescale = T.maximum(modulus + hidden_bias.dimshuffle('x',0), 0.) / (modulus + 1e-5)
#        nonlin_output_re = lin_output_re * rescale
#        nonlin_output_im = lin_output_im * rescale      

        h_t = T.concatenate([nonlin_output_re, 
                             nonlin_output_im], axis=1)

        return h_t
def arcTan(X):
    """Element-wise arctangent of the tensor ``X``."""
    result = T.arctan(X)
    return result
# Example #7
 def B(self, alpha, beta):
     """Return (1/alpha) * arctan(beta * tan(pi * alpha / 2)) symbolically."""
     inner = beta * T.tan(self.pi * alpha / 2)
     return (1 / alpha) * T.arctan(inner)
def tangential_distance(glxy_position, halo_position):
    """Return the tangential direction vectors of galaxies about a halo.

    Both arguments are 2-d numpy arrays of (x, y) positions.  The doubled
    angle t = 2*arctan(dy/dx) makes the arctan quadrant ambiguity (a shift
    of pi) irrelevant, since it becomes a 2*pi shift inside cos/sin.
    """
    offset = glxy_position - halo_position
    doubled_angle = 2 * T.arctan(offset[:, 1] / offset[:, 0])
    return T.stack([-T.cos(doubled_angle), -T.sin(doubled_angle)], axis=1)
# Example #9
# File: ops.py  Project: yancz1989/tunas
def atan(x):
  """Element-wise arctangent wrapper around the backend tensor op."""
  result = T.arctan(x)
  return result
# Example #10
# File: ann.py  Project: kittleik/aiprog
 def arctan(self, X):
     """Arctangent activation: element-wise arctan of ``X``."""
     activated = T.arctan(X)
     return activated
# Example #11
# File: ann.py  Project: kittleik/aiprog
 def arctan(self, X):
     """Apply the arctangent activation to the tensor ``X``."""
     out = T.arctan(X)
     return out
# Example #12
def maape(y_true, y_pred):
    """Mean Arctangent Absolute Percentage Error (Keras backend).

    NOTE(review): divides by y_true, so zeros in y_true yield non-finite
    ratios (though arctan bounds the error terms otherwise).
    """
    abs_pct_error = K.abs((y_pred - y_true) / y_true)
    return K.mean(arctan(abs_pct_error))
# Example #13
 def B(self, alpha, beta):
     """Compute (1/alpha) * arctan(beta * tan(pi * alpha / 2)) symbolically."""
     tangent_term = T.tan(self.pi * alpha / 2)
     return (1 / alpha) * T.arctan(beta * tangent_term)