Example #1
 def __init__(self, C, taud, tauf, U):
     if isinstance(C, DelayConnection):
         raise AttributeError("STP does not handle heterogeneous connections yet.")
     NetworkOperation.__init__(self, lambda: None, clock=C.source.clock)
     N = len(C.source)
     P = STPGroup(N, clock=C.source.clock)
     P.x = 1
     P.u = U
     P.ux = U
     if isscalar(taud) and isscalar(tauf) and isscalar(U):
         updater = STPUpdater(C.source,
                              P,
                              taud,
                              tauf,
                              U,
                              delay=C.delay * C.source.clock.dt)
     else:
         updater = STPUpdater2(C.source,
                               P,
                               taud,
                               tauf,
                               U,
                               delay=C.delay * C.source.clock.dt)
     self.contained_objects = [updater]
     C.source = P
     C.delay = 0
     C._nstate_mod = 0  # modulation of synaptic weights
     self.vars = P
Example #2
    def _oneEvaluation(self, evaluable):
        """ This method should be called by all optimizers for producing an evaluation. """
        if self._wasUnwrapped:
            self.wrappingEvaluable._setParameters(evaluable)
            res = self.__evaluator(self.wrappingEvaluable)
        elif self._wasWrapped:
            res = self.__evaluator(evaluable.params)
        else:
            res = self.__evaluator(evaluable)
            # added by JPQ
            if self.constrained:
                self.feasible = self.__evaluator.outfeasible
                self.violation = self.__evaluator.outviolation
            # ---
        if isscalar(res):
            # detect numerical instability
            if isnan(res) or isinf(res):
                raise DivergenceError
            # always keep track of the best
            if (self.numEvaluations == 0 or self.bestEvaluation is None
                    or (self.minimize and res <= self.bestEvaluation)
                    or (not self.minimize and res >= self.bestEvaluation)):
                self.bestEvaluation = res
                self.bestEvaluable = evaluable.copy()

        self.numEvaluations += 1

        # if desired, also keep track of all evaluables and/or their fitness.
        if self.storeAllEvaluated:
            if self._wasUnwrapped:
                self._allEvaluated.append(self.wrappingEvaluable.copy())
            elif self._wasWrapped:
                self._allEvaluated.append(evaluable.params.copy())
            else:
                self._allEvaluated.append(evaluable.copy())
        if self.storeAllEvaluations:
            if self._wasOpposed and isscalar(res):
                # added by JPQ
                if self.constrained:
                    self._allEvaluations.append(
                        [-res, self.feasible, self.violation])
                # ---
                else:
                    self._allEvaluations.append(-res)
            else:
                # added by JPQ
                if self.constrained:
                    self._allEvaluations.append(
                        [res, self.feasible, self.violation])
                # ---
                else:
                    self._allEvaluations.append(res)
        # added by JPQ
        if self.constrained:
            return [res, self.feasible, self.violation]
        else:
            # ---
            return res
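For reference, a minimal standalone sketch of the best-so-far bookkeeping used above (track_best is a hypothetical helper, not part of the library):

def track_best(res, best, minimize):
    # mirrors the comparison chain above: the first result always wins,
    # then <= for minimization, >= for maximization
    if best is None:
        return res
    if minimize:
        return res if res <= best else best
    return res if res >= best else best

assert track_best(1.0, None, minimize=True) == 1.0
assert track_best(2.0, 1.0, minimize=True) == 1.0
assert track_best(2.0, 1.0, minimize=False) == 2.0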
Example #3
 def _oneEvaluation(self, evaluable):
     """ This method should be called by all optimizers for producing an evaluation. """
     if self._wasUnwrapped:
         self.wrappingEvaluable._setParameters(evaluable)
         res = self.__evaluator(self.wrappingEvaluable)
     elif self._wasWrapped:            
         res = self.__evaluator(evaluable.params)
     else:            
         res = self.__evaluator(evaluable)
         # added by JPQ
         if self.constrained:
             self.feasible = self.__evaluator.outfeasible
             self.violation = self.__evaluator.outviolation
         # ---
     if isscalar(res):
         # detect numerical instability
         if isnan(res) or isinf(res):
             raise DivergenceError
         # always keep track of the best
         if (self.numEvaluations == 0
             or self.bestEvaluation is None
             or (self.minimize and res <= self.bestEvaluation)
             or (not self.minimize and res >= self.bestEvaluation)):
             self.bestEvaluation = res
             self.bestEvaluable = evaluable.copy()
     
     self.numEvaluations += 1
     
     # if desired, also keep track of all evaluables and/or their fitness.                        
     if self.storeAllEvaluated:
         if self._wasUnwrapped:            
             self._allEvaluated.append(self.wrappingEvaluable.copy())
         elif self._wasWrapped:            
             self._allEvaluated.append(evaluable.params.copy())
         else:            
             self._allEvaluated.append(evaluable.copy())        
     if self.storeAllEvaluations:
         if self._wasOpposed and isscalar(res):
             # added by JPQ
             if self.constrained:
                 self._allEvaluations.append([-res,self.feasible,self.violation])
             # ---
             else:
                 self._allEvaluations.append(-res)
         else:
             # added by JPQ
             if self.constrained:
                 self._allEvaluations.append([res,self.feasible,self.violation])
             # ---
             else:
                 self._allEvaluations.append(res)
     # added by JPQ
     if self.constrained:
         return [res,self.feasible,self.violation]
     else:
         # ---
         return res
Example #4
def angcomp(ra1,dec1,ra2,dec2,method=3):
    """
    Return the delta_RA and delta_dec (delta = 1-2) components of the angular
    distance between objects.  This is simply an alternate output of the
    angular_distance function above. Distances are returned in degrees; method
    chooses a more or less accurate way of determining the distance (with 1
    being the fastest/least accurate).
    from astorlib.py

    # UNITS: degrees (output), degrees (input)
    """
    DEGRAD = pi/180.
    import scipy
    if scipy.isscalar(ra1) and scipy.isscalar(ra2):
        from numpy import cos,sin,sqrt
        if ra1-ra2>180:
            ra1 -= 360.
        elif ra2-ra1>180:
            ra2 -= 360.
    else:
        from scipy import cos,sin,sqrt
        if scipy.isscalar(ra1):
            t = ra2.copy()
            ra2 = ra2*0. + ra1
            ra1 = t.copy()
            t = dec2.copy()
            dec2 = dec2*0. + dec1
            dec1 = t.copy()
            del t

        ra1 = scipy.where(ra1-ra2>180,ra1-360.,ra1)
        ra2 = scipy.where(ra2-ra1>180,ra2-360.,ra2)

    ra1 = ra1*DEGRAD
    dec1 = dec1*DEGRAD
    ra2 = ra2*DEGRAD
    dec2 = dec2*DEGRAD

    if method==1:
        deltadec = dec1-dec2
        deltara = (ra1-ra2)*cos(dec2)
    else:
        div = 1.
        if method==3:
            div = sin(dec2)*sin(dec1)+cos(dec2)*cos(dec1)*cos((ra1-ra2))
        deltara = cos(dec2)*sin(ra1-ra2)/div
        deltadec = -(sin(dec2)*cos(dec1)-cos(dec2)*sin(dec1)*cos(ra1-ra2)/div)
        # Attempt at making this array-compatible; it doesn't work for single
        # integers. The check is only here for QA and could be removed:
        # if sum(div == 0) != 0:
        if div == 0:
            import sys
            print('tools: div = 0, exiting')
            sys.exit()

    return deltara/DEGRAD, deltadec/DEGRAD
Example #5
	def wrapper(self,s,poles,*args,**kwargs):
	
		squeeze = False
		if scipy.isscalar(poles) or isinstance(poles,tuple):
			squeeze = True
			poles = [poles]
		if scipy.isscalar(s): squeeze = True
		res = scipy.asarray([func(self,s,pole,*args,**kwargs) for pole in poles])
		if squeeze:
			res = scipy.squeeze(res)
			if res.ndim == 0: return res[()]
		return res
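A simplified, self-contained sketch of the same scalar-normalizing decorator pattern (numpy in place of the scipy aliases; per_pole and power are made-up names, and the squeeze for scalar s is omitted):

import numpy as np

def per_pole(func):
    def wrapper(s, poles, *args, **kwargs):
        # normalize a scalar or tuple pole into a one-element list
        squeeze = np.isscalar(poles) or isinstance(poles, tuple)
        if squeeze:
            poles = [poles]
        res = np.asarray([func(s, pole, *args, **kwargs) for pole in poles])
        return np.squeeze(res) if squeeze else res
    return wrapper

@per_pole
def power(s, pole):
    return s ** pole

print(power(2.0, 3))       # scalar pole -> squeezed result
print(power(2.0, [1, 2]))  # list of poles -> array([2., 4.])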
Example #6
def angcomp(ra1,dec1,ra2,dec2,method=3):
    """
    Return the delta_RA and delta_dec (delta = 1-2) components of the angular
    distance between objects.  This is simply an alternate output of the
    angular_distance function above. Distances are returned in degrees; method
    chooses a more or less accurate way of determining the distance (with 1
    being the fastest/least accurate).
    from astorlib.py
    """
    DEGRAD = pi/180.
    import scipy
    if scipy.isscalar(ra1) and scipy.isscalar(ra2):
        from math import cos,sin,sqrt
        if ra1-ra2>180:
            ra1 -= 360.
        elif ra2-ra1>180:
            ra2 -= 360.
    else:
        from scipy import cos,sin,sqrt
        if scipy.isscalar(ra1):
            t = ra2.copy()
            ra2 = ra2*0. + ra1
            ra1 = t.copy()
            t = dec2.copy()
            dec2 = dec2*0. + dec1
            dec1 = t.copy()
            del t

        ra1 = scipy.where(ra1-ra2>180,ra1-360.,ra1)
        ra2 = scipy.where(ra2-ra1>180,ra2-360.,ra2)

    ra1 = ra1*DEGRAD
    dec1 = dec1*DEGRAD
    ra2 = ra2*DEGRAD
    dec2 = dec2*DEGRAD

    div = 1.  # initialized here so the zero-division check below also works for method 1
    if method==1:
        deltadec = dec1-dec2
        deltara = (ra1-ra2)*cos(dec2)
    else:
        if method==3:
            div = sin(dec2)*sin(dec1)+cos(dec2)*cos(dec1)*cos((ra1-ra2))
        deltara = cos(dec2)*sin(ra1-ra2)/div
        deltadec = -(sin(dec2)*cos(dec1)-cos(dec2)*sin(dec1)*cos(ra1-ra2)/div)

    if isinstance(div, float) and div == 0:
        raise ValueError("dividing by div = 0")
    elif isinstance(div, numpy.ndarray) and (div == 0).any():
        raise ValueError("dividing by div = 0")

    return deltara/DEGRAD, deltadec/DEGRAD
Example #7
def generate_tolerances(net, rtol, atol=None):
    if rtol is None:
        rtol = global_rtol
    if scipy.isscalar(rtol):
        rtol = scipy.ones(len(net.dynamicVars)) * rtol

    # We set atol to be a minimum of global_atol to avoid variables with large
    # typical values going negative.
    if scipy.isscalar(atol) or atol is None:
        typ_vals = [abs(net.get_var_typical_val(id))
                    for id in net.dynamicVars.keys()]
        atol = rtol * scipy.asarray(typ_vals)
        if global_atol:
            atol = scipy.minimum(atol, global_atol)
    return rtol, atol
Example #8
def generate_tolerances(net, rtol, atol=None):
    if rtol is None:
        rtol = global_rtol
    if scipy.isscalar(rtol):
        rtol = scipy.ones(len(net.dynamicVars)) * rtol

    # We set atol to be a minimum of global_atol to avoid variables with large
    # typical values going negative.
    if scipy.isscalar(atol) or atol is None:
        typ_vals = [abs(net.get_var_typical_val(id))
                    for id in net.dynamicVars.keys()]
        atol = rtol * scipy.asarray(typ_vals)
        if global_atol:
            atol = scipy.minimum(atol, global_atol)
    return rtol, atol
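A minimal sketch of the scalar-to-per-variable broadcast performed above (standalone; numpy standing in for scipy, all values hypothetical):

import numpy as np

n_vars = 3
rtol = 1e-6                                # scalar tolerance
if np.isscalar(rtol):
    rtol = np.ones(n_vars) * rtol          # one tolerance per dynamic variable
typ_vals = np.array([1.0, 100.0, 0.01])    # hypothetical typical values
atol = np.minimum(rtol * np.abs(typ_vals), 1e-4)  # cap at a global_atol-like floor
print(rtol, atol)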
Example #9
    def oscillate_hessian(self, params, eps=1e-5,
                          relativeScale=True, stepSizeCutoff=1e-6,
                          verbose=False, f0_zero=False):
        """
        Same as hessian except uses oscillate cost function

        """

        nOv = len(params)
        if scipy.isscalar(eps):
            eps = scipy.ones(len(params), scipy.float_) * eps

        ## compute cost at f(x), set f0 to zero if f0_zero is true
        if f0_zero:
            f0 = 0
        else:
            f0 = self.oscillate_cost(params)

        hess = scipy.zeros((nOv, nOv), scipy.float_)

        ## compute all (numParams*(numParams + 1))/2 unique hessian elements
        for i in range(nOv):
            for j in range(i, nOv):
                hess[i][j] = self.hessian_elem(self.oscillate_cost, f0,
                                               params, i, j, eps[i], eps[j], 
                                               relativeScale, stepSizeCutoff,
                                               verbose)
                hess[j][i] = hess[i][j]

        return hess
Example #10
    def periodic_hessian_log_params(self, params, eps=1e-5,
                           relativeScale=False, stepSizeCutoff=1e-6,
                           verbose=False):
        """
        Same as hessian_log_params except uses periodic cost function
        Relative scale is false by default here
        """
        nOv = len(params)
        if scipy.isscalar(eps):
            eps = scipy.ones(len(params), scipy.float_) * eps

        ## compute cost at f(x)
        f0 = self.periodic_cost_log_params(scipy.log(params))

        hess = scipy.zeros((nOv, nOv), scipy.float_)

        ## compute all (numParams*(numParams + 1))/2 unique hessian elements
        for i in range(nOv):
            for j in range(i, nOv):
                hess[i][j] = self.hessian_elem(self.periodic_cost_log_params, f0,
                                               scipy.log(params), 
                                               i, j, eps[i], eps[j], 
                                               relativeScale, stepSizeCutoff,
                                               verbose)
                hess[j][i] = hess[i][j]

        return hess
Example #11
    def hessian_log_params(self, params, eps,
                           relativeScale=False, stepSizeCutoff=1e-6,
                           verbose=False):
        """
        Returns the hessian of the model in log parameters.

        eps: Sets the stepsize to try
        relativeScale: If True, step i is of size p[i] * eps, otherwise it is
                       eps
        stepSizeCutoff: The minimum stepsize to take
        verbose: If True, a message will be printed with each hessian element
                calculated
        """
        nOv = len(params)
        if scipy.isscalar(eps):
            eps = scipy.ones(len(params), scipy.float_) * eps

        ## compute cost at f(x)
        f0 = self.cost_log_params(scipy.log(params))

        hess = scipy.zeros((nOv, nOv), scipy.float_)

        ## compute all (numParams*(numParams + 1))/2 unique hessian elements
        for i in range(nOv):
            for j in range(i, nOv):
                hess[i][j] = self.hessian_elem(self.cost_log_params, f0,
                                               scipy.log(params), 
                                               i, j, eps[i], eps[j], 
                                               relativeScale, stepSizeCutoff,
                                               verbose)
                hess[j][i] = hess[i][j]

        return hess
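hessian_elem is defined elsewhere in this project; presumably it evaluates a finite-difference cross term. A minimal sketch under that assumption (the scaling and cutoff arguments are ignored), using the forward-difference formula H_ij ~ (f(x+hi+hj) - f(x+hi) - f(x+hj) + f(x)) / (hi*hj):

import numpy as np

def hessian_elem_sketch(f, f0, x, i, j, epsi, epsj):
    # forward-difference estimate of d2f / (dx_i dx_j); a sketch,
    # not the library's hessian_elem
    ei = np.zeros_like(x); ei[i] = epsi
    ej = np.zeros_like(x); ej[j] = epsj
    return (f(x + ei + ej) - f(x + ei) - f(x + ej) + f0) / (epsi * epsj)

f = lambda x: x[0]**2 * x[1]
x0 = np.array([1.0, 2.0])
print(hessian_elem_sketch(f, f(x0), x0, 0, 1, 1e-5, 1e-5))  # ~2.0 = d2f/dx0dx1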
Example #12
    def get_data(self, pos, phase=None):
        """return voltage data for relative position and phase
        
        The relative position vector is matched with neuron_data.horizon and
        
        :Parameters:
            pos : ndarray
                The relative position in the dataset.
            phase : sequence
                The phase within the waveform to retrieve. Either a single
                sample index or a sequence. If None is given, return the
                complete waveform.
                Default=None
        """

        # checking pos
        if vector_norm(pos) > self.horizon:
            raise BeyondHorizonError('norm of relative position [%s] '
                                     'lies beyond the horizon [%s]!' %
                                     (vector_norm(pos), self.horizon))

        # check for phase
        if phase is None:
            phase = range(self.intra_v.size)
        if N.isscalar(phase):
            phase = range(phase, phase + 1)

        # call get_data implementation
        return self._get_data(pos, phase)
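A small standalone sketch of the phase normalization above (range replacing the original Python 2 xrange; names hypothetical):

import numpy as np

def normalize_phase(phase, n_samples):
    # None -> the full waveform, scalar -> a single-sample window
    if phase is None:
        return range(n_samples)
    if np.isscalar(phase):
        return range(phase, phase + 1)
    return phase

print(list(normalize_phase(None, 4)))  # [0, 1, 2, 3]
print(list(normalize_phase(2, 4)))     # [2]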
Example #13
def _compute_qth_percentile(sorted, q, axis, out):
    if not isscalar(q):
        p = [_compute_qth_percentile(sorted, qi, axis, None) for qi in q]

        if out is not None:
            out.flat = p

        return p

    q = q / 100.0
    if (q < 0) or (q > 1):
        raise ValueError("percentile must be either in the range [0,100]")

    indexer = [slice(None)] * sorted.ndim
    Nx = sorted.shape[axis]
    index = q * (Nx - 1)
    i = int(index)
    if i == index:
        indexer[axis] = slice(i, i + 1)
        weights = array(1)
        sumval = 1.0
    else:
        indexer[axis] = slice(i, i + 2)
        j = i + 1
        weights = array([(j - index), (index - i)], float)
        wshape = [1] * sorted.ndim
        wshape[axis] = 2
        weights.shape = wshape
        sumval = weights.sum()

    # Use add.reduce in both cases to coerce data type as well as
    # check and use out array.
    return add.reduce(sorted[tuple(indexer)] * weights, axis=axis, out=out) / sumval
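Worked numbers for the interpolation branch: with Nx = 5 and q = 60, index = 0.6 * 4 = 2.4, so i = 2 and the weights (j - index, index - i) = (0.6, 0.4) blend sorted[2] and sorted[3]. A standalone check against numpy (hypothetical data):

import numpy as np

data = np.array([1.0, 2.0, 4.0, 8.0, 16.0])  # already sorted
q = 60.0
index = q / 100.0 * (len(data) - 1)          # 2.4
i = int(index)
w = np.array([i + 1 - index, index - i])     # [0.6, 0.4]
manual = (data[i:i + 2] * w).sum() / w.sum()
print(manual, np.percentile(data, q))        # both 5.6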
Example #14
 def __init__(self,net,prior=S.array([100,1])):
     if SP.isscalar(prior):
         self.fixE1=prior
         CNodeEps.__init__(self,net)
     else:
         self.fixE1 = None
         CNodeEps.__init__(self,net,prior)
Example #15
	def derived_parameters(self,values,errors={}):
		sigma8 = self.model_sp.sigma8*values.get('rsigma8',1.)
		dvalues = {par:1.*val for par,val in values.items() if par in self.fitargs}
		def mults8(key):
			for par in ['f','b']:
				if key.startswith(par) and not key.startswith('beta') and 'sigma8' not in key: return True
			return False
		for key in list(dvalues.keys()):
			if mults8(key): dvalues['{}sigma8'.format(key)] = values[key]*sigma8
		derrors = {par:1.*val for par,val in errors.items() if par in self.fitargs}
		for key in list(derrors.keys()):
			if key in self.vary: derrors[key] = scipy.sqrt(self.scale_parameter_covariance[key])*errors[key]
			if mults8(key): derrors['{}sigma8'.format(key)] = derrors[key]*sigma8
		dvalues['sigma8'] = sigma8
		tmp = list(values.values())[0]
		if not scipy.isscalar(tmp): dvalues['sigma8'] = scipy.ones_like(tmp)*dvalues['sigma8'] # to get the correct shape
		derrors['sigma8'] = 0.*dvalues['sigma8']
		dlatex = {par:val for par,val in self.latex.items()}
		for key in list(dlatex.keys()):
			if mults8(key): dlatex['{}sigma8'.format(key)] = '{}\\sigma_{{8}}'.format(self.latex[key])
		dlatex['sigma8'] = '\\sigma_{{8}}'
		dsorted = pyspectrum.utils.sorted_parameters(dlatex.keys())
		params = {key:val for key,val in self.params.items()}
		params['scalecov'] = self.scale_parameter_covariance
		params['scalemockcov'] = self.scale_mock_parameter_covariance

		return dvalues,derrors,dlatex,dsorted,params
Example #16
def errorScalingFactor(observable, beta):
  """
  Look up the numerical factors to apply to the sky averaged parallax error in order to obtain error
  values for a given astrometric parameter, taking the Ecliptic latitude and the number of transits into
  account.

  Parameters
  ----------

  observable - Name of astrometric observable (one of: alphaStar, delta, parallax, muAlphaStar, muDelta)
  beta       - Value(s) of the Ecliptic latitude.

  Returns
  -------

  Numerical factors to apply to the errors of the given observable.
  """
  if isscalar(beta):
    index=int(floor(abs(sin(beta))*_numStepsSinBeta))
    if index == _numStepsSinBeta:
      return _astrometricErrorFactors[observable][_numStepsSinBeta-1]
    else:
      return _astrometricErrorFactors[observable][index]
  else:
    indices = array(floor(abs(sin(beta))*_numStepsSinBeta), dtype=int)
    indices[(indices==_numStepsSinBeta)] = _numStepsSinBeta-1
    return _astrometricErrorFactors[observable][indices]
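The lookup amounts to binning |sin(beta)| into _numStepsSinBeta equal steps and clamping the last bin; a standalone sketch with made-up factors:

import numpy as np

n_steps = 10
factors = np.linspace(1.0, 1.5, n_steps)  # hypothetical error factors

def scaling(beta):
    idx = np.floor(np.abs(np.sin(beta)) * n_steps).astype(int)
    return factors[np.minimum(idx, n_steps - 1)]  # clamp sin(beta) == +/-1

print(scaling(0.0), scaling(np.pi / 2))    # scalar input
print(scaling(np.array([0.0, 0.3, 1.2])))  # array input, same code path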
Example #17
def RowToLatex(rowin, fmt='%0.4g', eps=1e-12, debug=0):
    if debug:
        print('rowin = ' + str(rowin))
        print('type(rowin) = '+str(type(rowin)))
        test = is_sage(rowin)
        print('is_sage test = '+str(test))
    if hasattr(rowin,'ToLatex'):
        if debug:
            print('case 1')
        return rowin.ToLatex(eps=eps, fmt=fmt)
    elif isscalar(rowin):
        if debug:
            print('case 2')
        #return fmt % rowin
        return ComplexNumToStr(rowin, eps=eps, fmt=fmt)
    elif is_sympy(rowin) or is_sage(rowin):
        if debug:
            print('case 3')
        return sympy.latex(rowin, profile=sympy_profile)
    elif type(rowin) == matrix:
        if debug:
            print('case 4')

        strlist = []
        for i in range(rowin.size):
            item = rowin[0,i]
            strlist.append(ComplexNumToStr(item, eps=eps, fmt=fmt))
    else:
        strlist =  [ComplexNumToStr(item, eps=eps, fmt=fmt) for item in rowin]
    return ' & '.join(strlist)
Example #18
def _compute_qth_percentile(sorted, q, axis, out):
    if not isscalar(q):
        p = [_compute_qth_percentile(sorted, qi, axis, None)
             for qi in q]

        if out is not None:
            out.flat = p

        return p

    q = q / 100.0
    if (q < 0) or (q > 1):
        raise ValueError("percentile must be either in the range [0,100]")

    indexer = [slice(None)] * sorted.ndim
    Nx = sorted.shape[axis]
    index = q * (Nx - 1)
    i = int(index)
    if i == index:
        indexer[axis] = slice(i, i + 1)
        weights = array(1)
        sumval = 1.0
    else:
        indexer[axis] = slice(i, i + 2)
        j = i + 1
        weights = array([(j - index), (index - i)], float)
        wshape = [1] * sorted.ndim
        wshape[axis] = 2
        weights.shape = wshape
        sumval = weights.sum()

    # Use add.reduce in both cases to coerce data type as well as
    # check and use out array.
    return add.reduce(sorted[tuple(indexer)] * weights, axis=axis, out=out) / sumval
Example #19
    def set_angular_selection(self,
                              costheta,
                              angular,
                              interpol='lin',
                              copy=False):

        if scipy.isscalar(costheta[0]): costheta = [costheta]
        shapeangular = angular.shape
        self._shape_angular = scipy.array(shapeangular, dtype=ctypes.c_size_t)

        self._costheta = [
            new(x_, dtype=self.C_TYPE, copy=copy) for x_ in costheta
        ]
        self.__costheta = scipy.concatenate(self._costheta)
        shape = tuple(map(len, self._costheta))
        self._angular = new(angular, dtype=self.C_TYPE, copy=copy)

        typecostheta = ctypeslib.ndpointer(dtype=self.C_TYPE,
                                           shape=scipy.sum(shape))
        typeangular = ctypeslib.ndpointer(dtype=self.C_TYPE,
                                          shape=scipy.prod(shape))

        self.anacorr.set_angular_selection.argtypes = (
            typecostheta, typeangular,
            ctypeslib.ndpointer(dtype=ctypes.c_size_t, shape=len(shape)),
            ctypes.c_size_t, ctypes.c_char_p)
        self.anacorr.set_angular_selection(self.__costheta, self._angular,
                                           self._shape_angular,
                                           len(shapeangular),
                                           interpol.encode('utf-8'))

        self._angular.shape = shapeangular
Example #20
 def run_mnprofile(self, mnprofile={}):
     #self.profile = {}
     #self.contour = {}
     params = mnprofile.get('params', [])
     npoints = mnprofile.get('npoints', 30)
     bounds = mnprofile.get('bounds', 2)
     subtract_min = mnprofile.get('subtract_min', False)
     if scipy.isscalar(npoints): npoints = [npoints] * len(params)
     if scipy.isscalar(bounds) or isinstance(bounds, tuple):
         bounds = [bounds] * len(params)
     for par, npoint, bound in zip(params, npoints, bounds):
         kwargs = dict(bins=npoint, bound=bound, subtract_min=subtract_min)
         self.logger.info('Profiling {} with kwargs {}.'.format(
             par, kwargs))
         x, y, status = self.minuit.mnprofile(par, **kwargs)
         self.profile[par] = scipy.array([x, y])
Example #21
 def callback(x):
     if sp.isscalar(x):
         residuals.append(x)
     else:
         residuals.append(residual_norm(A, x, b))
     if cb is not None:
         cb(x)
Example #22
    def hessian_log_params(self,
                           params,
                           eps,
                           relativeScale=False,
                           stepSizeCutoff=1e-6,
                           verbose=False):
        """
        Returns the hessian of the model in log parameters.

        eps: Sets the stepsize to try
        relativeScale: If True, step i is of size p[i] * eps, otherwise it is
                       eps
        stepSizeCutoff: The minimum stepsize to take
        verbose: If True, a message will be printed with each hessian element
                calculated
        """
        nOv = len(params)
        if scipy.isscalar(eps):
            eps = scipy.ones(len(params), scipy.float_) * eps

        ## compute cost at f(x)
        f0 = self.cost_log_params(scipy.log(params))

        hess = scipy.zeros((nOv, nOv), scipy.float_)

        ## compute all (numParams*(numParams + 1))/2 unique hessian elements
        for i in range(nOv):
            for j in range(i, nOv):
                hess[i][j] = self.hessian_elem(self.cost_log_params, f0,
                                               scipy.log(params), i, j, eps[i],
                                               eps[j], relativeScale,
                                               stepSizeCutoff, verbose)
                hess[j][i] = hess[i][j]

        return hess
Example #23
 def callback(x):
     if sp.isscalar(x):
         residuals.append(x)
     else:
         residuals.append(residual_norm(A, x, b))
     if cb is not None:
         cb(x)
Example #24
def errorScalingFactor(observable, beta):
    """
  Look up the numerical factors to apply to the sky averaged parallax error in order to obtain error
  values for a given astrometric parameter, taking the Ecliptic latitude and the number of transits into
  account.

  Parameters
  ----------

  observable - Name of astrometric observable (one of: alphaStar, delta, parallax, muAlphaStar, muDelta)
  beta       - Value(s) of the Ecliptic latitude.

  Returns
  -------

  Numerical factors to apply to the errors of the given observable.
  """
    if isscalar(beta):
        index = int(floor(abs(sin(beta)) * _numStepsSinBeta))
        if index == _numStepsSinBeta:
            return _astrometricErrorFactors[observable][_numStepsSinBeta - 1]
        else:
            return _astrometricErrorFactors[observable][index]
    else:
        indices = array(floor(abs(sin(beta)) * _numStepsSinBeta), dtype=int)
        indices[(indices == _numStepsSinBeta)] = _numStepsSinBeta - 1
        return _astrometricErrorFactors[observable][indices]
Example #25
    def set_window(self, x, nells=None):

        if scipy.isscalar(x[0]): x = [x]
        self.x = [new(x_, dtype=self.C_TYPE) for x_ in x]
        self._x = scipy.concatenate(self.x)
        shapex = tuple(map(len, self.x))
        self._shape_window = scipy.array(shapex, dtype=ctypes.c_size_t)

        if nells is None:
            nells = tuple(
                len(self._ells[num + 1]) for num in range(len(shapex)))
        shapey = shapex + nells
        self.y = scipy.zeros(shapey, dtype=self.C_TYPE).flatten()

        typex = ctypeslib.ndpointer(dtype=self.C_TYPE, shape=scipy.sum(shapex))
        typey = ctypeslib.ndpointer(dtype=self.C_TYPE,
                                    shape=scipy.prod(shapey))

        self.anacorr.set_window.argtypes = (typex, typey,
                                            ctypeslib.ndpointer(
                                                dtype=ctypes.c_size_t,
                                                shape=len(shapex)),
                                            ctypes.c_size_t)
        self.anacorr.set_window(self._x, self.y, self._shape_window,
                                len(shapex))

        self.y.shape = shapey
Example #26
    def get_data(self, pos, phase=None):
        """return voltage data for relative position and phase
        
        The relative position vector is matched with neuron_data.horizon and
        
        :Parameters:
            pos : ndarray
                The relative position in the dataset.
            phase : sequence
                The phase within the waveform to retrieve. Either a single
                sample index or a sequence. If None is given, return the
                complete waveform.
                Default=None
        """

        # checking pos
        if vector_norm(pos) > self.horizon:
            raise BeyondHorizonError('norm of relative position [%s] '
                                     'lies beyond the horizon [%s]!'
                                     % (vector_norm(pos), self.horizon))

        # check for phase
        if phase is None:
            phase = range(self.intra_v.size)
        if N.isscalar(phase):
            phase = range(phase, phase + 1)

        # call get_data implementation
        return self._get_data(pos, phase)
Example #27
    def transformSkyCoordinateErrors(self,
                                     phi,
                                     theta,
                                     sigPhiStar,
                                     sigTheta,
                                     rhoPhiTheta=0):
        """
        Converts the sky coordinate errors from one reference system to another, including the covariance
        term. Equations (1.5.4) and (1.5.20) from section 1.5 in the Hipparcos Explanatory Volume 1 are used.

        Parameters
        ----------

        phi         - The longitude-like angle of the position of the source (radians).
        theta       - The latitude-like angle of the position of the source (radians).
        sigPhiStar  - Standard error in the longitude-like angle of the position of the source (radians or
                      sexagesimal units, including cos(latitude) term)
        sigTheta    - Standard error in the latitude-like angle of the position of the source (radians or
                      sexagesimal units)

        Keywords (optional)
        -------------------

        rhoPhiTheta - Correlation coefficient of the position errors. Set to zero if this keyword is not
                      provided.

        Returns
        ------
        
        sigPhiRotStar  - The transformed standard error in the longitude-like angle (including
                         cos(latitude) factor)
        sigThetaRot    - The transformed standard error in the latitude-like angle.
        rhoPhiThetaRot - The transformed correlation coefficient.
        """
        if isscalar(rhoPhiTheta) and not isscalar(sigTheta):
            rhoPhiTheta = zeros_like(sigTheta) + rhoPhiTheta
        c, s = self._getJacobian(phi, theta)
        cSqr = c * c
        sSqr = s * s
        covar = sigPhiStar * sigTheta * rhoPhiTheta
        varPhiStar = sigPhiStar * sigPhiStar
        varTheta = sigTheta * sigTheta
        varPhiStarRot = cSqr * varPhiStar + sSqr * varTheta + 2.0 * covar * c * s
        varThetaRot = sSqr * varPhiStar + cSqr * varTheta - 2.0 * covar * c * s
        covarRot = (cSqr - sSqr) * covar + c * s * (varTheta - varPhiStar)
        return sqrt(varPhiStarRot), sqrt(varThetaRot), covarRot / sqrt(
            varPhiStarRot * varThetaRot)
Example #28
 def set_n_los(self, los, losn=0, n=1):
     if isinstance(los, str): los = [los] * n
     if scipy.isscalar(losn): losn = [losn] * n
     los = [
         self.set_los(ilos + 1, los=los[ilos], n=losn[ilos])
         for ilos in range(n)
     ]
     return los
Example #29
 def _bestFound(self):
     """ return the best found evaluable and its associated fitness. """
     bestE = self.bestEvaluable.params.copy() if self._wasWrapped else self.bestEvaluable
     if self._wasOpposed and isscalar(self.bestEvaluation):
         bestF = -self.bestEvaluation
     else:
         bestF = self.bestEvaluation
     return bestE, bestF
Example #30
 def _pdf(x):
     rlim = np.power(10, 0.2 * (mlim - x + 5))
     if isscalar(rlim):
         if rlim > rmax:
             rlim = rmax
     else:
         rlim[(rlim > rmax)] = rmax
     return (rlim**3 - rmin**3) / A * norm.pdf(x, loc=mu, scale=sigma)
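The scalar-versus-array clamp inside _pdf is a common numpy idiom; a compact standalone sketch (rmax hypothetical):

import numpy as np

rmax = 10.0

def clamp(rlim):
    if np.isscalar(rlim):
        return min(rlim, rmax)
    rlim = rlim.copy()
    rlim[rlim > rmax] = rmax  # boolean-mask assignment for arrays
    return rlim

print(clamp(12.0))                   # 10.0
print(clamp(np.array([3.0, 12.0])))  # [ 3. 10.]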
Example #31
 def _bestFound(self):
     """ return the best found evaluable and its associated fitness. """
     bestE = self.bestEvaluable.params.copy() if self._wasWrapped else self.bestEvaluable
     if self._wasOpposed and isscalar(self.bestEvaluation):
         bestF = -self.bestEvaluation
     else:
         bestF = self.bestEvaluation
     return bestE, bestF
Example #32
 def jacobian(self, reference_point):
     jac = self._element.function_gradient(self._mesh_entity.vertex_coords(),
                                           reference_point)
     if sp.isscalar(jac):
         return sp.array([[jac]])
     elif sp.all(sp.array(jac.shape) == 1):
         return jac.reshape((1, 1))
     else:
         return jac
Example #33
File: stp.py Project: sivaven/brian
 def __init__(self, C, taud, tauf, U):
     if isinstance(C, DelayConnection):
         raise AttributeError("STP does not handle heterogeneous connections yet.")
     NetworkOperation.__init__(self, lambda:None, clock=C.source.clock)
     N = len(C.source)
     P = STPGroup(N, clock=C.source.clock)
     P.x = 1
     P.u = U
     P.ux = U
     if isscalar(taud) and isscalar(tauf) and isscalar(U):
         updater = STPUpdater(C.source, P, taud, tauf, U, delay=C.delay * C.source.clock.dt)
     else:
         updater = STPUpdater2(C.source, P, taud, tauf, U, delay=C.delay * C.source.clock.dt)
     self.contained_objects = [updater]
     C.source = P
     C.delay = 0
     C._nstate_mod = 0 # modulation of synaptic weights
     self.vars = P
Example #34
 def __coerce__(self, other):
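     # __coerce__ is a Python 2 numeric-coercion hook; Python 3 never calls it.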
     try:
         other = sp.asanyarray(other)
         if other.shape == self.shape or sp.isscalar(other):
             return (self.toarray(), other)
         else:
             return NotImplemented
     except Exception:
         return NotImplemented
Example #35
 def __coerce__(self, other):
     try:
         other = sp.asanyarray(other)
         if other.shape == self.shape or sp.isscalar(other):
             return (self.toarray(), other)
         else:
             return NotImplemented
     except Exception:
         return NotImplemented
Example #36
def discrete_data(net, params, pts, interval, vars=None, random=False,
                  uncert_func=typ_val_uncert(0.1, 1e-14)):
    """
    Return a set of data points for the given network generated at the given
    parameters.

    net         Network to generate data for
    params      Parameters for this evaluation of the network
    pts         Number of data points to output
    interval    Integration interval
    vars        Variables to output data for, defaults to all species in net
    random      If False data points are distributed evenly over interval
                If True they are spread randomly and uniformly over each
                variable
    uncert_func Function that takes in a trajectory and a variable id and
                returns what uncertainty should be assumed for that variable,
                either as a scalar or a list the same length as the trajectory.
    """
    # Default for vars
    if vars is None:
        vars = net.species.keys()

    # Assign observed times to each variable
    var_times = {}
    for var in vars:
        if random:
            var_times[var] = scipy.rand(pts) * (interval[1]-interval[0]) + interval[0]
        else:
            var_times[var] = scipy.linspace(interval[0], interval[1], pts)

    # Create a sorted list of the unique times in the var_times dict
    int_times = set(scipy.ravel(list(var_times.values())))
    int_times.add(0)
    int_times = list(int_times)
    int_times.sort()

    # Get the trajectory
    traj = Dynamics.integrate(net, int_times, params=params, fill_traj=False)

    # Build up our data dictionary
    data = {}
    for var, times in var_times.items():
        var_data = {}
        data[var] = var_data
        
        # Calculate our uncertainties
        var_uncerts = uncert_func(traj, var)
        for time in times:
            val = traj.get_var_val(var, time)
            if scipy.isscalar(var_uncerts):
                uncert = var_uncerts
            else:
                index = traj._get_time_index(time)
                uncert = var_uncerts[index]
            var_data[time] = (val, uncert)
    return data
Example #37
def discrete_data(net, params, pts, interval, vars=None, random=False,
                  uncert_func=typ_val_uncert(0.1, 1e-14)):
    """
    Return a set of data points for the given network generated at the given
    parameters.

    net         Network to generate data for
    params      Parameters for this evaluation of the network
    pts         Number of data points to output
    interval    Integration interval
    vars        Variables to output data for, defaults to all species in net
    random      If False data points are distributed evenly over interval
                If True they are spread randomly and uniformly over each
                variable
    uncert_func Function that takes in a trajectory and a variable id and
                returns what uncertainty should be assumed for that variable,
                either as a scalar or a list the same length as the trajectory.
    """
    # Default for vars
    if vars is None:
        vars = net.species.keys()

    # Assign observed times to each variable
    var_times = {}
    for var in vars:
        if random:
            var_times[var] = scipy.rand(pts) * (interval[1]-interval[0]) + interval[0]
        else:
            var_times[var] = scipy.linspace(interval[0], interval[1], pts)

    # Create a sorted list of the unique times in the var_times dict
    int_times = set(scipy.ravel(list(var_times.values())))
    int_times.add(0)
    int_times = list(int_times)
    int_times.sort()

    # Get the trajectory
    traj = Dynamics.integrate(net, int_times, params=params, fill_traj=False)

    # Build up our data dictionary
    data = {}
    for var, times in var_times.items():
        var_data = {}
        data[var] = var_data

        # Calculate our uncertainties
        var_uncerts = uncert_func(traj, var)
        for time in times:
            val = traj.get_var_val(var, time)
            if scipy.isscalar(var_uncerts):
                uncert = var_uncerts
            else:
                index = traj._get_time_index(time)
                uncert = var_uncerts[index]
            var_data[time] = (val, uncert)
    return data
Example #38
	def __call__(self,k,pole,kind_interpol='linear'):
		if scipy.isscalar(k): k = [k]
		shape = (len(k),)
		if not hasattr(self,'window'):
			return scipy.zeros(shape,dtype=self.TYPE_FLOAT)
		if pole in self:
			interpolated_window = interpolate.interp1d(self.k,self.window[self.index(pole)],kind=kind_interpol,bounds_error=False,fill_value=0.)
			return interpolated_window(k)
		self.logger.warning('Sorry, pole {} is not calculated.'.format(pole))
		return scipy.zeros(shape,dtype=self.TYPE_FLOAT)
Example #39
File: wcs.py Project: MCTwo/CodeCDF
def sky2pix(header,ra,dec):
	hdr_info = parse_header(header)
	if scipy.isscalar(ra):
		ra /= raddeg
		dec /= raddeg
	else:
		ra = ra.astype(scipy.float64)/raddeg
		dec = dec.astype(scipy.float64)/raddeg
	if hdr_info[3]=="DEC":
		ra0 = hdr_info[0][1]/raddeg
		dec0 = hdr_info[0][0]/raddeg
	else:
		ra0 = hdr_info[0][0]/raddeg
		dec0 = hdr_info[0][1]/raddeg

	phi = pi + arctan2(-1*cos(dec)*sin(ra-ra0),sin(dec)*cos(dec0)-cos(dec)*sin(dec0)*cos(ra-ra0))
	argtheta = sin(dec)*sin(dec0)+cos(dec)*cos(dec0)*cos(ra-ra0)
	if scipy.isscalar(argtheta):
		theta = arcsin(argtheta)
	else:
		argtheta[argtheta>1.] = 1.
		theta = arcsin(argtheta)

	if hdr_info[5]=="TAN":
		r_theta = raddeg/tan(theta)
		x = r_theta*sin(phi)
		y = -1.*r_theta*cos(phi)
	elif hdr_info[5]=="SIN":
		r_theta = raddeg*cos(theta)
		x = r_theta*sin(phi)
		y = -1.*r_theta*cos(phi)
	if hdr_info[3]=="DEC":
		a = x.copy()
		x = y.copy()
		y = a.copy()
	inv = linalg.inv(hdr_info[2])
	x0 = inv[0,0]*x + inv[0,1]*y
	y0 = inv[1,0]*x + inv[1,1]*y

	x = x0+hdr_info[1][0]-1
	y = y0+hdr_info[1][1]-1

	return x,y
Example #40
def sky2pix(header,ra,dec):
	hdr_info = parse_header(header)
	if scipy.isscalar(ra):
		ra /= raddeg
		dec /= raddeg
	else:
		ra = ra.astype(scipy.float64)/raddeg
		dec = dec.astype(scipy.float64)/raddeg
	if hdr_info[3]=="DEC":
		ra0 = hdr_info[0][1]/raddeg
		dec0 = hdr_info[0][0]/raddeg
	else:
		ra0 = hdr_info[0][0]/raddeg
		dec0 = hdr_info[0][1]/raddeg

	phi = pi + arctan2(-1*cos(dec)*sin(ra-ra0),sin(dec)*cos(dec0)-cos(dec)*sin(dec0)*cos(ra-ra0))
	argtheta = sin(dec)*sin(dec0)+cos(dec)*cos(dec0)*cos(ra-ra0)
	if scipy.isscalar(argtheta):
		theta = arcsin(argtheta)
	else:
		argtheta[argtheta>1.] = 1.
		theta = arcsin(argtheta)

	if hdr_info[5]=="TAN":
		r_theta = raddeg/tan(theta)
		x = r_theta*sin(phi)
		y = -1.*r_theta*cos(phi)
	elif hdr_info[5]=="SIN":
		r_theta = raddeg*cos(theta)
		x = r_theta*sin(phi)
		y = -1.*r_theta*cos(phi)
	if hdr_info[3]=="DEC":
		a = x.copy()
		x = y.copy()
		y = a.copy()
	inv = linalg.inv(hdr_info[2])
	x0 = inv[0,0]*x + inv[0,1]*y
	y0 = inv[1,0]*x + inv[1,1]*y

	x = x0+hdr_info[1][0]-1
	y = y0+hdr_info[1][1]-1

	return x,y
Example #41
 def run_mncontour(self, mncontour={}):
     params = mncontour.get('params', [])
     npoints = mncontour.get('npoints', 20)
     nsigmas = mncontour.get('nsigmas', 1)
     ndof = mncontour.get('ndof', 1)
     if scipy.isscalar(npoints): npoints = [npoints] * len(params)
     if scipy.isscalar(nsigmas) or len(nsigmas) != len(params):
         nsigmas = [nsigmas] * len(params)
     nsigmas = [[nsigma] if scipy.isscalar(nsigma) else nsigma
                for nsigma in nsigmas]
     for par, npoint, nsigma in zip(params, npoints, nsigmas):
         for sigma in nsigma:
             kwargs = dict(numpoints=npoint,
                           sigma=scipy.sqrt(
                               nsigmas_to_deltachi2(sigma, ndof=ndof)))
             self.logger.info('Profiling {} with kwargs {}.'.format(
                 par, kwargs))
             xy = self.minuit.mncontour(par[0], par[1], **kwargs)[-1]
             if par not in self.contour: self.contour[par] = {}
             self.contour[par][sigma] = scipy.array(xy).T
Example #42
 def __mul__(self, other):
     if sp.isscalar(other):
         return simple_diag_matrix(sp.ones(self.shape[0], self.dtype) * other)
     
     try:
         if other.shape == self.shape:
             return simple_diag_matrix(other.diagonal())
     except Exception:
         return NotImplemented
     
     return self.toarray() * other
Example #43
 def inverse_jacobian(self, reference_point):
     jac = self.jacobian(reference_point)
     if sp.isscalar(jac):
         return sp.array([[1 / jac]])
     elif sp.all(sp.array(jac.shape) == 1):
         return 1 / jac.reshape((1, 1))
     elif len(jac.shape) == 1 or jac.shape[0] == 1 or jac.shape[1] == 1:
         raise NotImplementedError("Maybe a projection approach is needed here.")
     elif jac.shape[0] == jac.shape[1]:
         print(jac.shape, jac)
         return spl.inv(jac)
Example #44
	def __call__(self,s,pole,kind_interpol='linear'):
		ell1,ell2 = pole
		s = [[s_] if scipy.isscalar(s_) else s_ for s_ in s]
		shape = list(map(len,s))
		if not hasattr(self,'window'):
			return scipy.zeros(shape,dtype=self.TYPE_FLOAT)
		if pole in self:
			interpolated_window = interpolate.interp2d(self.s[0],self.s[-1],self.window[self.index(pole)].T,kind=kind_interpol,bounds_error=False,fill_value=0.)
			return interpolated_window(*s).T
		self.logger.warning('Sorry, pole {} is not calculated.'.format(pole))
		return scipy.zeros(shape,dtype=self.TYPE_FLOAT)
Example #45
    def getclosest(self, coords, timelist=None):
        """
        This method will get the closest set of parameters in the coordinate space. It will return
        the parameters from all times.
        Input
        coords - A list of x,y and z coordinates.
        Output
        paramout - A NtxNp array from the closest output params
        sphereout - A Nc length array. The spherical coordinates of the closest point.
        cartout - Cartesian coordinates of the closest point.
        distance - The spatial distance between the returned location and the 
            desired location.
        minidx - The spatial index point.
        tvec - The times of the returned data. 
        """
        X_vec = self.Cart_Coords[:, 0]
        Y_vec = self.Cart_Coords[:, 1]
        Z_vec = self.Cart_Coords[:, 2]

        xdiff = X_vec - coords[0]
        ydiff = Y_vec - coords[1]
        zdiff = Z_vec - coords[2]
        distall = xdiff**2 + ydiff**2 + zdiff**2
        minidx = np.argmin(distall)
        paramout = self.Param_List[minidx]
        velout = self.Velocity[minidx]
        datatime = self.Time_Vector
        tvec = self.Time_Vector
        if sp.ndim(self.Time_Vector) > 1:
            datatime = datatime[:, 0]

        if isinstance(timelist, list):
            timelist = sp.array(timelist)
        if timelist is not None:
            timeindx = []
            for itime in timelist:
                if sp.isscalar(itime):
                    timeindx.append(sp.argmin(sp.absolute(itime - datatime)))
                else:
                    # look for overlap
                    log1 = (tvec[:, 0] >= itime[0]) & (tvec[:, 0] < itime[1])
                    log2 = (tvec[:, 1] > itime[0]) & (tvec[:, 1] <= itime[1])
                    log3 = (tvec[:, 0] <= itime[0]) & (tvec[:, 1] > itime[1])
                    log4 = (tvec[:, 0] > itime[0]) & (tvec[:, 1] < itime[1])
                    tempindx = sp.where(log1 | log2 | log3 | log4)[0]

                    timeindx = timeindx + tempindx.tolist()
            paramout = paramout[timeindx]
            velout = velout[timeindx]
            tvec = tvec[timeindx]
        sphereout = self.Sphere_Coords[minidx]
        cartout = self.Cart_Coords[minidx]
        return (paramout, velout, sphereout, cartout, np.sqrt(distall[minidx]),
                minidx, tvec)
Example #46
    def transformSkyCoordinateErrors(self, phi, theta, sigPhiStar, sigTheta, rhoPhiTheta=0):
        """
        Converts the sky coordinate errors from one reference system to another, including the covariance
        term. Equations (1.5.4) and (1.5.20) from section 1.5 in the Hipparcos Explanatory Volume 1 are used.

        Parameters
        ----------

        phi         - The longitude-like angle of the position of the source (radians).
        theta       - The latitude-like angle of the position of the source (radians).
        sigPhiStar  - Standard error in the longitude-like angle of the position of the source (radians or
                      sexagesimal units, including cos(latitude) term)
        sigTheta    - Standard error in the latitude-like angle of the position of the source (radians or
                      sexagesimal units)

        Keywords (optional)
        -------------------

        rhoPhiTheta - Correlation coefficient of the position errors. Set to zero if this keyword is not
                      provided.

        Returns
        ------
        
        sigPhiRotStar  - The transformed standard error in the longitude-like angle (including
                         cos(latitude) factor)
        sigThetaRot    - The transformed standard error in the latitude-like angle.
        rhoPhiThetaRot - The transformed correlation coefficient.
        """
        if isscalar(rhoPhiTheta) and not isscalar(sigTheta):
            rhoPhiTheta=zeros_like(sigTheta)+rhoPhiTheta
        c, s = self._getJacobian(phi,theta)
        cSqr = c*c
        sSqr = s*s
        covar = sigPhiStar*sigTheta*rhoPhiTheta
        varPhiStar = sigPhiStar*sigPhiStar
        varTheta = sigTheta*sigTheta
        varPhiStarRot = cSqr*varPhiStar+sSqr*varTheta+2.0*covar*c*s
        varThetaRot = sSqr*varPhiStar+cSqr*varTheta-2.0*covar*c*s
        covarRot = (cSqr-sSqr)*covar+c*s*(varTheta-varPhiStar)
        return sqrt(varPhiStarRot), sqrt(varThetaRot), covarRot/sqrt(varPhiStarRot*varThetaRot)
Example #47
 def subvolume(self, ranges=[[0, 1], [0, 1], [0, 1]]):
     if scipy.isscalar(ranges[0]): ranges = [ranges] * 3
     position = self.Position
     mask = self.trues()
     for i, r in enumerate(ranges):
         mask &= (position[:, i] >= r[0]) & (position[:, i] <= r[1])
     new = self[mask]
     new.BoxSize = scipy.diff(ranges, axis=-1)[:, 0]
     new._boxcenter = scipy.array(
         [r[0] for r in ranges], dtype=scipy.float64
     ) + new.BoxSize / 2. + self._boxcenter - self._translation
     return new
Example #48
    def getclosest(self,coords,timelist=None):
        """
        This method will get the closest set of parameters in the coordinate space. It will return
        the parameters from all times.
        Input
        coords - A list of x,y and z coordinates.
        Output
        paramout - A NtxNp array from the closest output params
        sphereout - A Nc length array. The spherical coordinates of the closest point.
        cartout - Cartesian coordinates of the closest point.
        distance - The spatial distance between the returned location and the
            desired location.
        minidx - The spatial index point.
        tvec - The times of the returned data.
        """
        X_vec = self.Cart_Coords[:,0]
        Y_vec = self.Cart_Coords[:,1]
        Z_vec = self.Cart_Coords[:,2]

        xdiff = X_vec -coords[0]
        ydiff = Y_vec -coords[1]
        zdiff = Z_vec -coords[2]
        distall = xdiff**2+ydiff**2+zdiff**2
        minidx = np.argmin(distall)
        paramout = self.Param_List[minidx]
        velout = self.Velocity[minidx]
        datatime = self.Time_Vector
        tvec = self.Time_Vector
        if sp.ndim(self.Time_Vector)>1:
            datatime = datatime[:,0]

        if isinstance(timelist,list):
            timelist=sp.array(timelist)
        if timelist is not None:
            timeindx = []
            for itime in timelist:
                if sp.isscalar(itime):
                    timeindx.append(sp.argmin(sp.absolute(itime-datatime)))
                else:
                    # look for overlap
                    log1 = (tvec[:,0]>=itime[0]) & (tvec[:,0]<itime[1])
                    log2 = (tvec[:,1]>itime[0]) & (tvec[:,1]<=itime[1])
                    log3 = (tvec[:,0]<=itime[0]) & (tvec[:,1]>itime[1])
                    log4 = (tvec[:,0]>itime[0]) & (tvec[:,1]<itime[1])
                    tempindx = sp.where(log1|log2|log3|log4)[0]

                    timeindx = timeindx +tempindx.tolist()
            paramout=paramout[timeindx]
            velout=velout[timeindx]
            tvec = tvec[timeindx]
        sphereout = self.Sphere_Coords[minidx]
        cartout = self.Cart_Coords[minidx]
        return (paramout,velout,sphereout,cartout,np.sqrt(distall[minidx]),minidx,tvec)
Example #49
 def BlockInd2SubWithoutBand(self,ind):
     if sp.isscalar(ind):
         if ind < 0 or ind > self.numTotalBlocks:
             raise IndexError('BlockInd2SubWithoutBand')
     else:
         if (ind < 0).any() or (ind > self.numTotalBlocks).any():
             raise IndexError('BlockInd2SubWithoutBand')
     sub = []
     for i in range(0,self.Dim):
         sub.append( ind%self.M )
         ind //= self.M
     return a(sub,dtype='int')
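BlockInd2SubWithoutBand is a mixed-radix (base-M) digit expansion; a standalone sketch for M = 4, Dim = 2 (ind2sub is a hypothetical name):

import numpy as np

def ind2sub(ind, M=4, dim=2):
    sub = []
    for _ in range(dim):
        sub.append(ind % M)  # least-significant digit first
        ind //= M
    return np.array(sub, dtype=int)

print(ind2sub(7))                     # 7 = 3 + 1*4 -> [3 1]
print(ind2sub(np.array([0, 5, 15])))  # vectorized: columns are per-input digits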
Example #50
def plot_density_contour(ax,
                         x,
                         y,
                         pdf,
                         bins=[20, 20],
                         nsigmas=2,
                         ndof=2,
                         color='blue',
                         lighten=-0.5,
                         **contour_kwargs):

    """Create a density contour plot.

    Parameters
    ----------
    x,y,pdf : 2d density
    bins : bins
    ax : matplotlib.Axes
        plots the contour to this axis.
    contour_kwargs : dict
        kwargs to be passed to pyplot.contourf()
    """
    from scipy import stats, optimize
    if scipy.isscalar(nsigmas): nsigmas = 1 + scipy.arange(nsigmas)
    pdf = pdf / pdf.sum()

    def find_confidence_interval(x, confidence_level):
        return pdf[pdf > x].sum() - confidence_level

    to_quantiles = nsigmas_to_quantiles_2d if ndof == 1 else nsigmas_to_quantiles_1d
    levels = [
        optimize.brentq(find_confidence_interval,
                        0.,
                        1.,
                        args=(to_quantiles(nsigma))) for nsigma in nsigmas
    ]
    if isinstance(color, list):
        colors = color
    else:
        colors = [color]
        for icol in nsigmas[:-1]:
            colors.append(lighten_color(colors[-1], amount=lighten))

    contour = ax.contourf(x,
                          y,
                          pdf.T,
                          levels=levels[::-1] + [pdf.max() + 1.],
                          colors=colors,
                          **contour_kwargs)

    return contour
Example #51
    def __mul__(self, other):
        if sp.isscalar(other):
            return simple_diag_matrix(self.diag * other)

        try:
            other = sp.asanyarray(other)

            if other.shape == self.shape:
                return simple_diag_matrix(self.diag * other.diagonal())

            return self.toarray() * other
        except Exception:
            return NotImplemented
Example #52
 def __mul__(self, other):
     if sp.isscalar(other):
         return simple_diag_matrix(self.diag * other)
     
     try:
         other = sp.asanyarray(other)
 
         if other.shape == self.shape:
             return simple_diag_matrix(self.diag * other.diagonal())
         
         return self.toarray() * other
     except Exception:
         return NotImplemented
Example #53
    def process(self, phi_mat, obs_vec):
        """
        Solve the optimization problem.

        :param phi_mat: Dictionary :math:`\Phi\in\mathbb{R}^{m\\times n}`
        :type phi_mat: numpy array
        :param obs_vec: Observation vector :math:`y\in\mathbb{R}^{m}`
        :type obs_vec: numpy array
        :return: `x_hat`: estimated state vector :math:`x\in\mathbb{R}^{n}`
        :rtype:  numpy array
        :return: `current_it`: number of iterations algorithm is run
        :rtype:  int     
        """        
        nsamp = phi_mat.shape[1]
        resid = obs_vec
        indices = [] 
        current_it = 0
        norm_r = norm(resid)

        while norm_r > self.tol and current_it < self.max_iter:
            proxy = dot(phi_mat.transpose(), resid) # compute proxy
            gamma = argmax(absolute(proxy)) # find index with largest value
            indices.append(gamma) # update indices

            phi_curr = phi_mat[:, indices].copy() # select columns from phi_mat 

            x_hat_sub = zeros((len(indices), 1))
            x_hat = zeros((nsamp, 1))

            if len(indices) == 1: # in this case, need to solve least squares yourself
                x_hat_sub = vdot(phi_curr, obs_vec)/vdot(phi_curr, phi_curr)
            else:            
                x_hat_sub = lstsq(phi_curr, obs_vec)[0]
                
            index = 0
   
            for i in indices:   
                if isscalar(x_hat_sub):
                    x_hat[i] = x_hat_sub
                else: 
                    x_hat[i] = x_hat_sub[index]
                index = index + 1

            resid = obs_vec - dot(phi_mat, x_hat) # calculate new residual 
            norm_r = norm(resid)
            current_it = current_it + 1 

            self.x_hat = x_hat
            
        return x_hat, current_it
Example #54
File: pdf.py Project: ayr0/StatLab
def betapdf(X,a,b):
    #operate only on x in (0,1)
    if isscalar(X):
        if X<=0 or X>=1:
            raise ValueError("X must be in the interval (0,1)")
        else:
            x=X
    else:
        goodx = logical_and(X>0, X<1)
        x = X[goodx].copy()

    loga = (a-1.0)*log(x)
    logb = (b-1.0)*log(1.0-x)

    return exp(gammaln(a+b)-gammaln(a)-gammaln(b)+loga+logb)
Example #55
 def jacobian_det(self, reference_point):
     jac = self.jacobian(reference_point)
     if sp.isscalar(jac):
         return jac
     elif sp.all(sp.array(jac.shape) == 1):
         # an array with only one entry
         return sp.asscalar(jac)
     elif len(jac.shape) == 1 or jac.shape[0] == 1 or jac.shape[1] == 1:
         return sp.linalg.norm(jac)
     elif jac.shape[0] == jac.shape[1]:
         # a square matrix
         return spl.det(jac)
     else:
         raise NotImplementedError("Computing Jacobian \"determinant\" not implemented for shape=({0:d}, {1:d})"
                                   .format(jac.shape[0], jac.shape[1]))
Example #56
def mono(lam, mrr, mri, r, lmax=None, **kwds):
    """ 
    Define a general purpose mono-disperse scattering function 
    """
    # call mono_dense if r is a scalar
    if sp.isscalar(r):
        out = mono_dense(lam, mrr, mri, r)
        if lmax is not None:
            out.reshape_lmax(lmax)
        return out
    # assume r is a vector and call mono_dense_array
    else:
        # Check that lmax is specified for passing a vector of radii
        if lmax is None:
            raise ValueError("Specify lmax when passing a vector for 'r'")
        return mono_dense_array(lam, mrr, mri, r, lmax, **kwds)
Example #57
    def __init__(self, value):
        """
        :type value: scalar dtype
        :param value: single digit value
        """

        super(MRScalar, self).__init__()

        value_ = value
        if sp.isscalar(value_):
            value_ = sp.asanyarray(value_)
        try:
            assert value_.ndim == 0
        except Exception:
            raise ValueError('%s is not a scalar!' % value)
        self._value = value_