Example #1
def find_ypic_sens(y, yp, time, var_types, rtol, atol_for_sens, constants, 
                   net, opt_var, redirect_msgs=False):
    # On some systems, the f2py'd functions don't like len(constants)=0.
    if len(constants) == 0:
        constants = [0]
    var_types = scipy.asarray(var_types)
    y = scipy.asarray(y, scipy.float_)
    yp = scipy.asarray(yp, scipy.float_)
    atol_for_sens = scipy.asarray(atol_for_sens)

    N_dyn = len(var_types)
    y = copy.copy(y)
    yp = copy.copy(yp)

    # Find the initial conditions for the normal variables
    y_norm, yp_norm = find_ics(y[:N_dyn], yp[:N_dyn], time,
                               var_types, rtol, atol_for_sens[:N_dyn],
                               net.constantVarValues, net, 
                               redirect_msgs=redirect_msgs)
    # Copy the updated values into our y and yp arrays
    y[:N_dyn] = y_norm
    yp[:N_dyn] = yp_norm
    
    # Now we can solve for yp for all the *non-algebraic* sensitivity variables
    # Notice that they are simply the appropriate residual evaluated when
    #  yp for the sens variable is zero.
    yp[N_dyn:][var_types == 1] = 0
    res = net.sens_rhs(time, y, yp, constants)
    yp[N_dyn:][var_types == 1] = res[N_dyn:][var_types == 1]

    return yp
Example #2
def evaluations(ty, pv, useScipy = True):
	"""
	evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC)
	ty, pv: list, tuple or ndarray
	useScipy: convert ty, pv to ndarray, and use scipy functions for the evaluation

	Calculate accuracy, mean squared error and squared correlation coefficient
	using the true values (ty) and predicted values (pv).
	"""
	if scipy is not None and useScipy:
		return evaluations_scipy(scipy.asarray(ty), scipy.asarray(pv))
	if len(ty) != len(pv):
		raise ValueError("len(ty) must be equal to len(pv)")
	total_correct = total_error = 0
	sumv = sumy = sumvv = sumyy = sumvy = 0
	for v, y in zip(pv, ty):
		if y == v:
			total_correct += 1
		total_error += (v-y)*(v-y)
		sumv += v
		sumy += y
		sumvv += v*v
		sumyy += y*y
		sumvy += v*y
	l = len(ty)
	ACC = 100.0*total_correct/l
	MSE = total_error/l
	try:
		SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
	except ZeroDivisionError:
		SCC = float('nan')
	return (float(ACC), float(MSE), float(SCC))
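
A minimal usage sketch for evaluations() above, on invented toy values; useScipy=False forces the pure-Python path (the module this comes from guards its scipy import, which the scipy-backed path relies on):

true_vals = [1.0, 2.0, 3.0, 4.0]   # hypothetical ground truth
pred_vals = [1.0, 2.5, 3.0, 3.5]   # hypothetical predictions

ACC, MSE, SCC = evaluations(true_vals, pred_vals, useScipy=False)
print(ACC)  # 50.0   -- two of the four predictions match exactly
print(MSE)  # 0.125  -- mean of the squared errors (0, 0.25, 0, 0.25)
print(SCC)  # ~0.914 -- squared correlation between truth and predictions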
Example #3
def sinc_interp1d(x, s, r):
    """Interpolates `x`, sampled at times `s`
    Output `y` is sampled at times `r`

    inspired by this Matlab post:
    http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html

    :param ndarray x: input data time series
    :param ndarray s: input sampling time series (regular sample interval)
    :param ndarray r: output sampling time series
    :return ndarray: output data time series (regular sample interval)
    """

    # init
    s = sp.asarray(s)
    r = sp.asarray(r)
    x = sp.asarray(x)
    if x.ndim == 1:
        x = sp.atleast_2d(x)
    else:
        if x.shape[0] == len(s):
            x = x.T
        else:
            if x.shape[1] != s.shape[0]:
                raise ValueError('x and s must have the same temporal extent')
    if sp.allclose(s, r):
        return x.T
    T = s[1] - s[0]

    # resample
    sincM = sp.tile(r, (len(s), 1)) - sp.tile(s[:, sp.newaxis], (1, len(r)))
    return sp.vstack([sp.dot(xx, sp.sinc(sincM / T)) for xx in x]).T
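
A quick call sketch for the function above, resampling a toy sine onto a denser grid; sp is assumed to be an older scipy that still re-exports the NumPy namespace, as in the snippet:

import scipy as sp

s = sp.linspace(0.0, 1.0, 11)    # regular input sample times
r = sp.linspace(0.0, 1.0, 101)   # denser output sample times
x = sp.sin(2 * sp.pi * s)        # toy 1 Hz signal sampled on s

y = sinc_interp1d(x, s, r)       # shape (101, 1): one channel at the new rate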
Example #4
File: brdf.py Project: marrabld/dimitripy
    def calc_roujean_coeffs(self, sun_zenith, sensor_zenith, relative_azimuth, reflectance):
        """
        Calculates the Roujean coefficients k0, k1 and k2

        :param sun_zenith: <numpy> array of sun zenith angles in radians.
        :param sensor_zenith: <numpy> array of sensor zenith angles in radians
        :param relative_azimuth: <numpy> array of relative (sun/sensor) azimuth angles in radians.
        :param reflectance: <numpy> array of reflectance (TOA in the case of dimitripy)
        :return: k_coeff.T, residual, rank, singular_values: <numpy>
        """

        # Remove any values that have -999 for the reflectance.
        idx = reflectance != -999

        lg.debug('Calculating kernel functions')
        f_matrix = scipy.ones((reflectance[idx].shape[0], 3))  # There are 3 k_coeffs
        f_matrix[:, 1] = self.calc_kernel_f1(sun_zenith[idx], sensor_zenith[idx], relative_azimuth[idx])
        f_matrix[:, 2] = self.calc_kernel_f2(sun_zenith[idx], sensor_zenith[idx], relative_azimuth[idx])

        lg.debug('Inverting for K coeffs')
        try:
            #k_coeff = scipy.dot(f_matrix.T, f_matrix)
            #k_coeff = scipy.linalg.inv(k_coeff)
            #k_coeff = scipy.dot(k_coeff, f_matrix.T)
            #k_coeff = scipy.dot(k_coeff, reflectance.T)

            k_coeff, residual, rank, singular_values = scipy.linalg.lstsq(f_matrix, reflectance[idx].T)
        except Exception:
            lg.exception("Couldn't find k_coeff; setting to -999")
            k_coeff = scipy.asarray([-999, -999, -999])  # Right thing to do?
            residual = rank = singular_values = scipy.asarray([-999, -999, -999])

        return k_coeff.T, residual, rank, singular_values
Example #5
  def __init__(self, mixcoeffs, means, covs):
    self.mixcoeffs = scipy.asarray(mixcoeffs)
    self.means = scipy.asarray(means)
    self.covs = scipy.asarray(covs)

    self.degree = self.mixcoeffs.shape[0]
    self.dim = self.covs.shape[1]
Example #6
    def set_data(self, data, events=None, gtruth=None):
        """update the plot with new chunk of data

        :Parameters:
            data : ndarray
                A 1d array with data to show.
            events : ndarray
                A 1d array of event times to show.
        """

        # data
        self._data.setData(N.arange(data.size), data)

        # events
        if events is None:
            ev = N.zeros(0)
        else:
            ev = N.asarray(events)
        self._events.setData(ev, N.ones(ev.size) * self.range)

        # gtruth
        if gtruth is None:
            gt = N.zeros(0)
        else:
            gt = N.asarray(list(gtruth))
        self._gtruth.setData(gt, N.ones(gt.size) * -self.range)

        # replot
        if self.replot_on_update is True:
            self.replot()
Example #7
    def test_Epicentral(self):
        dist = Distances(None,None,None,None,None,None,None,None,None,None,None)
        
        distance_type='Epicentral'

        rupture_centroid_lat=asarray((-31.0))
        rupture_centroid_lon=asarray((116.0))
        
        site_lat=asarray((-31,-32,-33,-34))        
        site_lon=asarray((116.0,116.0,116.0,116.0))

        distance=dist.raw_distances(site_lat,
                                    site_lon,
                                    rupture_centroid_lat,
                                    rupture_centroid_lon,
                                    lengths,
                                    azimuths,
                                    widths,
                                    dips,
                                    depths,
                                    depths_to_top,
                                    distance_type,
                                    projection)


        d=asarray((0,1,2,3))*(1.852*60)
        d=d[:,newaxis]
        d2=As_The_Cockey_Flies(rupture_centroid_lat,
                               rupture_centroid_lon,
                               site_lat,
                               site_lon)

        assert allclose(d,distance,rtol=0.001)
        assert allclose(d,d2)
Example #8
def trimesh(vertices, indices, labels=False):
    """
    Plot a 2D triangle mesh
    """
    from scipy import asarray
    from matplotlib import collections
    from pylab import gca, axis, text
    from numpy import average
    
    vertices,indices = asarray(vertices),asarray(indices)

    #3d tensor [triangle index][vertex index][x/y value]
    triangles = vertices[indices.ravel(),:].reshape((indices.shape[0],3,2))
    
    col = collections.PolyCollection(triangles)
    col.set_facecolor('grey')
    col.set_alpha(0.5)
    col.set_linewidth(1)

    sub = gca()
    sub.add_collection(col,autolim=True)
    axis('off')
    sub.autoscale_view()

    if labels:
        barycenters = average(triangles,axis=1)
        for n,bc in enumerate(barycenters):
            text(bc[0], bc[1], str(n), {'color' : 'k', 'fontsize' : 8,
                                        'horizontalalignment' : 'center',
                                        'verticalalignment' : 'center'})
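
A minimal call sketch for trimesh() with invented data: a unit square split into two triangles, drawn with triangle indices at the barycenters:

from pylab import show

verts = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]  # hypothetical vertices
tris = [[0, 1, 2], [0, 2, 3]]                             # two triangles

trimesh(verts, tris, labels=True)
show()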
Example #9
File: utils.py Project: versae/transfer
def draw_regions(original_image, mask_image, outline=["green", "blue", "red"],
                 fill=[None, None, None], mode_pass=[False, False, False]):
    heigth = original_image.size[1]
    regions = find_regions(mask_image)
    filtered_regions = filter_regions(regions,
                                      noise_heigth=NOISE_FACTOR * heigth)
    out = original_image.convert("RGB")
    for i, region in enumerate(filtered_regions):
        if mode_pass[i]:
            widths = scipy.asarray([f.width() for f in filtered_regions[i]])
            heigths = scipy.asarray([f.heigth() for f in filtered_regions[i]])
            mode_width = float(scipy.stats.mstats.mode(widths)[0])
            mode_heigth = float(scipy.stats.mstats.mode(heigths)[0])
        if outline[i] or fill[i]:
            draw = ImageDraw.Draw(out)
            for r in filtered_regions[i]:
                if (not mode_pass[i]
                    or (mode_pass[i]
                        and (mode_width - mode_pass[i] <= r.width() \
                             <= mode_width + mode_pass[i]
                             or mode_heigth - mode_pass[i] <= r.heigth() \
                             <= mode_heigth + mode_pass[i]))):
                    draw.rectangle(r.box(), outline=outline[i], fill=fill[i])
            del draw
    return out
Example #10
 def test_As_The_Cockey_Flies(self):
     # Test data from GA website 
     # As_The_Cockey_Flies implements the Great Circle method
     # 
     # http://www.ga.gov.au/earth-monitoring/geodesy/geodetic-techniques/distance-calculation-algorithms.html
     rupture_centroid_lat = asarray((-30))
     rupture_centroid_lon = asarray((150))
     
     site_lat = asarray((-31,-31,-32,-33,-34,-35,-40,-50,-60,-70,-80))
     site_lon = asarray((150,151,151,151,151,151,151,151,151,151,151))
     
     expected = asarray([[111.120],
                         [146.677],
                         [241.787],
                         [346.556],
                         [454.351],
                         [563.438],
                         [1114.899],
                         [2223.978],
                         [3334.440],
                         [4445.247],
                         [5556.190]])
     
     d = As_The_Cockey_Flies(rupture_centroid_lat,
                             rupture_centroid_lon,
                             site_lat,
                             site_lon)
     
     assert allclose(d,expected)
Example #11
    def ref_indicator(self, coord):
        """
        Return the value of the indicator for the reference coordinates if appropriate.

        NOTE: Currently we will simply implement the reference discretization as non-overlapping boxes.
        """
        # create a distance vector
        distancevec = sp.asarray(coord) - sp.asarray(self.ref_center)

        # if any collective variable is periodic, construct dr, the adjustment for the minimum image convention for the periodic CVs
        if self.wrapping is not None:

            # build dr
            dr = np.zeros(distancevec.shape)

            # add values to dr if the CV wraps
            for i in xrange(len(self.wrapping)):
                if self.wrapping[i] != 0.0:
                    # This is an old trick from MD codes to find the minimum distance between two points.
                    dr[i] = self.wrapping[i] * np.rint(distancevec[i]/self.wrapping[i])

            # add min image vector
            distancevec -= dr

        # We return 1.0 if all the distances are smaller than the width of the box from the center, 0.0 otherwise.
        return float(np.prod(self.ref_width > np.abs(distancevec)))
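
The minimum-image trick used in the loop above is worth isolating; a standalone sketch with a hypothetical period of 360 (e.g. an angle in degrees):

import numpy as np

def minimum_image(d, period):
    # Map a raw displacement onto its nearest periodic image.
    return d - period * np.rint(d / period)

print(minimum_image(350.0, 360.0))  # -10.0: wraps around instead of going the long way
print(minimum_image(10.0, 360.0))   #  10.0: already the shortest displacement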
Example #12
def xcorrv(a, b=None, lag=None, dtype=None):
    """vectorial cross correlation by taking the expectation over an outer product"""

    # checks
    a = sp.asarray(a)
    b = sp.asarray(b) if b is not None else a
    if not (a.ndim == b.ndim):
        raise ValueError('a.ndim != b.ndim')

    #if a.size != b.size:
    #    raise ValueError('a.size != b.size')
    #if a.size < 2:
    #    raise ValueError('a.size < 2')

    if lag is None:
        lag = int(a.shape[0] - 1)
    if lag > a.shape[0] - 1:
        raise ValueError('lag > vector len - 1')

    # init
    lag_range = xrange(int(-lag), int(lag) + 1)
    rval = sp.empty((a.shape[1], b.shape[1], len(lag_range)), dtype=dtype or a.dtype)

    # calc
    for tau in lag_range:
        prod = a.T[:, None, max(0, +tau):min(len(a), len(a) + tau)] * \
               b.T[None, :, max(0, -tau):min(len(b), len(b) - tau)].conj()
        rval[..., lag + tau] = prod.mean(axis=-1)

    # return
    return rval
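
A small sketch exercising xcorrv() with toy two-channel data laid out as (samples, channels), matching the code's convention (note the snippet's xrange keeps it Python 2):

import scipy as sp

a = sp.asarray([[0., 1.], [1., 0.], [0., 1.], [1., 0.], [0., 1.]])  # 5 samples x 2 channels

xc = xcorrv(a, lag=2)
print(xc.shape)  # (2, 2, 5): channel x channel x (2 * lag + 1) lags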
Example #13
    def test2_raster_data_at_points(self):
        # Write a file to test
        f = tempfile.NamedTemporaryFile(suffix='.aai',
                                        prefix='test_misc',
                                        delete=False)
        f.write('ncols 3   \r\n')
        f.write('nrows 2 \r\n')
        f.write('xllcorner +0.   \r\n')
        f.write('yllcorner +8. \r\n')
        f.write('cellsize 1   \r\n')
        f.write('NODATA_value -9999 \r\n')
        f.write('1 2 -9999   \r\n')
        f.write('4 5 6')
        f.close()
        # lon 0 - 3
        # lat 8 - 10

        # Just outside the midpoint of all sides
        lon = asarray([-0.0001, 1.5, 3.0001, 1.5])
        lat = asarray([9., 10.00001, 9.0, 7.99999])
        raster = Raster.from_file(f.name)
        data = raster.raster_data_at_points(lon, lat)
        self.assertTrue(numpy.all(numpy.isnan(data)))

        # Inside lower left corner of No data cell

        lon = asarray([2.0001])
        lat = asarray([9.000019])
        raster = Raster.from_file(f.name)
        data = raster.raster_data_at_points(lon, lat)
        self.assertTrue(numpy.all(numpy.isnan(data)))

        os.remove(f.name)
Example #14
File: warping.py Project: markchil/gptools
 def __call__(self, X, d, n, *args):
     """Evaluate the I-spline warping function.
     
     Parameters
     ----------
     X : array, (`M`,)
         `M` inputs from dimension `d`.
     d : non-negative int
         The index (starting from zero) of the dimension to apply the warping
         to.
     n : non-negative int
         The derivative order to compute.
     *args : scalar floats
         The remaining parameters to describe the warping, given as scalars.
         These are given as the knots followed by the coefficients, for each
         dimension. Note that these must ALL be provided for each call.
     """
     X = scipy.asarray(X, dtype=float)
     args = scipy.asarray(args, dtype=float)
     try:
         iter(self.nt)
     except TypeError:
         nt = self.nt * scipy.ones(d + 1)
     else:
         nt = self.nt
     i = 0
     for j in range(0, d):
         i += 2 * nt[j] + self.k - 2
     t = args[i:i + nt[d]]
     # No DC offset for the mapping, always map the origin to 0:
     C = scipy.concatenate(([0.0,], args[i + nt[d]:i + 2 * nt[d] + self.k - 2]))
     return spev(t, C, self.k, X, n=n, I_spline=True)
Example #15
    def test_Epicentral2(self):
        dist = Distances(None,None,None,None,None,None,None,None,None,None,None)
        
        distance_type='Epicentral'
        rupture_centroid_lat=asarray((-31.0,-33,-36))
        rupture_centroid_lon=asarray((116.0,118,222))
        

        site_lat=asarray((-31,-32,-33,-34))        
        site_lon=asarray((116.0,116.0,116.0,116.0))

        distance=dist.raw_distances(site_lat,
                                    site_lon,
                                    rupture_centroid_lat,
                                    rupture_centroid_lon,
                                    lengths,
                                    azimuths,
                                    widths,
                                    dips,
                                    depths,
                                    depths_to_top,
                                    distance_type,
                                    projection)
        

        
        d = As_The_Cockey_Flies(rupture_centroid_lat,
                                rupture_centroid_lon,
                                site_lat,
                                site_lon)

        assert allclose(d,distance,rtol=0.1)
Example #16
File: core.py Project: pennajm/gptools
 def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
     """Evaluate the covariance between points `Xi` and `Xj` with derivative order `ni`, `nj`.
     
     Parameters
     ----------
     Xi : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` inputs with dimension `N`.
     Xj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` inputs with dimension `N`.
     ni : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` derivative orders for set `i`.
     nj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` derivative orders for set `j`.
     hyper_deriv : Non-negative int or None, optional
         The index of the hyperparameter to compute the first derivative
         with respect to. If None, no derivatives are taken. Hyperparameter
         derivatives are not supported at this point. Default is None.
     symmetric : bool, optional
         Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
         Default is False.
     
     Returns
     -------
     Kij : :py:class:`Array`, (`M`,)
         Covariances for each of the `M` `Xi`, `Xj` pairs.
     
     Raises
     ------
     NotImplementedError
         If the `hyper_deriv` keyword is not None.
     """
     if hyper_deriv is not None:
         raise NotImplementedError("Hyperparameter derivatives have not been implemented!")
     n_cat = scipy.asarray(scipy.concatenate((ni, nj), axis=1), dtype=int)
     X_cat = scipy.asarray(scipy.concatenate((Xi, Xj), axis=1), dtype=float)
     n_cat_unique = unique_rows(n_cat)
     k = scipy.zeros(Xi.shape[0], dtype=float)
     # Loop over unique derivative patterns:
     if self.num_proc > 1:
         pool = multiprocessing.Pool(processes=self.num_proc)
     for n_cat_state in n_cat_unique:
         idxs = scipy.where(scipy.asarray((n_cat == n_cat_state).all(axis=1)).squeeze())[0]
         if (n_cat_state == 0).all():
             k[idxs] = self.cov_func(Xi[idxs, :], Xj[idxs, :], *self.params)
         else:
             if self.num_proc > 1 and len(idxs) > 1:
                 k[idxs] = scipy.asarray(
                     pool.map(_ArbitraryKernelEval(self, n_cat_state), X_cat[idxs, :]),
                     dtype=float
                 )
             else:
                 for idx in idxs:
                     k[idx] = mpmath.chop(mpmath.diff(self._mask_cov_func,
                                                      X_cat[idx, :],
                                                      n=n_cat_state,
                                                      singular=True))
     
      if self.num_proc > 1:  # a pool exists only when num_proc > 1
         pool.close()
     return k
Example #17
File: test_interp.py Project: dynaryu/eqrm
 def test_interp(self):
     functx = asarray([1,2])
     functy = asarray([10,20])
     x = asarray([0.5,1.5,2.5])
     y = interp(x, functy, functx)
     y_ans = x * 10.0
     self.assert_(allclose(y, y_ans, rtol=0.05))
Example #18
    def __init__(self, pga_bins, moment_magnitude_bins, periods,
                 log_amplifications, log_stds):

        pga_bins = asarray(pga_bins)
        moment_magnitude_bins = asarray(moment_magnitude_bins)
        periods = asarray(periods)
        for site_class in log_amplifications.keys():
            log_amplifications[site_class] = asarray(
                log_amplifications[site_class])
            log_stds[site_class] = asarray(log_stds[site_class])

        self.pga_bins = pga_bins
        self.moment_magnitude_bins = moment_magnitude_bins
        self.periods = periods
        self.log_amplifications = log_amplifications
        self.log_stds = log_stds

        # check that periods is increasing
        try:
            assert (self.periods.argsort() == r_[0:len(self.periods)]).all()
        except AssertionError:
            self.periods = self.periods[::-1]  # reverse self.periods
            assert (self.periods.argsort() == r_[0:len(self.periods)]).all()
            for site_class in self.log_amplifications.keys():
                log_amp = self.log_amplifications[site_class]
                self.log_amplifications[site_class] = log_amp[:, :, ::-1]
                # reverse log amplifications

                self.log_stds[site_class] = self.log_stds[site_class][:, :, ::-1]
Example #19
def matrix_distance(A, B):
    # matrix distance = sum of Euclidean distances of corresponding rows
    # (zip over 2D arrays pairs rows, not columns)
    A = sp.asarray(A)
    B = sp.asarray(B)
    if A.shape != B.shape:
        raise ValueError('A and B must have the same shape')
    return sp.array([sp.linalg.norm(u-v) for u, v in zip(A, B)]).sum()
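
A toy check of matrix_distance() with made-up 2x2 matrices that differ only in the first row:

import scipy as sp
import scipy.linalg  # sp.linalg.norm needs the submodule imported

A = sp.asarray([[0., 0.], [1., 1.]])
B = sp.asarray([[3., 4.], [1., 1.]])

print(matrix_distance(A, B))  # 5.0: the first rows differ by (3, 4), the second rows match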
Example #20
File: gibbs.py Project: markchil/gptools
    def __call__(self, Xi, Xj, sigmaf, l1, l2, lw, x0):
        """Evaluate the covariance function between points `Xi` and `Xj`.

        Parameters
        ----------
        Xi, Xj : :py:class:`Array`, :py:class:`mpf` or scalar float
            Points to evaluate covariance between. If they are :py:class:`Array`,
            :py:mod:`scipy` functions are used, otherwise :py:mod:`mpmath`
            functions are used.
        sigmaf : scalar float
            Prefactor on covariance.
        l1, l2, lw, x0 : scalar floats
            Parameters of length scale warping function, passed to
            :py:attr:`warp_function`.

        Returns
        -------
        k : :py:class:`Array` or :py:class:`mpf`
            Covariance between the given points.
        """
        li = self.warp_function(Xi, l1, l2, lw, x0)
        lj = self.warp_function(Xj, l1, l2, lw, x0)
        if isinstance(Xi, scipy.ndarray):
            if isinstance(Xi, scipy.matrix):
                Xi = scipy.asarray(Xi, dtype=float)
                Xj = scipy.asarray(Xj, dtype=float)
            return sigmaf**2.0 * (
                scipy.sqrt(2.0 * li * lj / (li**2.0 + lj**2.0)) *
                scipy.exp(-(Xi - Xj)**2.0 / (li**2 + lj**2))
            )
        else:
            return sigmaf**2.0 * (
                mpmath.sqrt(2.0 * li * lj / (li**2.0 + lj**2.0)) *
                mpmath.exp(-(Xi - Xj)**2.0 / (li**2 + lj**2))
            )
Example #21
File: utils.py Project: rmcgibbo/gptools
def unique_rows(arr):
    """Returns a copy of arr with duplicate rows removed.
    
    From Stackoverflow "Find unique rows in numpy.array."
    
    Parameters
    ----------
    arr : :py:class:`Array`, (`m`, `n`). The array to find the unique rows of.
    
    Returns
    -------
    unique : :py:class:`Array`, (`p`, `n`) where `p` <= `m`
        The array `arr` with duplicate rows removed.
    """
    b = scipy.ascontiguousarray(arr).view(
        scipy.dtype((scipy.void, arr.dtype.itemsize * arr.shape[1]))
    )
    try:
        dum, idx = scipy.unique(b, return_index=True)
    except TypeError:
        # Handle bug in numpy 1.6.2:
        rows = [_Row(row) for row in b]
        srt_idx = sorted(range(len(rows)), key=rows.__getitem__)
        rows = scipy.asarray(rows)[srt_idx]
        row_cmp = [-1]
        for k in xrange(1, len(srt_idx)):
            row_cmp.append(rows[k-1].__cmp__(rows[k]))
        row_cmp = scipy.asarray(row_cmp)
        transition_idxs = scipy.where(row_cmp != 0)[0]
        idx = scipy.asarray(srt_idx)[transition_idxs]
    return arr[idx]
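
A small check of unique_rows() on a toy integer array with one duplicated row:

import scipy

arr = scipy.asarray([[1, 2], [3, 4], [1, 2]])
print(unique_rows(arr))  # [[1 2] [3 4]]: the duplicated row appears only once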
Example #22
File: draw.py Project: DongliangGao/pydec
def lineplot(vertices,indices,labels=False,linewidths=1):
    """
    Plot 2D line segments
    """
    vertices,indices = asarray(vertices),asarray(indices)
    
    #3d tensor [segment index][vertex index][x/y value]
    lines = vertices[numpy.ravel(indices),:].reshape((indices.shape[0],2,2))
    
    col = matplotlib.collections.LineCollection(lines)
    col.set_color('k')
    col.set_linewidth(linewidths)

    #sub =  subplot(111)
    sub = matplotlib.pylab.gca()
    sub.add_collection(col,autolim=True)
    matplotlib.pylab.axis('off')
    sub.autoscale_view()

    if labels:
        barycenters = numpy.average(lines,axis=1)
        for n,bc in enumerate(barycenters):
            matplotlib.pylab.text(bc[0], bc[1], str(n), {'color' : 'k', 'fontsize' : 8,
                                        'horizontalalignment' : 'center',
                                        'verticalalignment' : 'center'
                                        })
Example #23
def compute_mean_vector(category_name, labellist, layer = 'fc8'):
    print category_name
    featurefile_list = glob.glob('%s/%s/*.mat' %(featurefilepath, category_name))
    
    # gather all the training samples for which predicted category
    # was the category under consideration
    correct_features = []
    for featurefile in featurefile_list:
        try:
            img_arr = loadmat(featurefile)
            predicted_category = labellist[img_arr['scores'].argmax()]
            if predicted_category == category_name:
                correct_features += [img_arr[layer]]
        except TypeError:
            continue
    
    # Now compute channel wise mean vector
    channel_mean_vec = []
    for channelid in range(correct_features[0].shape[0]):
        channel = []
        for feature in correct_features:
            channel += [feature[channelid, :]]
        channel = sp.asarray(channel)
        assert len(correct_features) == channel.shape[0]
        # Gather mean over each channel, to get mean channel vector
        channel_mean_vec += [sp.mean(channel, axis=0)]

    # this vector contains mean computed over correct classifications
    # for each channel separately
    channel_mean_vec = sp.asarray(channel_mean_vec)
    savemat('%s.mat' %category_name, {'%s'%category_name: channel_mean_vec})
Example #24
def __prepare_histogram(h1, h2):
    """Convert the histograms to scipy.ndarrays if required."""
    h1 = h1 if scipy.ndarray == type(h1) else scipy.asarray(h1)
    h2 = h2 if scipy.ndarray == type(h2) else scipy.asarray(h2)
    if h1.shape != h2.shape or h1.size != h2.size:
        raise ValueError('h1 and h2 must be of same shape and size')
    return h1, h2
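
A brief sketch of the helper's contract (the leading double underscore only marks it module-private, so it is callable within its own module):

import scipy

h1, h2 = __prepare_histogram([1, 2, 3], (4, 5, 6))
print(h1.shape, h2.shape)  # (3,) (3,): both inputs come back as ndarrays

try:
    __prepare_histogram([1, 2], [1, 2, 3])
except ValueError as err:
    print(err)  # h1 and h2 must be of same shape and size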
Example #25
File: draw.py Project: DongliangGao/pydec
def triplot(vertices, indices, labels=False):
    """
    Plot a 2D triangle mesh
    """
    
    vertices,indices = asarray(vertices),asarray(indices)

    #3d tensor [triangle index][vertex index][x/y value]
    triangles = vertices[numpy.ravel(indices),:].reshape((indices.shape[0],3,2))
    
    col = matplotlib.collections.PolyCollection(triangles)
    col.set_facecolor('grey')
    col.set_alpha(0.5)
    col.set_linewidth(1)

    #sub =  subplot(111)
    sub = matplotlib.pylab.gca()
    sub.add_collection(col,autolim=True)
    matplotlib.pylab.axis('off')
    sub.autoscale_view()

    if labels:
        barycenters = numpy.average(triangles,axis=1)
        for n,bc in enumerate(barycenters):
            matplotlib.pylab.text(bc[0], bc[1], str(n), {'color' : 'k', 'fontsize' : 8,
                                                         'horizontalalignment' : 'center',
                                                         'verticalalignment' : 'center'
                                                         })
Example #26
def recalibrate_scores(weibull_model, labellist, imgarr,
                       layer = 'fc8', alpharank = 10, distance_type = 'eucos'):
    """ 
    Given FC8 features for an image, list of weibull models for each class,
    re-calibrate scores

    Input:
    ---------------
    weibull_model : pre-computed weibull_model obtained from weibull_tailfitting() function
    labellist : ImageNet 2012 labellist
    imgarr : features for a particular image extracted using caffe architecture
    
    Output:
    ---------------
    openmax_probab: Probability values for a given class computed using OpenMax
    softmax_probab: Probability values for a given class computed using SoftMax (these
    were precomputed from the caffe architecture. The function returns them for the sake
    of convenience)

    """
    
    imglayer = imgarr[layer]
    ranked_list = imgarr['scores'].argsort().ravel()[::-1]
    alpha_weights = [((alpharank+1) - i)/float(alpharank) for i in range(1, alpharank+1)]
    ranked_alpha = sp.zeros(1000)
    for i in range(len(alpha_weights)):
        ranked_alpha[ranked_list[i]] = alpha_weights[i]

    # Now recalibrate each fc8 score for each channel and for each class
    # to include probability of unknown
    openmax_fc8, openmax_score_u = [], []
    for channel in range(NCHANNELS):
        channel_scores = imglayer[channel, :]
        openmax_fc8_channel = []
        openmax_fc8_unknown = []
        count = 0
        for categoryid in range(NCLASSES):
            # get distance between current channel and mean vector
            category_weibull = query_weibull(labellist[categoryid], weibull_model, distance_type = distance_type)
            channel_distance = compute_distance(channel_scores, channel, category_weibull[0],
                                                distance_type = distance_type)

            # obtain w_score for the distance and compute probability of the distance
            # being unknown wrt to mean training vector and channel distances for
            # category and channel under consideration
            wscore = category_weibull[2][channel].w_score(channel_distance)
            modified_fc8_score = channel_scores[categoryid] * ( 1 - wscore*ranked_alpha[categoryid] )
            openmax_fc8_channel += [modified_fc8_score]
            openmax_fc8_unknown += [channel_scores[categoryid] - modified_fc8_score ]

        # gather modified scores fc8 scores for each channel for the given image
        openmax_fc8 += [openmax_fc8_channel]
        openmax_score_u += [openmax_fc8_unknown]
    openmax_fc8 = sp.asarray(openmax_fc8)
    openmax_score_u = sp.asarray(openmax_score_u)
    
    # Pass the recalibrated fc8 scores for the image into openmax    
    openmax_probab = computeOpenMaxProbability(openmax_fc8, openmax_score_u)
    softmax_probab = imgarr['scores'].ravel() 
    return sp.asarray(openmax_probab), sp.asarray(softmax_probab)
Example #27
def computeOpenMaxProbability(openmax_fc8, openmax_score_u):
    """ Convert the scores in probability value using openmax
    
    Input:
    ---------------
    openmax_fc8 : modified FC8 layer from Weibull based computation
    openmax_score_u : degree of uncertainty/openness per channel

    Output:
    ---------------
    modified_scores : probability values modified using the OpenMax framework,
    by incorporating the degree of uncertainty/openness for a given class
    
    """
    prob_scores, prob_unknowns = [], []
    for channel in range(NCHANNELS):
        channel_scores, channel_unknowns = [], []
        for category in range(NCLASSES):
            channel_scores += [sp.exp(openmax_fc8[channel, category])]
                    
        total_denominator = sp.sum(sp.exp(openmax_fc8[channel, :])) + sp.exp(sp.sum(openmax_score_u[channel, :]))
        prob_scores += [channel_scores/total_denominator ]
        prob_unknowns += [sp.exp(sp.sum(openmax_score_u[channel, :]))/total_denominator]
        
    prob_scores = sp.asarray(prob_scores)
    prob_unknowns = sp.asarray(prob_unknowns)

    scores = sp.mean(prob_scores, axis = 0)
    unknowns = sp.mean(prob_unknowns, axis=0)
    modified_scores =  scores.tolist() + [unknowns]
    assert len(modified_scores) == 1001
    return modified_scores
Example #28
 def frombounds(
     cls, func, lbound, ubound, npop, crossover_rate=0.5, scale=None, strategy=("rand", 2, "bin"), eps=1e-6
 ):
     lbound = sp.asarray(lbound)
     ubound = sp.asarray(ubound)
     pop0 = rand(npop, len(lbound)) * (ubound - lbound) + lbound
     return cls(func, pop0, crossover_rate=crossover_rate, scale=scale, strategy=strategy, eps=eps)
Example #29
    def test3_raster_data_from_array(self):
        # A test based on this info;
        # http://en.wikipedia.org/wiki/Esri_grid
        # Let's hope no one edits the data....
        raster = [[-9999, -9999, 5, 2], [-9999, 20, 100, 36],
                  [3, 8, 35, 10], [32, 42, 50, 6],
                  [88, 75, 27, 9], [13, 5, 1, -9999]]
        upper_left_x = 0.
        upper_left_y = 300.
        cell_size = 50.0
        no_data_value = -9999

        # Just outside the midpoint of all sides
        lon = asarray([125, 125, 125, 125, 125, 125])
        lat = asarray([275, 225, 175, 125, 75, 25])

        raster = Raster.from_array(raster, upper_left_x, upper_left_y,
                                   cell_size, no_data_value)
        self.assertEqual(raster.ul_x, 0)
        self.assertEqual(raster.ul_y, 300)
        self.assertEqual(raster.x_pixel, 50)
        self.assertEqual(raster.y_pixel, -50)
        self.assertEqual(raster.x_size, 4)
        self.assertEqual(raster.y_size, 6)

        data = raster.raster_data_at_points(lon, lat)
        self.assertTrue(allclose(data, asarray([5.0, 100.0, 35.0,
                                                50.0, 27.0, 1.0])))

        # testing extent
        min_long, min_lat, max_long, max_lat = raster.extent()
        self.assertEqual(min_long, 0)
        self.assertEqual(min_lat, 0)
        self.assertEqual(max_long, 200)
        self.assertEqual(max_lat, 300)
Example #30
File: lines.py Project: markchil/bayesimp
def ZYFF(Te, EIJ):
    """Computes `ZY` and `FF`, used in other functions.
    
    If `EIJ` is a scalar, the output has the same shape as `Te`. If `EIJ` is an
    array, the output has shape `EIJ.shape` + `Te.shape`. This should keep the
    output broadcastable with `Te`.
    
    Parameters
    ----------
    Te : array of float
        Electron temperature. Shape is arbitrary.
    EIJ : scalar float or array of float
        Energy difference.
    """
    # Expand the dimensions of EIJ to produce the desired output shape:
    Te = scipy.asarray(Te, dtype=float)
    EIJ = scipy.asarray(EIJ, dtype=float)
    for n in xrange(Te.ndim):
        EIJ = scipy.expand_dims(EIJ, axis=-1)
    
    ZY = EIJ / (1e3 * Te)
    
    FF = scipy.zeros_like(ZY)
    mask = (ZY >= 1.5)
    FF[mask] = scipy.log((ZY[mask] + 1) / ZY[mask]) - (0.36 + 0.03 * scipy.sqrt(ZY[mask] + 0.01)) / (ZY[mask] + 1)**2
    mask = ~mask
    FF[mask] = scipy.log((ZY[mask] + 1) / ZY[mask]) - (0.36 + 0.03 / scipy.sqrt(ZY[mask] + 0.01)) / (ZY[mask] + 1)**2
    
    return ZY, FF
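
A shape-only demonstration of the broadcasting behaviour the docstring describes (the snippet targets Python 2, hence xrange):

import scipy

Te = scipy.asarray([1.0, 2.0, 4.0])  # keV, shape (3,)
EIJ = scipy.asarray([0.5, 7.0])      # two energy differences, shape (2,)

ZY, FF = ZYFF(Te, EIJ)
print(ZY.shape, FF.shape)  # (2, 3) and (2, 3): EIJ.shape + Te.shape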
Example #31
File: bFit.py Project: cudmore/bImPy
import numpy as np
from scipy import asarray
from scipy.optimize import curve_fit

# NOTE: this snippet was captured starting mid-list; the imports and the
# 'y = [' opener are assumed from the code that follows.
y = [
    100.0, 75.0, 137.0, 65.0, 49.0, 70.0, 38.0, 67.0, 91.0, 104.0, 47.0, 52.0,
    118.0, 135.0, 61.0, 47.0, 34.0, 117.0, 141.0, 74.0, 77.0, 61.0, 54.0, 53.0,
    109.0, 85.0, 36.0, 84.0, 141.0, 98.0, 96.0, 92.0, 72.0, 33.0, 70.0, 60.0,
    125.0, 56.0, 50.0, 58.0, 181.0, 81.0, 55.0, 109.0, 60.0, 57.0, 53.0, 34.0,
    57.0, 45.0, 34.0, 69.0, 90.0, 29.0, 35.0, 103.0, 76.0, 84.0, 68.0, 73.0,
    34.0, 75.0, 99.0, 82.0, 107.0, 129.0, 128.0, 150.0, 485.0, 786.0, 1379.0,
    1524.0, 2052.0, 2421.0, 1737.0, 905.0, 333.0, 239.0, 177.0, 140.0, 149.0,
    125.0, 102.0, 106.0, 114.0, 82.0, 54.0, 25.0, 50.0, 44.0, 34.0, 61.0, 61.0,
    73.0, 60.0, 66.0, 63.0, 29.0, 43.0, 55.0, 59.0, 39.0, 29.0, 26.0, 33.0,
    38.0, 48.0, 39.0, 29.0, 35.0, 55.0, 30.0, 49.0, 55.0, 27.0, 74.0, 70.0,
    96.0, 38.0, 63.0, 90.0, 92.0, 46.0, 67.0, 29.0, 70.0, 78.0, 34.0, 68.0,
    72.0, 40.0, 56.0
]
x = range(len(y))

x = asarray(x)
y = asarray(y)

print('x:', x)
print('y:', y)

n = len(x)  #the number of data
mean = sum(x * y) / n  #note this correction
sigma = sum(y * (x - mean)**2) / n  #note this correction


def myGaussian(x, amplitude, mean, stddev):
    return amplitude * np.exp(-((x - mean) / 4 / stddev)**2)


popt, pcov = curve_fit(myGaussian, x, y)
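
Note that the mean and sigma estimated above are never handed to the optimizer; a sketch that seeds curve_fit with them as the initial guess (p0 is a standard curve_fit keyword), which often helps convergence on peaked data like this:

popt, pcov = curve_fit(myGaussian, x, y, p0=[max(y), mean, sigma])
amplitude_fit, mean_fit, stddev_fit = popt
print('fitted parameters:', popt)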
Example #32
def manhattan(chromosome_lst,
              position_lst,
              pvalue_lst,
              output_fn=None,
              show_labels=True,
              y_limits=None):
    """
    Manhattan plot of p-values

    :param chromosome_lst: list containing chromosomes
    :type chromosome_lst: list of int

    :param position_lst: list containing SNP positions
    :type position_lst: list of int

    :param pvalue_lst: list containing p-values
    :type pvalue_lst: list of float

    :param output_fn: filename of figure to generate
    :type output_fn: string

    :param show_labels: whether or not to display axis labels
    :type show_labels: boolean

    :param y_limits: range (minimum and maximum) of y-axis
    :type y_limits: list of float
    :return:
    """

    # assert that all the input lists have the same length
    assert (len(chromosome_lst) == len(position_lst)
            ), 'lists have to be of same length'
    assert (len(position_lst) == len(pvalue_lst)
            ), 'lists have to be of same length'

    # remove SNPs that were assigned nan values
    nan_idx = sp.where(sp.isnan(sp.array(pvalue_lst)))[0]
    if len(nan_idx):
        logging.warning(
            'Of %s analyzed SNPs, %s have \'nan\' p-values. Removing SNPs.' %
            (len(pvalue_lst), len(nan_idx)))
        # remove p-values with nan-values
        [pvalue_lst, chromosome_lst,
         position_lst] = allgwas.util.remove_list_entries(
             nan_idx, [pvalue_lst, chromosome_lst, position_lst])

        # make sure all nan entries were removed successfully
        assert (len(sp.where(sp.isnan(
            sp.array(pvalue_lst)))[0]) == 0), "Not all nan's were removed"

    # sort lists by chromosome and position
    chromosome_lst, position_lst, pvalue_lst = allgwas.util.sort_cpp_lists(
        chromosome_lst, position_lst, pvalue_lst)

    # transform p-values to -log10
    pv_arr = sp.asarray([-sp.log10(p) for p in pvalue_lst])

    # generate color vector (each entry corresponds to one chromosome)
    col_vec = [
        '#A6CEE3' if i % 2 == 0 else '#1F78B4'
        for i in sp.unique(chromosome_lst)
    ]

    # generate manhattan plot
    fig, ax = plt.subplots()

    # initialize location and labels of x-axis ticks
    xtick_loc = []
    xtick_label = []

    # compute Bonferroni correction
    bonf = 7.2e-8
    log_bonf = -sp.log10(bonf)

    # plot p-values versus SNPs
    for chrom in sp.unique(chromosome_lst):
        idx = sp.asarray(sp.where(chromosome_lst == chrom)[0])
        ax.plot([idx[0], idx[-1]], [log_bonf, log_bonf], '-r', lw=1.5)
        ax.plot(idx, pv_arr[idx], '.', color=col_vec[chrom - 1], markersize=4)

        # generate locations for the x-axis ticks and resp. labels
        xtick_loc.append(idx[0] + (idx[-1] - idx[0]) / 2)
        xtick_label.append('%s' % chrom)

    # set x-axis limits
    ax.set_xlim([0, idx[-1]])

    # if specified, set y-limits:
    if y_limits:
        print y_limits
        ax.set_ylim(y_limits)

    # add grid, add labels for x-axis
    ax.get_xaxis().set_ticks(xtick_loc)
    ax.get_xaxis().set_ticklabels(xtick_label, fontsize=15, rotation=90)
    plt.gca().xaxis.grid(False)
    plt.gca().yaxis.grid(True)

    # increase the fontsize of the y-ticks
    ax.yaxis.set_tick_params(labelsize=15)

    # axis labels
    if show_labels:
        ax.set_xlabel('chromosome')
        ax.set_ylabel('significance [-log10(pvalue)]')

    if output_fn:
        # check if output folder exists and save
        allgwas.util.check_dirs(filename=output_fn)
        plt.savefig(output_fn, bbox_inches='tight')
    else:
        plt.show()
    pass
Example #33
def compute_lines(
        Z,
        cs_den,
        ne,
        Te,
        atdata=None,
        path=None,
        sindat=None,
        sindir=None,
        PEC=None,
        E_thresh=0.0,
        He_source='PEC',
        full_return=False
    ):
    """Compute the spectral lines, using the same algorithm as the IDL program lines.pro.
    
    The output `em` is an array of shape (`n_time`, `n_lines`, `n_space`).
    
    Returns a tuple of (`em`, `lam`, `E`, `q`, `comment`), where the variables
    are as follows:
    * `em` is the emissivities in photons/s/cm^3, and has shape
      (`n_time`, `n_lines`, `n_space`).
    * `lam` is the wavelengths of each line in nm, and has shape (`n_lines`,).
    * `E` is the energy of each line in keV, and has shape (`n_lines`,).
    * `q` is the charge state of each line, and has shape (`n_lines`,).
    * `comment` is the comment from each line, and has shape (`n_lines`,).
    
    Parameters
    ----------
    Z : int
        The atomic number of the element to compute the lines for.
    cs_den : array, (`n_time`, `n_cs`, `n_space`)
        The charge state densities (as returned by STRAHL). Units are cm^-3.
    ne : array, (`n_space`,) or (`n_time`, `n_space`)
        The electron densities, either a stationary profile, or profiles as a
        function of time. Units are cm^-3.
    Te : array, (`n_space`,) or (`n_time`, `n_space`)
        The electron temperatures, either a stationary profile, or profiles as a
        function of time. Units are keV.
    atdata : dict, optional
        The atomic physics data, as read by :py:func:`read_atdata`. If `None`,
        the atomic physics data are read from `path`. Default is `None` (read
        from file).
    path : str, optional
        The path to read the atomic physics data from, only used if `atdata` is
        `None`. If `None`, the default path defined in :py:func:`read_atdata` is
        used. Default is `None` (use :py:func:`read_atdata` default).
    sindat : dict, optional
        Data from the sin*.dat files. If `None`, the data are read from `sindir`
        using :py:func:`read_sindat`. Default is `None` (read from files).
    sindir : str, optional
        The directory to look for the `sin*.dat` files in. If `None`, the
        default path defined in :py:func:`read_sindat` is used. Default is `None`
        (use :py:func:`read_sindat` default).
    PEC : dict, optional
        Dictionary of photon emissivity coefficients as collected by
        :py:class:`~bayesimp.Run`. This should have keys which are the charge
        states and values which are dictionaries which have keys which are
        wavelengths and values which are interp1d objects.
    E_thresh : float, optional
        The energy threshold below which lines are thrown out. Default is 0.0.
        FOR NOW THIS IS ONLY APPLIED TO THE PEC CALCULATION.
    He_source : {'PEC', 'lines', 'both'}, optional
        Source for data on He-like line emission. Can come from the ADAS ADF15
        PEC files ('PEC'), the approach used in lines.pro ('lines'), or both
        sets of lines can be included ('both'). Default is 'PEC'.
    full_return : bool, optional
        If True, all values described above are returned. If False, only `em`
        and `E` are returned. Default is False (only return `em`, `E`).
    """
    # Convert the densities to cm^-3:
    # cs_den = cs_den * 1e-6
    # ne = ne * 1e-6
    
    # Make sure the atomic physics data are loaded:
    if atdata is None:
        if path is None:
            atdata = read_atdata()
        else:
            atdata = read_atdata(path=path)
    try:
        atdata = atdata[Z]
    except KeyError:
        raise ValueError("No atomic physics data for Z={Z:d}!".format(Z=Z))
    # Get the additional data from the sin*.dat files:
    if sindat is None:
        if sindir is None:
            sindat = read_sindat()
        else:
            sindat = read_sindat(sindir=sindir)
    try:
        sindat = sindat[Z]
    except KeyError:
        sindat = None
    
    # Pull out the He-, Li- and H-like charge states:
    n_H = cs_den[:, -2, :]
    if Z > 1:
        n_He = cs_den[:, -3, :]
    if Z > 2:
        n_Li = cs_den[:, -4, :]
    
    # Figure out the shape of the PEC results so we don't have to do so many
    # concatenations:
    PEC_len = 0
    if PEC is not None:  # guard against the default PEC=None, as the PEC block below does
        for qv, P in PEC.iteritems():
            for lv, pv in P.iteritems():
                for pec_obj in pv:
                    E_val = h * c / (lv / 10.0 * 1e-9 * e * 1e3)
                    if E_val >= E_thresh:
                        PEC_len += 1
    
    em = scipy.zeros((cs_den.shape[0], len(atdata) + PEC_len, cs_den.shape[2]))
    
    # Set up the return values:
    lam = [l.lam for l in atdata]
    E = [l.E for l in atdata]
    q = [l.q for l in atdata]
    comment = [l.comment for l in atdata]
    
    # The He-like lines need to be handled specially, since they all enter into
    # the calculation of each other:
    # This approach lets the He-like lines appear anywhere in the sequence in
    # atdata.dat, as long as they are ordered.
    if He_source in ('lines', 'both'):
        line_types = scipy.asarray([ld.data_type for ld in atdata])
        He_like_lines, = scipy.where(line_types == 8)
        # Enforce the condition that the lines be in order in the file:
        He_like_lines.sort()
        if len(He_like_lines) > 0:
            S1 = SSS(Te, atdata[He_like_lines[0]].p[0:6])
            S2 = SSS(Te, atdata[He_like_lines[1]].p[0:6])
            S3 = SSS(Te, atdata[He_like_lines[2]].p[0:6])
            S4 = SSS(Te, atdata[He_like_lines[3]].p[0:6])
            S5 = SSS(Te, atdata[He_like_lines[4]].p[0:6])
            S6 = SSS(Te, atdata[He_like_lines[5]].p[0:6])
            
            SPR1 = S1 * atdata[He_like_lines[0]].p[6]
            SPR2 = S2 * atdata[He_like_lines[1]].p[6]
            SPR3 = S3 * atdata[He_like_lines[2]].p[6]
            SPR4 = S4 * ALPHAZ(Te, atdata[He_like_lines[3]].p[0], S2, S3, S6, S4)
            SPR5 = S5 * atdata[He_like_lines[4]].p[6]
            SPR6 = S6 * atdata[He_like_lines[5]].p[6]
            
            SMP1P = SSSDPR(
                Te,
                Z,
                atdata[He_like_lines[4]].p[0],
                atdata[He_like_lines[0]].p[0],
                atdata[He_like_lines[0]].p[7],
                atdata[He_like_lines[0]].p[8]
            )
            SM2 = SSSDPR(
                Te,
                Z,
                atdata[He_like_lines[3]].p[0],
                atdata[He_like_lines[1]].p[0],
                atdata[He_like_lines[1]].p[7],
                atdata[He_like_lines[1]].p[8]
            )
            SM1 = SSSDPR(
                Te,
                Z,
                atdata[He_like_lines[3]].p[0],
                atdata[He_like_lines[2]].p[0],
                atdata[He_like_lines[2]].p[7],
                atdata[He_like_lines[2]].p[8]
            )
            SM0 = SSSDPR(
                Te,
                Z,
                atdata[He_like_lines[3]].p[0],
                atdata[He_like_lines[5]].p[0],
                atdata[He_like_lines[5]].p[7],
                atdata[He_like_lines[5]].p[8]
            )
            
            S1PMP = SSSDPRO(
                Te,
                0.333,
                atdata[He_like_lines[4]].p[0],
                atdata[He_like_lines[0]].p[0],
                SMP1P
            )
            S2M = SSSDPRO(
                Te,
                0.6,
                atdata[He_like_lines[3]].p[0],
                atdata[He_like_lines[1]].p[0],
                SM2
            )
            S1M = SSSDPRO(
                Te,
                1.0,
                atdata[He_like_lines[3]].p[0],
                atdata[He_like_lines[2]].p[0],
                SM1
            )
            S0M = SSSDPRO(
                Te,
                3.0,
                atdata[He_like_lines[3]].p[0],
                atdata[He_like_lines[5]].p[0],
                SM0
            )
            
            SLIF = SSSLI(Te, atdata[He_like_lines[4]].p[9], 0.5)
            SLIZ = SSSLI(Te, atdata[He_like_lines[3]].p[9], 1.5)
            
            ALPHRRW = RADREC(Te, Z, atdata[He_like_lines[0]].p[10:16])
            ALPHRRX = RADREC(Te, Z, atdata[He_like_lines[1]].p[10:16])
            ALPHRRY = RADREC(Te, Z, atdata[He_like_lines[2]].p[10:16])
            ALPHRRZ = RADREC(Te, Z, atdata[He_like_lines[3]].p[10:16])
            ALPHRRF = RADREC(Te, Z, atdata[He_like_lines[4]].p[10:16])
            ALPHRRO = RADREC(Te, Z, atdata[He_like_lines[5]].p[10:16])
            
            T1DR = scipy.exp(-6.80 * (Z + 0.5)**2 / (1e3 * Te))
            T2DR = scipy.exp(-8.77 * Z**2 / (1e3 * Te))
            T3DR = scipy.exp(-10.2 * Z**2 / (1e3 * Te))
            T0DR = 5.17e-14 * Z**4 / (1e3 * Te)**1.5
            
            C1 = 12.0 / (1.0 + 6.0e-6 * Z**4)
            C2 = 18.0 / (1.0 + 3.0e-5 * Z**4)
            C3 = 69.0 / (1.0 + 5.0e-3 * Z**3)
            ALPHDRW = T0DR * (C1 * T1DR + C2 * T2DR + C3 * T3DR)
            
            C1 = 1.9
            C2 = 54.0 / (1.0 + 1.9e-4 * Z**4)
            C3 = (
                380.0 / (1.0 + 5.0e-3 * Z**3) * 2.0 * (Z - 1)**0.6 /
                (1e3 * Te)**0.3 / (1.0 + 2.0 * (Z - 1)**0.6 / (1e3 * Te)**0.3)
            )
            ALPHDRX = T0DR * 5.0 / 9.0 * (C1 * T1DR + C2 * T2DR + C3 * T3DR)
            ALPHDRY = T0DR * 3.0 / 9.0 * (C1 * T1DR + C2 * T2DR + C3 * T3DR)
            ALPHDRO = T0DR * 1.0 / 9.0 * (C1 * T1DR + C2 * T2DR + C3 * T3DR)
            
            C1 = 3.0 / (1.0 + 3.0e-6 * Z**4)
            C2 = 0.5 / (1.0 + 2.2e-5 * Z**4)
            C3 = 6.3 / (1.0 + 5.0e-3 * Z**3)
            ALPHDRF = T0DR * (C1 * T1DR + C2 * T2DR + C3 * T3DR)
            
            C1 = 9.0 / (1.0 + 7.0e-5 * Z**4)
            C2 = 27.0 / (1.0 + 8.0e-5 * Z**4)
            C3 = 380.0 / (1.0 + 5.0e-3 * Z**3) / (1.0 + 2.0 * (Z - 1)**0.6 / (1e3 * Te)**0.3)
            ALPHDRZ = T0DR * (C1 * T1DR + C2 * T2DR + C3 * T3DR)
            
            ALPHW = ALPHRRW + ALPHDRW
            ALPHX = ALPHRRX + ALPHDRX
            ALPHY = ALPHRRY + ALPHDRY
            ALPHZ = ALPHRRZ + ALPHDRZ
            ALPHF = ALPHRRF + ALPHDRF
            ALPHO = ALPHRRO + ALPHDRO
            
            # NOTE: This only computes the W, X, Y, Z lines, even though we in
            # principle have data for F and O, too. I suspect this is because the
            # other lines are treated as negligible, but it still seems to be a
            # strange oversight.
            
            # Calculation for W line:
            NA1 = (n_Li * SLIF + n_He * SPR5 + n_H * ALPHF) / (atdata[He_like_lines[4]].p[16] + ne * SMP1P)
            NA2 = (n_He * SPR1 + n_H * ALPHW) / (ne * SMP1P)
            NA3 = (atdata[He_like_lines[0]].p[16] + ne * S1PMP) / (ne * SMP1P)
            NA4 = (atdata[He_like_lines[0]].p[17] + ne * S1PMP) / (atdata[He_like_lines[4]].p[16] + ne * SMP1P)
            NW = atdata[He_like_lines[0]].p[16] * ne * (NA1 + NA2) / (NA3 - NA4)
            em[:, He_like_lines[0], :] = NW
            
            # Calculation for Z line:
            NA1 = n_Li * SLIZ
            NA2 = n_He * (
                SPR4 + SPR6 + SPR3 / (
                    1.0 + atdata[He_like_lines[2]].p[16] / (
                        atdata[He_like_lines[2]].p[17] + ne * S1M
                    )
                )
            )
            NA3 = n_He * SPR2 / (
                1.0 + atdata[He_like_lines[1]].p[16] / (
                    atdata[He_like_lines[1]].p[17] + ne * S2M
                )
            )
            NA4 = n_H * (
                ALPHZ + ALPHO + ALPHY / (
                    1.0 + atdata[He_like_lines[2]].p[16] / (
                        atdata[He_like_lines[2]].p[17] + ne * S1M
                    )
                )
            )
            NA5 = n_H * ALPHX / (
                1.0 + atdata[He_like_lines[1]].p[16] / (
                    atdata[He_like_lines[1]].p[17] + ne * S2M
                )
            )
            NA6 = ne / atdata[He_like_lines[3]].p[16] * SM2 / (
                1.0 + (
                    atdata[He_like_lines[1]].p[17] + ne * S2M
                ) / atdata[He_like_lines[1]].p[16]
            )
            NA7 = ne / atdata[He_like_lines[3]].p[16] * SM1 / (
                1.0 + (
                    atdata[He_like_lines[2]].p[17] + ne * S1M
                ) / atdata[He_like_lines[2]].p[16]
            )
            NZ = ne * (NA1 + NA2 + NA3 + NA4 + NA5) / (1.0 + NA6 + NA7)
            em[:, He_like_lines[3], :] = NZ
            
            # Calculation for X line:
            NA1 = n_He * SPR2 + n_H * ALPHX
            NA2 = 1.0 + (
                atdata[He_like_lines[1]].p[17] + ne * S2M
            ) / atdata[He_like_lines[1]].p[16]
            NA3 = ne * SM2 / NA2
            NX = ne * NA1 / NA2 + NA3 * NZ / atdata[He_like_lines[3]].p[16]
            em[:, He_like_lines[1], :] = NX
            
            # Calculation for Y line:
            NA1 = n_He * SPR3 + n_H * ALPHY
            NA2 = 1.0 + (atdata[He_like_lines[2]].p[17] + ne * S1M) / atdata[He_like_lines[2]].p[16]
            NA3 = ne * SM1 / NA2
            NY = ne * NA1 / NA2 + NA3 * NZ / atdata[He_like_lines[3]].p[16]
            em[:, He_like_lines[2], :] = NY
    
    for line_idx, line_data in zip(range(len(atdata)), atdata):
        if line_data.data_type == 8:
            # This case was handled above, but this saves having to filter it
            # out at the start of the loop.
            pass
        elif line_data.data_type == 9:
            # This case is handled in a far more complicated manner in
            # lines.pro, but I am going to use a far simpler approximation for
            # the time being. This will probably need to change to get F right.
            ZY, FF = ZYFF(Te, line_data.p[0])
            gg = line_data.p[3] + (
                line_data.p[4] * ZY - line_data.p[5] * ZY**2 + line_data.p[6]
            ) * FF + line_data.p[5] * ZY
            # This assumes ne is in cm^-3...
            SHY = 1.58e-5 * 1.03 / scipy.sqrt(1e3 * Te) / line_data.p[0] * line_data.p[1] * gg * scipy.exp(-ZY)
            em[:, line_idx, :] = SHY * n_H * ne
        elif line_data.data_type == 10 and He_source in ('lines', 'both'):
            # Handle He-like/H-like satellites:
            tfact = scipy.exp(-line_data.p[0] / (1e3 * Te)) / (1e3 * Te)**1.5 * ne
            # This does the same thing as the if-statements in lines.pro.
            v = Z - line_data.q - 1
            if v in (0, 1, 2):
                em[:, line_idx, :] = 1.65e-9 * tfact * line_data.p[1] * cs_den[:, -v - 2, :]
            else:
                warnings.warn("Unknown satellite lines, skipping!", RuntimeWarning)
            # This handles all of the Z cases for which there are only q, r, s,
            # t lines present. The others are not implemented at this time.
            if (sindat is not None) and (line_data.q == (Z - 2)):
                if line_data.comment in ('/q line', '/r line', '/s line', '/t line'):
                    try:
                        em[:, line_idx, :] += 10**(
                            scipy.interpolate.InterpolatedUnivariateSpline(
                                sindat['terange'],
                                scipy.log10(sindat['s' + line_data.comment[1]])
                            )(1e3 * Te)
                        ) * ne * cs_den[:, Z - 3, :]
                    except KeyError:
                        pass
        # None of these line types are in the spectral range I need, so it isn't
        # worth the time to implement them at present.
        elif line_data.data_type == 11:
            # TODO!
            pass
        elif line_data.data_type == 12:
            # TODO!
            pass
        elif line_data.data_type == 13:
            # TODO!
            pass
        elif line_data.data_type == 14:
            # TODO!
            pass
        elif line_data.data_type == 15:
            # TODO!
            pass
        elif line_data.data_type == 16:
            # TODO!
            pass
        else:
            warnings.warn(
                "Unsupported line type {lt:d}, skipping line and leaving set to "
                "zero!".format(lt=line_data.data_type),
                RuntimeWarning
            )
    
    if PEC is not None and He_source in ('PEC', 'both'):
        k = len(atdata)
        for qv, P in PEC.iteritems():
            for lv, pv in P.iteritems():
                for pec_obj in pv:
                    E_val = h * c / (lv / 10.0 * 1e-9 * e * 1e3)
                    if E_val >= E_thresh:
                        lam = lam + [lv / 10.0,]
                        E = E + [E_val,]
                        q = q + [qv,]
                        comment = comment + [str(lv),]
                        # em has shape [ntimes, nlines, nspace]
                        em[:, k, :] = ne * cs_den[:, qv, :] * pec_obj.ev(
                            scipy.log10(ne),
                            scipy.log10(1e3 * Te)
                        )
                        k += 1
                        # em = scipy.concatenate((em, emv[:, None, :]), axis=1)
    
    # Convert to ph/s/m^3:
    # em = em * 1e6
    
    # This simplifies later calculations:
    E = scipy.asarray(E)
    
    if full_return:
        return em, lam, E, q, comment
    else:
        return em, E
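The PEC branch above converts each line's wavelength (stored in tenths of a nanometer, i.e. Angstroms) into a photon energy in keV via E = hc/(lambda * e * 1e3). A minimal self-contained sketch of that conversion using scipy.constants, as a sanity check:

import scipy.constants

def photon_energy_keV(lam_angstrom):
    """Photon energy in keV for a wavelength given in Angstroms."""
    h = scipy.constants.h  # Planck constant [J s]
    c = scipy.constants.c  # speed of light [m/s]
    e = scipy.constants.e  # elementary charge [C]
    return h * c / (lam_angstrom * 1e-10 * e * 1e3)

# e.g. the 19.2858 nm (192.858 A) Ca 16+ line comes out near 0.064 keV:
print(photon_energy_keV(192.858))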
Example #34
0
    def test_simulateAlignmentRandomSeed(self):
        """Simulate evolution, ensure scaled branches match number of subs."""

        scipy.random.seed(1)
        random.seed(1)

        # define model
        nsites = 200
        prefs = []
        minpref = 0.01
        for r in range(nsites):
            rprefs = scipy.random.dirichlet([1] * N_AA)
            rprefs[rprefs < minpref] = minpref
            rprefs /= rprefs.sum()
            prefs.append(dict(zip(sorted(AA_TO_INDEX.keys()), rprefs)))
        kappa = 4.2
        omega = 0.4
        beta = 1.5
        mu = 0.3
        if self.MODEL == phydmslib.models.ExpCM:
            phi = scipy.random.dirichlet([7] * N_NT)
            model = phydmslib.models.ExpCM(prefs, kappa=kappa, omega=omega,
                    beta=beta, mu=mu, phi=phi, freeparams=['mu'])
        elif self.MODEL == phydmslib.models.ExpCM_empirical_phi:
            g = scipy.random.dirichlet([7] * N_NT)
            model = phydmslib.models.ExpCM_empirical_phi(prefs, g,
                    kappa=kappa, omega=omega, beta=beta, mu=mu,
                    freeparams=['mu'])
        elif self.MODEL == phydmslib.models.YNGKP_M0:
            e_pw = scipy.asarray([scipy.random.dirichlet([7] * N_NT) for i
                    in range(3)])
            model = phydmslib.models.YNGKP_M0(e_pw, nsites)
        else:
            raise ValueError("Invalid MODEL: {0}".format(type(self.MODEL)))

        # make a test tree
        # tree is two sequences separated by a single branch
        t = 0.04 / model.branchScale
        newicktree = '(tip1:{0},tip2:{0});'.format(t / 2.0)
        temptree = '_temp.tree'
        with open(temptree, 'w') as f:
            f.write(newicktree)

        counter = 0
        seed = 1
        alignments = [{}, {}, {}]
        # alignments with the same seed number should be the same
        # make two alignments with the same seed number
        for counter in range(2):
            alignmentPrefix = "test_counter{0}_seed{1}".format(counter,seed)
            phydmslib.simulate.simulateAlignment(model, temptree, alignmentPrefix, seed)
            for s in Bio.SeqIO.parse("test_counter{0}_seed{1}_simulatedalignment.fasta".format(counter,seed), "fasta"):
                alignments[counter][s.id] = str(s.seq)
        # check they are the same
        for key in alignments[counter].keys():
            self.assertTrue(alignments[counter][key] == alignments[counter - 1][key])

        # alignments with different seed numbers should be different
        # make an alignment with a different seed number
        seed += 1
        counter += 1
        alignmentPrefix = "test_counter{0}_seed{1}".format(counter,seed)
        phydmslib.simulate.simulateAlignment(model, temptree, alignmentPrefix, seed)
        for s in Bio.SeqIO.parse("test_counter{0}_seed{1}_simulatedalignment.fasta".format(counter,seed), "fasta"):
            alignments[counter][s.id] = str(s.seq)
        # check they are different
        for key in alignments[counter].keys():
            self.assertFalse(alignments[counter][key] == alignments[counter - 1][key])


        # general clean-up
        os.remove(temptree)
        for fasta in glob.glob("test*simulatedalignment.fasta"):
            if os.path.isfile(fasta):
                os.remove(fasta)
Example #35
0
Dy = nst.Div_y(dy, nx, ny)
Dy = sp.csc_matrix(Dy)

# Laplace-Operator in 2D domain (is already sparse)
L = nst.Laplace(Gx, Gy, Dx, Dy)

# Transport Operator in 2D domain for the convective term (is already sparse)
Du = nst.Convective(Gx, Gy, Dx, Dy, U, V)

# Start Simulation
t_sum = 0
i = 0
while t_sum <= T_simu:
    # Make vector-shaped velocity to diagonal-shaped velocity for calculation
    # of transport operator for the convective term
    U = sc.asarray(sp.csc_matrix.todense(u_vec)).reshape(-1)
    U = sp.csc_matrix(sc.diag(U))
    V = sc.asarray(sp.csc_matrix.todense(v_vec)).reshape(-1)
    V = sp.csc_matrix(sc.diag(V))

    # Calculation of Transport operator
    Du = nst.Convective(Gx, Gy, Dx, Dy, U, V)

    # Runge-Kutta 4 for time integration
    u_vec = nst.RK4(t_step_vec, u_vec, p_vec, Gx, Du, L, nu)
    v_vec = nst.RK4(t_step_vec, v_vec, p_vec, Gy, Du, L, nu)

    # Solve Laplace equation to update pressure and to compensate for numerical
    # divergence
    (u_vec, v_vec, p_vec) = \
        nst.solve_laplace(p_vec, u_vec, v_vec, Gx, Gy, Dx, Dy, L, nx, ny)
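The nst module is not shown in this snippet; judging by the call signature, nst.RK4 advances the velocity field by one classical fourth-order Runge-Kutta step. For reference, a generic sketch of that scheme (f is a hypothetical right-hand side, not the actual nst API):

def rk4_step(f, t, y, dt):
    # One classical RK4 step for dy/dt = f(t, y).
    k1 = f(t, y)
    k2 = f(t + dt / 2.0, y + dt / 2.0 * k1)
    k3 = f(t + dt / 2.0, y + dt / 2.0 * k2)
    k4 = f(t + dt, y + dt * k3)
    return y + dt / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)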
Example #36
0
    def solve(self, wls):
        """Anisotropic solver.

        INPUT
        wls = wavelengths to scan (any asarray-able object).

        OUTPUT
        self.DEO1, self.DEE1, self.DEO3, self.DEE3 = power reflected
        and transmitted.
        """

        self.wls = S.atleast_1d(wls)

        LAMBDA = self.LAMBDA
        n = self.n
        multilayer = self.multilayer
        alpha = self.alpha
        delta = self.delta
        psi = self.psi
        phi = self.phi

        nlayers = len(multilayer)
        i = S.arange(-n, n + 1)
        nood = 2 * n + 1
        hmax = nood - 1

        DEO1 = S.zeros((nood, self.wls.size))
        DEO3 = S.zeros_like(DEO1)
        DEE1 = S.zeros_like(DEO1)
        DEE3 = S.zeros_like(DEO1)

        c1 = S.array([1., 0., 0.])
        c3 = S.array([1., 0., 0.])
        # grating on the xy plane
        K = 2 * pi / LAMBDA * \
            S.array([S.sin(phi), 0., S.cos(phi)], dtype=complex)
        dirk1 = S.array([S.sin(alpha) * S.cos(delta),
                         S.sin(alpha) * S.sin(delta),
                         S.cos(alpha)])

        # D polarization vector
        u = S.array([S.cos(psi) * S.cos(alpha) * S.cos(delta) - S.sin(psi) * S.sin(delta),
                     S.cos(psi) * S.cos(alpha) * S.sin(delta) +
                     S.sin(psi) * S.cos(delta),
                     -S.cos(psi) * S.sin(alpha)])

        kO1i = S.zeros((3, i.size), dtype=complex)
        kE1i = S.zeros_like(kO1i)
        kO3i = S.zeros_like(kO1i)
        kE3i = S.zeros_like(kO1i)

        Mp = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex)
        M = S.zeros((4 * nood, 4 * nood, nlayers), dtype=complex)

        dlt = (i == 0).astype(int)

        for iwl, wl in enumerate(self.wls):

            nO1 = nE1 = multilayer[0].mat.n(wl).item()
            nO3 = nE3 = multilayer[-1].mat.n(wl).item()

            # wavevectors
            k = 2 * pi / wl

            eps1 = S.diag(S.asarray([nE1, nO1, nO1]) ** 2)
            eps3 = S.diag(S.asarray([nE3, nO3, nO3]) ** 2)

            # ordinary wave
            abskO1 = k * nO1
            # abskO3 = k * nO3
            # extraordinary wave
            # abskE1 = k * nO1 *nE1 / S.sqrt(nO1**2 + (nE1**2 - nO1**2) * S.dot(-c1, dirk1)**2)
            # abskE3 = k * nO3 *nE3 / S.sqrt(nO3**2 + (nE3**2 - nO3**2) * S.dot(-c3, dirk1)**2)

            k1 = abskO1 * dirk1

            kO1i[0, :] = k1[0] - i * K[0]
            kO1i[1, :] = k1[1] * S.ones_like(i)
            kO1i[2, :] = -dispersion_relation_ordinary(
                kO1i[0, :], kO1i[1, :], k, nO1)

            kE1i[0, :] = kO1i[0, :]
            kE1i[1, :] = kO1i[1, :]
            kE1i[2, :] = -dispersion_relation_extraordinary(
                kE1i[0, :], kE1i[1, :], k, nO1, nE1, c1)

            kO3i[0, :] = kO1i[0, :]
            kO3i[1, :] = kO1i[1, :]
            kO3i[2, :] = dispersion_relation_ordinary(
                kO3i[0, :], kO3i[1, :], k, nO3)

            kE3i[0, :] = kO1i[0, :]
            kE3i[1, :] = kO1i[1, :]
            kE3i[2, :] = dispersion_relation_extraordinary(
                kE3i[0, :], kE3i[1, :], k, nO3, nE3, c3)

            # k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [k1[2] - i * K[2]]]
            k2i = S.r_[[k1[0] - i * K[0]], [k1[1] - i * K[1]], [- i * K[2]]]

            # aliases for constant wavevectors
            kx = kO1i[0, :]  # or kE1i[0, :], they are the same
            ky = k1[1]

            # matrices
            I = S.eye(nood, dtype=complex)
            ZERO = S.zeros((nood, nood), dtype=complex)
            Kx = S.diag(kx / k)
            Ky = ky / k * I
            Kz = S.diag(k2i[2, :] / k)
            KO1z = S.diag(kO1i[2, :] / k)
            KE1z = S.diag(kE1i[2, :] / k)
            KO3z = S.diag(kO3i[2, :] / k)
            KE3z = S.diag(kE3i[2, :] / k)

            ARO = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KO1z * eps1[2, 0]
            BRO = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KO1z * eps1[2, 1]
            CRO_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KO1z * eps1[2, 2])

            ARE = Kx * eps1[0, 0] + Ky * eps1[1, 0] + KE1z * eps1[2, 0]
            BRE = Kx * eps1[0, 1] + Ky * eps1[1, 1] + KE1z * eps1[2, 1]
            CRE_1 = inv(Kx * eps1[0, 2] + Ky * eps1[1, 2] + KE1z * eps1[2, 2])

            ATO = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KO3z * eps3[2, 0]
            BTO = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KO3z * eps3[2, 1]
            CTO_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KO3z * eps3[2, 2])

            ATE = Kx * eps3[0, 0] + Ky * eps3[1, 0] + KE3z * eps3[2, 0]
            BTE = Kx * eps3[0, 1] + Ky * eps3[1, 1] + KE3z * eps3[2, 1]
            CTE_1 = inv(Kx * eps3[0, 2] + Ky * eps3[1, 2] + KE3z * eps3[2, 2])

            DRE = c1[1] * KE1z - c1[2] * Ky
            ERE = c1[2] * Kx - c1[0] * KE1z
            FRE = c1[0] * Ky - c1[1] * Kx

            DTE = c3[1] * KE3z - c3[2] * Ky
            ETE = c3[2] * Kx - c3[0] * KE3z
            FTE = c3[0] * Ky - c3[1] * Kx

            b = S.r_[u[0] * dlt, u[1] * dlt, (k1[1] / k * u[2] - k1[2] / k * u[1]) * dlt, (
                k1[2] / k * u[0] - k1[0] / k * u[2]) * dlt]
            Ky_CRO_1 = ky / k * CRO_1
            Ky_CRE_1 = ky / k * CRE_1
            Kx_CRO_1 = kx[:, S.newaxis] / k * CRO_1
            Kx_CRE_1 = kx[:, S.newaxis] / k * CRE_1
            MR31 = -S.dot(Ky_CRO_1, ARO)
            MR32 = -S.dot(Ky_CRO_1, BRO) - KO1z
            MR33 = -S.dot(Ky_CRE_1, ARE)
            MR34 = -S.dot(Ky_CRE_1, BRE) - KE1z
            MR41 = S.dot(Kx_CRO_1, ARO) + KO1z
            MR42 = S.dot(Kx_CRO_1, BRO)
            MR43 = S.dot(Kx_CRE_1, ARE) + KE1z
            MR44 = S.dot(Kx_CRE_1, BRE)
            MR = S.asarray(S.bmat([[I, ZERO, I, ZERO],
                                   [ZERO, I, ZERO, I],
                                   [MR31, MR32, MR33, MR34],
                                   [MR41, MR42, MR43, MR44]]))

            Ky_CTO_1 = ky / k * CTO_1
            Ky_CTE_1 = ky / k * CTE_1
            Kx_CTO_1 = kx[:, S.newaxis] / k * CTO_1
            Kx_CTE_1 = kx[:, S.newaxis] / k * CTE_1
            MT31 = -S.dot(Ky_CTO_1, ATO)
            MT32 = -S.dot(Ky_CTO_1, BTO) - KO3z
            MT33 = -S.dot(Ky_CTE_1, ATE)
            MT34 = -S.dot(Ky_CTE_1, BTE) - KE3z
            MT41 = S.dot(Kx_CTO_1, ATO) + KO3z
            MT42 = S.dot(Kx_CTO_1, BTO)
            MT43 = S.dot(Kx_CTE_1, ATE) + KE3z
            MT44 = S.dot(Kx_CTE_1, BTE)
            MT = S.asarray(S.bmat([[I, ZERO, I, ZERO],
                                   [ZERO, I, ZERO, I],
                                   [MT31, MT32, MT33, MT34],
                                   [MT41, MT42, MT43, MT44]]))

            Mp.fill(0.0)
            M.fill(0.0)

            for nlayer in range(nlayers - 2, 0, -1):  # internal layers

                layer = multilayer[nlayer]
                thickness = layer.thickness

                EPS2, EPS21 = layer.getEPSFourierCoeffs(
                    wl, n, anisotropic=True)

                # Exx = S.squeeze(EPS2[0, 0, :])
                # Exx = toeplitz(S.flipud(Exx[0:hmax + 1]), Exx[hmax:])
                Exy = S.squeeze(EPS2[0, 1, :])
                Exy = toeplitz(S.flipud(Exy[0:hmax + 1]), Exy[hmax:])
                Exz = S.squeeze(EPS2[0, 2, :])
                Exz = toeplitz(S.flipud(Exz[0:hmax + 1]), Exz[hmax:])

                Eyx = S.squeeze(EPS2[1, 0, :])
                Eyx = toeplitz(S.flipud(Eyx[0:hmax + 1]), Eyx[hmax:])
                Eyy = S.squeeze(EPS2[1, 1, :])
                Eyy = toeplitz(S.flipud(Eyy[0:hmax + 1]), Eyy[hmax:])
                Eyz = S.squeeze(EPS2[1, 2, :])
                Eyz = toeplitz(S.flipud(Eyz[0:hmax + 1]), Eyz[hmax:])

                Ezx = S.squeeze(EPS2[2, 0, :])
                Ezx = toeplitz(S.flipud(Ezx[0:hmax + 1]), Ezx[hmax:])
                Ezy = S.squeeze(EPS2[2, 1, :])
                Ezy = toeplitz(S.flipud(Ezy[0:hmax + 1]), Ezy[hmax:])
                Ezz = S.squeeze(EPS2[2, 2, :])
                Ezz = toeplitz(S.flipud(Ezz[0:hmax + 1]), Ezz[hmax:])

                Exx_1 = S.squeeze(EPS21[0, 0, :])
                Exx_1 = toeplitz(S.flipud(Exx_1[0:hmax + 1]), Exx_1[hmax:])
                Exx_1_1 = inv(Exx_1)

                # lalanne
                Ezz_1 = inv(Ezz)
                Ky_Ezz_1 = ky / k * Ezz_1
                Kx_Ezz_1 = kx[:, S.newaxis] / k * Ezz_1
                Exz_Ezz_1 = S.dot(Exz, Ezz_1)
                Eyz_Ezz_1 = S.dot(Eyz, Ezz_1)
                H11 = 1j * S.dot(Ky_Ezz_1, Ezy)
                H12 = 1j * S.dot(Ky_Ezz_1, Ezx)
                H13 = S.dot(Ky_Ezz_1, Kx)
                H14 = I - S.dot(Ky_Ezz_1, Ky)
                H21 = 1j * S.dot(Kx_Ezz_1, Ezy)
                H22 = 1j * S.dot(Kx_Ezz_1, Ezx)
                H23 = S.dot(Kx_Ezz_1, Kx) - I
                H24 = -S.dot(Kx_Ezz_1, Ky)
                H31 = S.dot(Kx, Ky) + Exy - S.dot(Exz_Ezz_1, Ezy)
                H32 = Exx_1_1 - S.dot(Ky, Ky) - S.dot(Exz_Ezz_1, Ezx)
                H33 = 1j * S.dot(Exz_Ezz_1, Kx)
                H34 = -1j * S.dot(Exz_Ezz_1, Ky)
                H41 = S.dot(Kx, Kx) - Eyy + S.dot(Eyz_Ezz_1, Ezy)
                H42 = -S.dot(Kx, Ky) - Eyx + S.dot(Eyz_Ezz_1, Ezx)
                H43 = -1j * S.dot(Eyz_Ezz_1, Kx)
                H44 = 1j * S.dot(Eyz_Ezz_1, Ky)
                H = 1j * S.diag(S.repeat(S.diag(Kz), 4)) + \
                    S.asarray(S.bmat([[H11, H12, H13, H14],
                                      [H21, H22, H23, H24],
                                      [H31, H32, H33, H34],
                                      [H41, H42, H43, H44]]))

                q, W = eig(H)
                W1, W2, W3, W4 = S.split(W, 4)

                #
                # boundary conditions
                #
                # x = [R T]
                # R = [ROx ROy REx REy]
                # T = [TOx TOy TEx TEy]
                # b + MR.R = M1p.c
                # M1.c = M2p.c
                # ...
                # ML.c = MT.T
                # therefore: b + MR.R = (M1p.M1^-1.M2p.M2^-1. ...).MT.T
                # missing equations from (46)..(49) in glytsis_rigorous
                # [b] = [-MR Mtot.MT] [R]
                # [0]   [...........] [T]

                z = S.zeros_like(q)
                z[S.where(q.real > 0)] = -thickness
                D = S.exp(k * q * z)
                Sy0 = W1 * D[S.newaxis, :]
                Sx0 = W2 * D[S.newaxis, :]
                Uy0 = W3 * D[S.newaxis, :]
                Ux0 = W4 * D[S.newaxis, :]

                z = thickness * S.ones_like(q)
                z[S.where(q.real > 0)] = 0
                D = S.exp(k * q * z)
                D1 = S.exp(-1j * k2i[2, :] * thickness)
                Syd = D1[:, S.newaxis] * W1 * D[S.newaxis, :]
                Sxd = D1[:, S.newaxis] * W2 * D[S.newaxis, :]
                Uyd = D1[:, S.newaxis] * W3 * D[S.newaxis, :]
                Uxd = D1[:, S.newaxis] * W4 * D[S.newaxis, :]

                Mp[:, :, nlayer] = S.r_[Sx0, Sy0, -1j * Ux0, -1j * Uy0]
                M[:, :, nlayer] = S.r_[Sxd, Syd, -1j * Uxd, -1j * Uyd]

            Mtot = S.eye(4 * nood, dtype=complex)
            for nlayer in range(1, nlayers - 1):
                Mtot = S.dot(
                    S.dot(Mtot, Mp[:, :, nlayer]), inv(M[:, :, nlayer]))

            BC_b = S.r_[b, S.zeros_like(b)]
            BC_A1 = S.c_[-MR, S.dot(Mtot, MT)]
            BC_A2 = S.asarray(S.bmat(
                [[(c1[0] * I - c1[2] * S.dot(CRO_1, ARO)), (c1[1] * I - c1[2] * S.dot(CRO_1, BRO)), ZERO, ZERO, ZERO,
                  ZERO, ZERO, ZERO],
                 [ZERO, ZERO, (DRE - S.dot(S.dot(FRE, CRE_1), ARE)), (ERE - S.dot(S.dot(FRE, CRE_1), BRE)), ZERO, ZERO,
                  ZERO, ZERO],
                 [ZERO, ZERO, ZERO, ZERO, (c3[0] * I - c3[2] * S.dot(CTO_1, ATO)),
                  (c3[1] * I - c3[2] * S.dot(CTO_1, BTO)), ZERO, ZERO],
                 [ZERO, ZERO, ZERO, ZERO, ZERO, ZERO, (DTE - S.dot(S.dot(FTE, CTE_1), ATE)),
                  (ETE - S.dot(S.dot(FTE, CTE_1), BTE))]]))

            BC_A = S.r_[BC_A1, BC_A2]

            x = linsolve(BC_A, BC_b)

            ROx, ROy, REx, REy, TOx, TOy, TEx, TEy = S.split(x, 8)

            ROz = -S.dot(CRO_1, (S.dot(ARO, ROx) + S.dot(BRO, ROy)))
            REz = -S.dot(CRE_1, (S.dot(ARE, REx) + S.dot(BRE, REy)))
            TOz = -S.dot(CTO_1, (S.dot(ATO, TOx) + S.dot(BTO, TOy)))
            TEz = -S.dot(CTE_1, (S.dot(ATE, TEx) + S.dot(BTE, TEy)))

            denom = (k1[2] - S.dot(u, k1) * u[2]).real
            DEO1[:, iwl] = -((S.absolute(ROx) ** 2 + S.absolute(ROy) ** 2 + S.absolute(ROz) ** 2) * S.conj(kO1i[2, :]) -
                             (ROx * kO1i[0, :] + ROy * kO1i[1, :] + ROz * kO1i[2, :]) * S.conj(ROz)).real / denom
            DEE1[:, iwl] = -((S.absolute(REx) ** 2 + S.absolute(REy) ** 2 + S.absolute(REz) ** 2) * S.conj(kE1i[2, :]) -
                             (REx * kE1i[0, :] + REy * kE1i[1, :] + REz * kE1i[2, :]) * S.conj(REz)).real / denom
            DEO3[:, iwl] = ((S.absolute(TOx) ** 2 + S.absolute(TOy) ** 2 + S.absolute(TOz) ** 2) * S.conj(kO3i[2, :]) -
                            (TOx * kO3i[0, :] + TOy * kO3i[1, :] + TOz * kO3i[2, :]) * S.conj(TOz)).real / denom
            DEE3[:, iwl] = ((S.absolute(TEx) ** 2 + S.absolute(TEy) ** 2 + S.absolute(TEz) ** 2) * S.conj(kE3i[2, :]) -
                            (TEx * kE3i[0, :] + TEy * kE3i[1, :] + TEz * kE3i[2, :]) * S.conj(TEz)).real / denom

        # save the results
        self.DEO1 = DEO1
        self.DEE1 = DEE1
        self.DEO3 = DEO3
        self.DEE3 = DEE3

        return self
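For a lossless grating, the reflected and transmitted efficiencies returned by solve() should sum to one at each wavelength, which gives a cheap sanity check. A sketch, assuming `solver` is an instance of this class and `wls` its wavelength grid:

import scipy as S

solver.solve(wls)
total = (solver.DEO1 + solver.DEE1 + solver.DEO3 + solver.DEE3).sum(axis=0)
assert S.allclose(total, 1.0, atol=1e-6), "power not conserved"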
Example #37
0
               5.4428, 3.7731, 5.7653, 5.8442, 5.6682, 1.3157, 3.9510, 5.6332,
               3.7770, 5.8015, 3.9457, 3.8990, 1.3182, 3.9404, 5.8320, 3.8812,
               3.9210, 5.8370, 11.8177, 12.4743, 5.6734, 3.9644, 12.6866,
               12.4557, 11.8555, 5.7780, 12.2669, 3.9627, 3.9002, 12.6020,
               12.6091, 3.9517, 12.2001, 5.8190, 12.6265, 12.4970, 12.4883,
               3.9585, 12.2793, 12.4807, 12.5836, 12.5252, 12.5256, 12.5007,
               107.5003, 12.5127, 124.3039, 260.0374, 301.5136, 229.1056,
               512.7942, 286.7219, 595.2381, 321.8228, 545.2265, 682.1748,
               1070.0909, 1338.6881, 766.2248, 1505.1174, 3374.9577, 4644.6817,
               6583.2783, 9090.9089, 7380.0736, 14430.0141)

# Locations of Ca 16+ lines (in nanometers):
CA_16_LINES = (19.2858, )

# Combined Ca 16+, 17+ lines (in nanometers):
CA_LINES = scipy.asarray(CA_17_LINES + CA_16_LINES)

# POS vector for XEUS:
XEUS_POS = [2.561, 0.2158, 0.196, 0.1136]


class VUVData(object):
    """Helper object to load and process the VUV data.
    
    Execution proceeds as follows:
    
    * Loads the XEUS data.
    * Allows user to select lines, background subtraction intervals.
    * Loads the LoWEUS data.
    * Allows the user to select lines, background subtraction intervals.
    * Computes the normalization factors.
Example #38
0
File: constants.py  Project: cez026/phydms
                CODON_TO_INDEX[codon] = i
                INDEX_TO_CODON[i] = codon
                CODON_TO_AA.append(AA_TO_INDEX[aa])
                i += 1
            else:
                STOP_CODON_TO_NT_INDICES.append(
                    scipy.zeros((3, N_NT), dtype='float'))
                STOP_CODON_TO_NT_INDICES[-1][0][NT_TO_INDEX[nt1]] = 1.0
                STOP_CODON_TO_NT_INDICES[-1][1][NT_TO_INDEX[nt2]] = 1.0
                STOP_CODON_TO_NT_INDICES[-1][2][NT_TO_INDEX[nt3]] = 1.0
                STOP_POSITIONS[0][NT_TO_INDEX[nt1]] = -1.0
                STOP_POSITIONS[1][NT_TO_INDEX[nt2]] = -1.0
                STOP_POSITIONS[2][NT_TO_INDEX[nt3]] = -1.0
                N_STOP += 1

STOP_CODON_TO_NT_INDICES = scipy.asarray(STOP_CODON_TO_NT_INDICES)

N_CODON = len(CODON_TO_INDEX)
CODON_TO_AA = scipy.array(CODON_TO_AA, dtype='int')
assert len(CODON_TO_INDEX) == len(INDEX_TO_CODON) == len(
    CODON_TO_AA) == N_CODON

PURINES = frozenset(['A', 'G'])
PYRIMIDINES = frozenset(['C', 'T'])
assert PURINES.union(PYRIMIDINES) == frozenset(NT_TO_INDEX.keys())

CODON_TRANSITION = scipy.full((N_CODON, N_CODON), False, dtype='bool')
CODON_SINGLEMUT = scipy.full((N_CODON, N_CODON), False, dtype='bool')
CODON_NT_MUT = scipy.full((N_NT, N_CODON, N_CODON), False, dtype='bool')
CODON_NT = scipy.full((3, N_NT, N_CODON), False, dtype='bool')
CODON_NT_INDEX = scipy.full((3, N_CODON), -1, dtype='int')
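The snippet is cut off before the boolean matrices above are filled in. A sketch of the presumed construction (not the verbatim phydms source): mark codon pairs that differ at exactly one nucleotide, and classify that change as a transition (within purines or within pyrimidines) versus a transversion:

for x, cx in INDEX_TO_CODON.items():
    for y, cy in INDEX_TO_CODON.items():
        diffs = [p for p in range(3) if cx[p] != cy[p]]
        if len(diffs) == 1:
            p = diffs[0]
            CODON_SINGLEMUT[x][y] = True
            # a transition stays within purines or within pyrimidines
            CODON_TRANSITION[x][y] = ({cx[p], cy[p]} <= PURINES or
                                      {cx[p], cy[p]} <= PYRIMIDINES)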
Example #39
0
    
    hd = h5py.File('/kyb/agbs/stegle/work/projects/warpedlmm/data/Nordborg_data.h5py','r')
    geno = hd['geno']
    pheno = hd['pheno']

    phenotype_names = hd['pheno/phenotype_names'][:]
    Npheno = phenotype_names.shape[0]
    #2. geno/pheno
    
    geno_index = pheno['geno_index'][:]
    #resort in increasing order
    Is = geno_index.argsort()
    geno_index = geno_index[Is]

    Y = pheno['Y'][:][Is]
    X = SP.asarray(geno['x'][:,geno_index][:].T,dtype='float')
    #center genotype
    X -= X.mean()
    X /= X.std()

    ip = 7
    y_ = Y[:,ip:ip+5]
    Iok = (~SP.isnan(y_)).all(axis=1)
    y_ = y_[Iok]
    X_ = X[Iok,::1]
    K = 1./X_.shape[1]*SP.dot(X_,X_.T)
    C_ = SP.ones([X_.shape[0],1])
    
    #standardize
    y_-=y_.mean(axis=0)
    y_/=y_.std(axis=0)
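The block above builds a realised-relationship (kinship) matrix K = X_ X_^T / n_SNPs from globally standardised genotypes, then standardises the phenotypes column-wise. The kinship step in isolation (a minimal sketch, with random data standing in for the Nordborg genotypes):

import scipy as SP

X = SP.random.randn(100, 500)    # 100 individuals x 500 SNPs (toy data)
X -= X.mean()
X /= X.std()
K = SP.dot(X, X.T) / X.shape[1]  # kinship: average outer product over SNPs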
Example #40
0
    def evaluate_interpolated_traj(self,dv_id,time,subinterval=None,der=0) :
        """ Needs Trajectory.build_interpolated_traj() to be called first

        Arguments:
        dv_id        the name of the component of the trajectory you wish to 
                     evaluate
        time         a vector of times or a scalar
        subinterval  an optional argument specifying the time interval 
                     between events that the time argument lies (but given a 
                     valid time, it will be found automatically)
        der          the derivative of the spline function you want, the order
                     of the derivative will be constrained by the order of the 
                     interpolated spline
        Outputs:
        A single scalar value (if time input is a scalar)
        or
        (returned_times, interpolated_trajectory at those times) if times is a
        vector

        Note: It is necessary to have a returned_times argument too, in case 
              the times passed in happens to have a timepoint that corresponds 
              to an event time, which often has two trajectory values associated
              with it.
        """
        if scipy.isscalar(time) :
            time = scipy.asarray([time]) # if a scalar was passed in, convert to an array
        else :
            time = scipy.asarray(time)
        local_tcks = self.tcks
        sorted_intervals = scipy.sort(local_tcks.keys(),axis=0)

        if subinterval is not None : # confine things to just one interval
            if subinterval not in local_tcks.keys() :
                raise "Not a valid subinterval (not in Trajectory.tcks.keys())"
            else :
                sorted_intervals = [[subinterval[0],subinterval[1]]]
                interval_start_ind = 0
                interval_end_ind = 0
        else :
            # sorted_intervals ends up being a list of lists, each length 2, not tuples anymore
            for interval_ind, interval in enumerate(sorted_intervals) :
                start_time, end_time = interval[0],interval[1]
                if (time[0] >= start_time) :
                    interval_start_ind = interval_ind
                if (time[-1] <= end_time) :
                    interval_end_ind = interval_ind
                    break

        dv_y = []
        returned_times = []
        dv_ind = self.key_column.keyToIndex[dv_id]
        for interval in sorted_intervals[interval_start_ind:(interval_end_ind+1)] :
            currTimes = scipy.compress( scipy.logical_and((time>=interval[0]),(time<=interval[1])) , time )
            startslice, endslice = 0, None
            if len(currTimes) > 1 :
                if (currTimes[0]==currTimes[1]) :
                    # skip the first time point because it's repeated
                    startslice = 1
                if (currTimes[-1]==currTimes[-2]) :
                    # skip the last time point because it's repeated
                    endslice = -1
                dv_y.extend( scipy.interpolate.splev(currTimes[startslice:endslice],
                                local_tcks[(interval[0],interval[1])][dv_ind],der=der) )
                returned_times.extend(currTimes[startslice:endslice])
            elif len(currTimes) == 1: # explicitly check, because len(currTimes) could = 0
                dv_y.extend( [ scipy.interpolate.splev(currTimes, local_tcks[(interval[0],interval[1])][dv_ind],der=der) ])
                returned_times.extend(currTimes[startslice:endslice])

        if len(returned_times) == 1 :
            return dv_y[0]
        else :
            return returned_times,dv_y
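Typical usage, per the docstring (a sketch: the variable name 'x1' and the time grid are invented):

# assuming `traj` is a Trajectory with build_interpolated_traj() available:
traj.build_interpolated_traj()
times, values = traj.evaluate_interpolated_traj('x1', scipy.linspace(0., 10., 101))
slope = traj.evaluate_interpolated_traj('x1', 5.0, der=1)  # scalar in -> scalar out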
Example #41
0
    def test1_excel_curve2nrml(self):
        dirs = determine_this_file_path()
        excel_file = 'synthetic_data_Flood_2012.xls'
        excel_file = os.path.join(dirs, excel_file)
        contents_filename = 'contents_synthetic.xml'
        fabric_filename = 'fabric_synthetic.xml'
        create_vuln_xml.excel_curve2nrml(contents_filename, fabric_filename,
                                         excel_file)
        # load in the xml file to see if it's ok.
        vuln_sets = vuln_sets_from_xml_file(contents_filename)

        skey = create_vuln_xml.FLOOD_HOUSE_CONTENTS
        self.assertTrue(allclose(vuln_sets[skey].intensity_measure_level,
                                 asarray([0, 1])))
        self.assertEqual(vuln_sets[skey].intensity_measure_type,
                         "water depth m")
        self.assertEqual(vuln_sets[skey].vulnerability_set_id, skey)
        self.assertEqual(vuln_sets[skey].asset_category, "")
        self.assertEqual(vuln_sets[skey].loss_category,
                         "contents_loss_ratio")

        act_cont = {
            u'FCM1_INSURED_SAVE': array([0., 0.2]),
            u'FCM1_INSURED_NOACTION': array([0., 0.3]),
            u'FCM1_INSURED_EXPOSE': array([0., 0.4]),
            u'FCM1_UNINSURED_SAVE': array([0., 0.6]),
            u'FCM1_UNINSURED_NOACTION': array([0., 0.7]),
            u'FCM1_UNINSURED_EXPOSE': array([0., 0.8]),
            u'FCM2_INSURED_SAVE': array([0., 0.22]),
            u'FCM2_INSURED_NOACTION': array([0., 0.32]),
            u'FCM2_INSURED_EXPOSE': array([0., 0.42]),
            u'FCM2_UNINSURED_SAVE': array([0., 0.62]),
            u'FCM2_UNINSURED_NOACTION': array([0., 0.72]),
            u'FCM2_UNINSURED_EXPOSE': array([0., 0.82])
        }

        for key in act_cont:
            vul_funct = vuln_sets[skey].vulnerability_functions[key]
            self.assertTrue(allclose(vul_funct.mean_loss,
                                     act_cont[key]))
            self.assertTrue(allclose(vul_funct.coefficient_of_variation,
                                     array([0., 0.])))

        vuln_sets = vuln_sets_from_xml_file(fabric_filename)

        skey = create_vuln_xml.FLOOD_HOUSE_FABRIC
        self.assertTrue(allclose(vuln_sets[skey].intensity_measure_level,
                                 asarray([0, 1])))
        self.assertEqual(vuln_sets[skey].intensity_measure_type,
                         "water depth m")
        self.assertEqual(vuln_sets[skey].vulnerability_set_id, skey)
        self.assertEqual(vuln_sets[skey].asset_category, "")
        self.assertEqual(vuln_sets[skey].loss_category,
                         "structural_loss_ratio")
        actually_fab = {u'FCM1_INSURED': array([0., 0.1]),
                        u'FCM2_INSURED': array([0., 0.12]),
                        u'FCM1_UNINSURED': array([0., 0.5]),
                        u'FCM2_UNINSURED': array([0., 0.52])}

        for key in actually_fab:
            vul_funct = vuln_sets[skey].vulnerability_functions[key]
            self.assertTrue(allclose(vul_funct.mean_loss,
                                     actually_fab[key]))
            self.assertTrue(allclose(vul_funct.coefficient_of_variation,
                                     array([0., 0.])))

        os.remove(contents_filename)
        os.remove(fabric_filename)
Example #42
0
    def multi_epoch(self,
                    velocity,
                    sigvel,
                    mass,
                    dates,
                    pfalse=1e-4,
                    log_minv=-3,
                    log_maxv=4,
                    log_stepv=0.02):
        """Returns a callable Basefitter which computes the log-likelihood to reproduce the observed multi-epoch radial velocity distribution.

        Uses the current settings of the binary properties to calculate the distribution of radial velocity offsets due to binary orbital motions.

        Arguments:
        - `velocity`: list-like with for every star an array-like containing the observed radial velocities in km/s.
        - `sigvel`: list-like with for every star an array-like containing the measurement uncertainties in km/s.
        - `mass`: 1D array-like (or single number) giving best estimate for mass of the observed stars in solar masses.
        - `dates`: list-like with for every star an array-like containing the date of observations in years.
        - `pfalse`: probability of false detection (i.e. detecting a single star as a binary). Lowering this will decrease the number of detected binaries.
        - `log_minv`: 10_log of the lowest velocity bin in km/s (should be significantly smaller than the velocity dispersion).
        - `log_maxv`: 10_log maximum of the largest velocity bin.
        - `log_stepv`: step size in 10_log(velocity) space.
        """
        if sp.asarray(mass).ndim == 0:
            mass = [mass] * len(velocity)
        for test_length in ('sigvel', 'mass', 'dates'):
            if len(locals()[test_length]) != len(velocity):
                raise ValueError(
                    '%s does not have the same length as the velocity list' %
                    test_length)
        unique_dates = sp.unique(reduce(sp.append, dates))
        vbin = {}
        for date in unique_dates:
            vbin.update({date: self.velocity(1., time=date)[0]})

        vmean = []
        sigmean = []
        single_mass = []
        pdet_single = []
        pdet_rvvar = []
        pbin = []
        is_single = []
        vbord = 10**sp.arange(log_minv, log_maxv, log_stepv)
        vbound = sp.append(-vbord[::-1], sp.append(0, vbord))

        for mult_vel, mult_sigvel, pmass, epochs in zip(
                velocity, sigvel, mass, dates):
            epochs, mult_vel, mult_sigvel = sp.broadcast_arrays(
                epochs, mult_vel, mult_sigvel)
            if epochs.size == 1:
                mean_rv = mult_vel[0]
                mean_sig = mult_sigvel[0]
                rv_binoffset = vbin[
                    epochs[0]]  # + sp.randn(self.size) * mult_sigvel[0]
                pdet = 0.
                rvvariable = False
            else:
                weight = mult_sigvel**-2
                rv_offset_per_epoch = sp.zeros((self.size, len(epochs)))
                rv_binoffset_per_epoch = sp.zeros((self.size, len(epochs)))
                for ixepoch, (date, sv) in enumerate(zip(epochs, mult_sigvel)):
                    rv_binoffset_per_epoch[:, ixepoch] = vbin[date]
                    rv_offset_per_epoch[:, ixepoch] = (
                        rv_binoffset_per_epoch[:, ixepoch] * pmass**(1. / 3.)
                        + sp.randn(self.size) * sv)
                rv_offset_mean = sp.sum(
                    rv_offset_per_epoch * weight[sp.newaxis, :],
                    -1) / sp.sum(weight)
                chisq = sp.sum(
                    (rv_offset_per_epoch - rv_offset_mean[:, sp.newaxis])**2. *
                    weight[sp.newaxis, :], -1)
                isdetected = sp.stats.chisqprob(chisq,
                                                len(epochs) - 1) < pfalse
                pdet = float(sp.sum(isdetected)) / isdetected.size
                rv_binoffset = (sp.sum(
                    rv_binoffset_per_epoch * weight[sp.newaxis, :], -1) /
                                sp.sum(weight))[~isdetected]

                mean_rv = sp.sum(mult_vel * weight) / sp.sum(weight)
                mean_sig = sp.sum(weight)**-.5
                rvvariable = sp.stats.chisqprob(
                    sp.sum((mean_rv - mult_vel)**2 * weight),
                    len(epochs) - 1) < pfalse
            if rvvariable:
                pdet_rvvar.append(pdet)
            else:
                vmean.append(mean_rv)
                sigmean.append(mean_sig)
                pdet_single.append(pdet)
                single_mass.append(pmass)
                prob_bin = sp.histogram(
                    abs(rv_binoffset), bins=sp.append(
                        0, vbord))[0] * 1. / rv_binoffset.size
                pbin.append(
                    sp.append(prob_bin[::-1], prob_bin) / 2. /
                    (vbound[1:] - vbound[:-1]))
            is_single.append(not rvvariable)
        pbin = sp.array(pbin).T
        vmean = sp.array(vmean)
        sigmean = sp.array(sigmean)
        single_mass = sp.array(single_mass)
        pdet_single = sp.array(pdet_single)
        pdet_rvvar = sp.array(pdet_rvvar)
        is_single = sp.array(is_single, dtype='bool')

        return fitter.BinaryFit(vmean, sigmean, single_mass, vbound, pbin,
                                pdet_single, pdet_rvvar, is_single)
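The symmetric velocity grid built near the top of the method (vbord/vbound) underlies the histogramming of the binary offsets; the same construction in isolation, with the default parameters:

import scipy as sp

log_minv, log_maxv, log_stepv = -3, 4, 0.02
vbord = 10**sp.arange(log_minv, log_maxv, log_stepv)   # positive bin edges
vbound = sp.append(-vbord[::-1], sp.append(0, vbord))  # mirrored about zero
# len(vbound) - 1 velocity bins, log-spaced on both sides of zero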
Example #43
0
# Load data set
X,y=rt.get_samples_from_roi('../Data/university.tif','../Data/university_gt.tif')
wave = sp.loadtxt('../Data/waves.csv',delimiter=',') 

# Select the same number of samples
nt = 900
xt,yt=[],[]
for i in sp.unique(y):
    t = sp.where(y==i)[0]
    nc = t.size
    rp =  sp.random.permutation(nc)
    xt.extend(X[t[rp[0:nt]],:])
    yt.extend(y[t[rp[0:nt]]])

xt = sp.asarray(xt)
yt = sp.asarray(yt)

# Do FFFS
maxVar = 12
model = npfs.GMMFeaturesSelection()
model.learn_gmm(xt,yt)
idx, crit, _ = model.selection('forward',xt, yt,criterion='kappa', varNb=maxVar, nfold=5)

for i in range(maxVar):
    print "({0},{1})".format(wave[idx[i]],crit[i])

for i in range(maxVar):
    print "({0},{1})".format(i+1,crit[i])

# Save selected feature
Example #44
0
    def addSample(self, train, target):
        self.trainx = r_[self.trainx, asarray([train])]
        self.trainy = r_[self.trainy, asarray(target)]
        self.noise = r_[self.noise, array([0.001])]
        self.calculated = False
Example #45
0
    def solve(self, wls):
        """Isotropic solver.

        INPUT
        wls = wavelengths to scan (any asarray-able object).

        OUTPUT
        self.DE1, self.DE3 = power reflected and transmitted.

        NOTE
        see:
        Moharam, "Formulation for stable and efficient implementation
        of the rigorous coupled-wave analysis of binary gratings",
        JOSA A, 12(5), 1995
        Lalanne, "Highly improved convergence of the coupled-wave
        method for TM polarization", JOSA A, 13(4), 1996
        Moharam, "Stable implementation of the rigorous coupled-wave
        analysis for surface-relief gratings: enhanced trasmittance
        matrix approach", JOSA A, 12(5), 1995
        """

        self.wls = S.atleast_1d(wls)

        LAMBDA = self.LAMBDA
        n = self.n
        multilayer = self.multilayer
        alpha = self.alpha
        delta = self.delta
        psi = self.psi
        phi = self.phi

        nlayers = len(multilayer)
        i = S.arange(-n, n + 1)
        nood = 2 * n + 1
        hmax = nood - 1

        # grating vector (on the xz plane)
        # grating on the xy plane
        K = 2 * pi / LAMBDA * \
            S.array([S.sin(phi), 0., S.cos(phi)], dtype=complex)

        DE1 = S.zeros((nood, self.wls.size))
        DE3 = S.zeros_like(DE1)

        dirk1 = S.array([S.sin(alpha) * S.cos(delta),
                         S.sin(alpha) * S.sin(delta),
                         S.cos(alpha)])

        # usefull matrices
        I = S.eye(i.size)
        I2 = S.eye(i.size * 2)
        ZERO = S.zeros_like(I)

        X = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
        MTp1 = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
        MTp2 = S.zeros_like(MTp1)

        EPS2 = S.zeros(2 * hmax + 1, dtype=complex)
        EPS21 = S.zeros_like(EPS2)

        dlt = (i == 0).astype(int)

        for iwl, wl in enumerate(self.wls):

            # free space wavevector
            k = 2 * pi / wl

            n1 = multilayer[0].mat.n(wl).item()
            n3 = multilayer[-1].mat.n(wl).item()

            # incident plane wave wavevector
            k1 = k * n1 * dirk1

            # all the other wavevectors
            tmp_x = k1[0] - i * K[0]
            tmp_y = k1[1] * S.ones_like(i)
            tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n1)
            k1i = S.r_[[tmp_x], [tmp_y], [tmp_z]]

            # k2i = S.r_[[k1[0] - i*K[0]], [k1[1] - i * K[1]], [-i * K[2]]]

            tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n3)
            k3i = S.r_[[k1i[0, :]], [k1i[1, :]], [tmp_z]]

            # aliases for constant wavevectors
            kx = k1i[0, :]
            ky = k1[1]

            # angles of reflection
            # phi_i = S.arctan2(ky,kx)
            phi_i = S.arctan2(ky, kx.real)  # careful: real part only

            Kx = S.diag(kx / k)
            Ky = ky / k * I
            Z1 = S.diag(k1i[2, :] / (k * n1 ** 2))
            Y1 = S.diag(k1i[2, :] / k)
            Z3 = S.diag(k3i[2, :] / (k * n3 ** 2))
            Y3 = S.diag(k3i[2, :] / k)
            # Fc = S.diag(S.cos(phi_i))
            fc = S.cos(phi_i)
            # Fs = S.diag(S.sin(phi_i))
            fs = S.sin(phi_i)

            MR = S.asarray(S.bmat([[I, ZERO],
                                   [-1j * Y1, ZERO],
                                   [ZERO, I],
                                   [ZERO, -1j * Z1]]))

            MT = S.asarray(S.bmat([[I, ZERO],
                                   [1j * Y3, ZERO],
                                   [ZERO, I],
                                   [ZERO, 1j * Z3]]))

            # internal layers (grating or layer)
            X.fill(0.0)
            MTp1.fill(0.0)
            MTp2.fill(0.0)
            for nlayer in range(nlayers - 2, 0, -1):  # internal layers

                layer = multilayer[nlayer]
                d = layer.thickness

                EPS2, EPS21 = layer.getEPSFourierCoeffs(
                    wl, n, anisotropic=False)

                E = toeplitz(EPS2[hmax::-1], EPS2[hmax:])
                E1 = toeplitz(EPS21[hmax::-1], EPS21[hmax:])
                E11 = inv(E1)
                # B = S.dot(Kx, linsolve(E,Kx)) - I
                B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                # A = S.dot(Kx, Kx) - E
                A = S.diag((kx / k) ** 2) - E

                # Note: workaround for a bug (alfredo)
                # randomly perturb Kx until cond(A) is small (< 1e10)
                # dirty fix... :-(
                # for certain kx the Helmholtz operator has 2 null eigenvalues
                # and A, B are not invertible --> slightly change the kx...
                # these cases should really be treated separately
                # (analytically) instead
                if cond(A) > 1e10:
                    warning('BAD CONDITIONING: randomization of kx')
                    while cond(A) > 1e10:
                        Kx = Kx * (1 + 1e-9 * S.rand())
                        B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                        A = S.diag((kx / k) ** 2) - E

                if S.absolute(K[2] / k) > 1e-10:

                    raise ValueError(
                        'First Order Helmholtz Operator not implemented, yet!')

                elif ky == 0 or S.allclose(S.diag(Ky / ky * k), 1):

                    # lalanne
                    # H_U_reduced = S.dot(Ky, Ky) + A
                    H_U_reduced = (ky / k) ** 2 * I + A
                    # H_S_reduced = S.dot(Ky, Ky) + S.dot(Kx, linsolve(E, S.dot(Kx, E11))) - E11
                    H_S_reduced = (ky / k) ** 2 * I + kx[:, S.newaxis] / k * linsolve(E,
                                                                                      kx[:, S.newaxis] / k * E11) - E11

                    q1, W1 = eig(H_U_reduced)
                    q1 = S.sqrt(q1)
                    q2, W2 = eig(H_S_reduced)
                    q2 = S.sqrt(q2)

                    # boundary conditions

                    # V11 = S.dot(linsolve(A, W1), S.diag(q1))
                    V11 = linsolve(A, W1) * q1[S.newaxis, :]
                    V12 = (ky / k) * S.dot(linsolve(A, Kx), W2)
                    V21 = (ky / k) * S.dot(linsolve(B, Kx), linsolve(E, W1))
                    # V22 = S.dot(linsolve(B, W2), S.diag(q2))
                    V22 = linsolve(B, W2) * q2[S.newaxis, :]

                    # Vss = S.dot(Fc, V11)
                    Vss = fc[:, S.newaxis] * V11
                    # Wss = S.dot(Fc, W1)  + S.dot(Fs, V21)
                    Wss = fc[:, S.newaxis] * W1 + fs[:, S.newaxis] * V21
                    # Vsp = S.dot(Fc, V12) - S.dot(Fs, W2)
                    Vsp = fc[:, S.newaxis] * V12 - fs[:, S.newaxis] * W2
                    # Wsp = S.dot(Fs, V22)
                    Wsp = fs[:, S.newaxis] * V22
                    # Wpp = S.dot(Fc, V22)
                    Wpp = fc[:, S.newaxis] * V22
                    # Vpp = S.dot(Fc, W2)  + S.dot(Fs, V12)
                    Vpp = fc[:, S.newaxis] * W2 + fs[:, S.newaxis] * V12
                    # Wps = S.dot(Fc, V21) - S.dot(Fs, W1)
                    Wps = fc[:, S.newaxis] * V21 - fs[:, S.newaxis] * W1
                    # Vps = S.dot(Fs, V11)
                    Vps = fs[:, S.newaxis] * V11

                    Mc2bar = S.asarray(S.bmat([[Vss, Vsp, Vss, Vsp],
                                               [Wss, Wsp, -Wss, -Wsp],
                                               [Wps, Wpp, -Wps, -Wpp],
                                               [Vps, Vpp, Vps, Vpp]]))

                    x = S.r_[S.exp(-k * q1 * d), S.exp(-k * q2 * d)]

                    # Mc1 = S.dot(Mc2bar, S.diag(S.r_[S.ones_like(x), x]))
                    xx = S.r_[S.ones_like(x), x]
                    Mc1 = Mc2bar * xx[S.newaxis, :]

                    X[:, :, nlayer] = S.diag(x)

                    MTp = linsolve(Mc2bar, MT)
                    MTp1[:, :, nlayer] = MTp[0:2 * nood, :]
                    MTp2 = MTp[2 * nood:, :]

                    MT = S.dot(Mc1, S.r_[
                        I2, S.dot(MTp2, linsolve(MTp1[:, :, nlayer],
                                                 X[:, :, nlayer]))])

                else:

                    raise ValueError(
                        'Second Order Helmholtz Operator not implemented, yet!')

            # M = S.asarray(S.bmat([-MR, MT]))
            M = S.c_[-MR, MT]
            b = S.r_[S.sin(psi) * dlt,
                     1j * S.sin(psi) * n1 * S.cos(alpha) * dlt,
                     -1j * S.cos(psi) * n1 * dlt,
                     S.cos(psi) * S.cos(alpha) * dlt]

            x = linsolve(M, b)
            R, T = S.split(x, 2)
            Rs, Rp = S.split(R, 2)
            for ii in range(1, nlayers - 1):
                T = S.dot(linsolve(MTp1[:, :, ii], X[:, :, ii]), T)
            Ts, Tp = S.split(T, 2)

            DE1[:, iwl] = (k1i[2, :] / (k1[2])).real * S.absolute(Rs) ** 2 + \
                          (k1i[2, :] / (k1[2] * n1 ** 2)).real * \
                S.absolute(Rp) ** 2
            DE3[:, iwl] = (k3i[2, :] / (k1[2])).real * S.absolute(Ts) ** 2 + \
                          (k3i[2, :] / (k1[2] * n3 ** 2)).real * \
                S.absolute(Tp) ** 2

        # save the results
        self.DE1 = DE1
        self.DE3 = DE3

        return self
Example #46
0
def arcwave2(arc, arcmodel, scale, order=3, bcutoff=2e3, rcutoff=1e4):
    from scipy import ndimage, stats, interpolate, signal, optimize
    import pylab

    arc = arc.copy()
    arc[scipy.isnan(arc)] = 1.
    arc[arc <= 1.] = arc.mean()

    wave, model = arcmodel['orig']
    model[scipy.isnan(model)] = 0.
    wave = wave.copy()
    model = model.copy()
    cond = (wave > bcutoff) & (wave < rcutoff)
    corrmodel = model.copy()
    corrmodel[(wave < bcutoff) | (wave > rcutoff)] = 0.

    corr = signal.correlate(arc, corrmodel, mode='valid')
    offset = corr.argmax()

    lines = arcmodel['lines'].copy()
    bc = lines.min() - scale * 20.
    rc = lines.max() + scale * 20.

    x = scipy.arange(arc[offset:].size)
    w = wave[offset:offset + x.size].copy()
    cond = (w > bc) & (w < rc)
    fit = scipy.empty((x[cond].size, 2))
    fit[:, 0] = x[cond].copy()
    fit[:, 1] = w[cond].copy()
    pars = sf.lsqfit(fit, 'polynomial', 1)

    pars['coeff'][0] = wave[offset]
    pars['coeff'][1] = scale
    #    pars = [wave[offset],scale]
    #    for i in range(2,order+1):
    #        pars.append(1e-5**i)
    pylab.plot(wave, model)
    w = sf.genfunc(scipy.arange(x.size), 0., pars)
    pylab.plot(w, interpolate.splev(w, arcmodel['matched']))
    pylab.show()

    def arcfit(p, x, arc, mod):
        fit = {'coeff': scipy.atleast_2d(p).T, 'type': 'polynomial'}
        w = sf.genfunc(x, 0., fit)
        cond = (w > bcutoff) & (w < rcutoff)
        m = interpolate.splev(w[cond], mod)
        chi = (m - arc[cond]) / abs(arc[cond])**0.5
        return chi

    widearc = ndimage.gaussian_filter(arc, 7.)
    x = scipy.arange(arc[offset:].size)
    coeff, ier = optimize.leastsq(
        arcfit,
        pars, (x, widearc[offset:].copy(), arcmodel['wide']),
        maxfev=100000)

    fit = {'coeff': scipy.atleast_2d(coeff).T, 'type': 'polynomial'}

    x = scipy.arange(arc.size)
    l = get_lines(x, arc, nstd=15.)
    lw = sf.genfunc(l - offset, 0., fit)
    lines = []
    for i in range(l.size):
        diff = abs(lw[i] - arcmodel['lines'])
        if diff.min() > 5. * scale:
            continue
        lines.append([l[i], arcmodel['lines'][diff.argmin()]])
    fit = sf.lsqfit(scipy.asarray(lines), 'polynomial', order)

    pars = fit['coeff'].flatten()
    coeff, ier = optimize.leastsq(
        arcfit,
        pars, (x[offset:], arc[offset:], arcmodel['matched']),
        maxfev=100000)

    fit = {'coeff': scipy.atleast_2d(coeff).T, 'type': 'polynomial'}

    return fit
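arcwave2 (and the routines that follow) repeats the same nearest-line matching loop: pair a detected line with the closest catalog line when the separation is within a few times the dispersion. A possible refactoring sketch of that pattern (match_lines is a hypothetical helper, not part of the original module):

import scipy

def match_lines(pix, w, reference, tol):
    """Pair pixel positions `pix`, whose predicted wavelengths are `w`,
    with the nearest catalog wavelength when within `tol`.  Returns an
    (N, 2) array of [pixel, catalog wavelength] rows for sf.lsqfit."""
    pairs = []
    for p, wv in zip(pix, w):
        diff = abs(wv - scipy.asarray(reference))
        if diff.min() < tol:
            pairs.append([p, reference[diff.argmin()]])
    return scipy.asarray(pairs)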
Example #47
0
    def fit(self, X, y):
        """
        The Gaussian Process model fitting method.

        Parameters
        ----------
        X : double array_like
            An array with shape (n_samples, n_features) with the input at which
            observations were made.

        y : double array_like
            An array with shape (n_samples, ) or shape (n_samples, n_targets)
            with the observations of the output to be predicted.

        Returns
        -------
        gp : self
            A fitted Gaussian Process model object awaiting data to perform
            predictions.
        """

        K = self.calc_kernel_matrix(X)
        # # Force data to 2D numpy.array
        X = array2d(X)
        n_samples, n_features = X.shape
        y = sp.asarray(y)
        self.y_ndim_ = y.ndim
        if y.ndim == 1:
            y = y[:, sp.newaxis]
        _, n_targets = y.shape

        # # Normalise output data or not
        if self.normalise == 1:
            y_mean = sp.mean(y, axis=0)
            y_std = sp.std(y, axis=0)
            y_std[y_std == 0.] = 1.
            y = (y - y_mean) / y_std
        else:
            y_mean = 0.0
            y_std = 1.0

        err = 'Dummy error message'
        # add the nugget to the diagonal of the kernel matrix (regularisation)
        inverse = K + self.nugget * sp.eye(n_samples)
        try:
            # print "is symmetric", Cholesky.isSymmetric(inverse)
            # upper_triang = Cholesky.Cholesky(inverse)
            # inverse = Cholesky.CholeskyInverse(upper_triang)
            inverse = LA.inv(inverse)
        except LA.LinAlgError as err:
            print "inv failed: %s. Switching to pinvh" % err
            try:
                inverse = LA.pinvh(inverse)
            except LA.LinAlgError as err:
                print "pinvh failed: %s. Switching to pinv2" % err
                try:
                    inverse = LA.pinv2(inverse)
                except LA.LinAlgError as err:
                    print "pinv2 failed: %s. Failed to invert matrix." % err
                    inverse = None

        # alpha is the vector of regression coefficients of GaussianProcess
        alpha = sp.dot(inverse, y)

        self.y = y
        self.y_mean, self.y_std = y_mean, y_std
        if not self.low_memory:
            self.inverse = inverse
        self.alpha = sp.array(alpha)
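With the quantities fit() stores, the GP posterior mean at a new input is one dot product away: alpha is (K + nugget*I)^-1 y, so mean = k_*^T alpha, un-normalised back to the original output scale. A sketch (predict_mean and k_star are illustrative names, not part of this class):

import scipy as sp

def predict_mean(gp, k_star):
    # k_star: (n_samples,) kernel values between the test input and the
    # training inputs, computed with the same kernel as calc_kernel_matrix.
    mean = sp.dot(k_star, gp.alpha)     # shape (n_targets,)
    return gp.y_mean + gp.y_std * mean  # undo the output normalisation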
Example #48
0
def arcwave(sky, arc, arcmodel, skymodel, scale, order):
    from scipy import ndimage, stats, interpolate, optimize
    sky = sky.copy()
    arc = arc.copy()

    sky = scipy.median(sky, 0)

    x = scipy.arange(sky.size)
    x_orig = x.copy()

    wave = scipy.arange(3000., 10000., scale)
    arc_wide = ndimage.gaussian_filter(arc, 5)
    m = interpolate.splev(wave, arcmodel['norm'])

    a = arc.copy()
    aw = arc_wide.copy()
    arc = a[:a.size // 2]

    x = x_orig[:a.size // 2]
    arclines = get_lines(x, arc)
    fit = scipy.zeros(3 * arclines.size + 1)
    index = 1
    for i in range(arclines.size):
        fit[index] = 1.
        fit[index + 1] = arclines[i]
        fit[index + 2] = 15. * scale
        index += 3
    arc_wide = sf.ngauss(x, fit)
    """
    Do an approximate chi-square between the sky model and the data over a
        range of offsets using a broadened data and sky model.
    """
    max = 0.
    mid = 0

    delta = scale / 10.
    s = scipy.arange(scale - delta, scale + delta, delta / 10.)
    for stmp in s:
        wtmp = scipy.arange(2000., 10000., stmp)
        m = interpolate.splev(wtmp, arcmodel['norm'])
        conv = scipy.empty(m.size - arc_wide.size + 1)
        for i in range(conv.size):
            tmp = m[i:i + arc_wide.size].copy()
            if tmp.max() < 0.1:
                conv[i] = 0.
                continue
            conv[i] = (tmp * arc_wide).sum()
            conv[i] = 1. / ((tmp - arc_wide)**2).sum()
        curr = conv.max()
        if curr > max:
            mid = conv.argmax()
            scale = stmp
            max = conv.max()
            wave = wtmp.copy()
    """
    Refine the starting wavelength position using the 'true' (ie narrow) model
        of the sky. Searches for a minimum around the minimum found in the
        previous optimization.
    """
    m = interpolate.splev(wave, arcmodel['matched'])
    conv = scipy.empty(m.size - arc.size + 1)
    for i in range(conv.size):
        tmp = m[i:i + arc.size].copy()
        ratio = arc.max() / tmp.max()
        if tmp.max() < 1.:
            conv[i] = 0.
            continue
        tmp *= ratio
        conv[i] = (tmp * arc).sum()
    pt = conv[mid - 50:mid + 51].argmax() + mid - 50

    initial_pars = [wave[pt], scale]
    for i in range(order + 1 - len(initial_pars)):
        initial_pars.append(0.)
    modellines = get_lines(wave, m, std=10.)
    modellines = modellines[modellines > wave[pt]]
    modellines = arcmodel['lines']
    modellines = modellines[modellines > wave[pt]]

    fit = {'coeff': scipy.atleast_2d(initial_pars).T, 'type': 'polynomial'}

    for o in [1, 2]:
        w = sf.genfunc(arclines, 0., fit)
        matches = []
        for j in range(w.size):
            diff = abs(w[j] - modellines)
            if diff.min() < 5. * scale:
                matches.append([arclines[j], modellines[diff.argmin()]])
        fit = sf.lsqfit(scipy.asarray(matches), 'polynomial', o)

    left_matches = [i for i in matches]
    wmin = sf.genfunc(a.size * 0.45, 0., fit)

    arc = a[a.size // 2:].copy()
    x = scipy.arange(arc.size).astype(scipy.float32) + a.size // 2
    arclines = get_lines(x, arc)
    fit = scipy.zeros(3 * arclines.size + 1)
    index = 1
    for i in range(arclines.size):
        fit[index] = 1.
        fit[index + 1] = arclines[i]
        fit[index + 2] = 10. * scale
        index += 3
    arc_wide = sf.ngauss(x, fit)
    """
    Do an approximate chi-square between the sky model and the data over a
        range of offsets using a broadened data and sky model.
    """
    max = 0.
    mid = 0
    delta = scale / 10.
    s = scipy.arange(scale - delta, scale + delta, delta / 10.)
    for stmp in s:
        wtmp = scipy.arange(wmin, 10000., stmp)
        m = interpolate.splev(wtmp, arcmodel['norm'])
        conv = scipy.empty(m.size - arc_wide.size + 1)
        for i in range(conv.size):
            tmp = m[i:i + arc_wide.size].copy()
            if tmp.max() < 0.1:
                conv[i] = 0.
                continue
            conv[i] = (tmp * arc_wide).sum()
        curr = conv.max()
        if curr > max:
            mid = conv.argmax()
            scale = stmp
            max = conv.max()
            wave = wtmp.copy()
    """
    Refine the starting wavelength position using the 'true' (ie narrow) model
        of the sky. Searches for a minimum around the minimum found in the
        previous optimization.
    """
    m = interpolate.splev(wave, arcmodel['matched'])
    conv = scipy.empty(m.size - arc.size + 1)
    for i in range(conv.size):
        tmp = m[i:i + arc.size].copy()
        ratio = arc.max() / tmp.max()
        if tmp.max() < 1.:
            conv[i] = 0.
            continue
        tmp *= ratio
        conv[i] = (tmp * arc).sum()
    pt = conv[mid - 50:mid + 51].argmax() + mid - 50
    wavept = wave[pt]

    initial_pars = [wavept, scale]
    for i in range(order + 1 - len(initial_pars)):
        initial_pars.append(0.)
    modellines = get_lines(wave, m, std=10.)
    modellines = modellines[modellines > wavept]
    modellines = arcmodel['lines']
    modellines = modellines[modellines > wavept]

    fit = {'coeff': scipy.atleast_2d(initial_pars).T, 'type': 'polynomial'}
    for o in [1, 2]:
        # The (o-2) bit is to correct the offset after the first loop
        w = sf.genfunc(arclines + (o - 2) * a.size / 2., 0., fit)
        matches = []
        for j in range(w.size):
            diff = abs(w[j] - modellines)
            if diff.min() < 5. * scale:
                matches.append([arclines[j], modellines[diff.argmin()]])
        fit = sf.lsqfit(scipy.asarray(matches), 'polynomial', o)

    arc = a.copy()
    arc_wide = aw.copy()

    w = sf.genfunc(arclines, 0., fit)
    for i in range(w.size):
        diff = abs(w[i] - modellines)
        if diff.min() < 5. * scale:
            left_matches.append([arclines[i], modellines[diff.argmin()]])

    fit = sf.lsqfit(scipy.asarray(left_matches), 'polynomial', order)
    """ Optimization function for refining the wavelength solution. """
    def dofit(p, x, data, model):
        fit = {'coeff': scipy.atleast_2d(p).T, 'type': 'polynomial'}
        w = sf.genfunc(x, 0., fit)
        m = interpolate.splev(w, model)
        return (m - data)

    x = scipy.arange(arc.size).astype(scipy.float32)

    initial_pars = fit['coeff'][:, 0].tolist()
    coeff, ier = optimize.leastsq(dofit,
                                  initial_pars,
                                  (x, arc_wide, arcmodel['wide']),
                                  maxfev=100000)
    coeff, ier = optimize.leastsq(dofit,
                                  coeff, (x, arc, arcmodel['matched']),
                                  maxfev=100000)
    outcoeff = {'coeff': scipy.atleast_2d(coeff).T, 'type': 'polynomial'}

    def skycorrect(p, arc, sky, arcmodel, skymodel):
        fit = {'coeff': scipy.atleast_2d(p[:-1]).T, 'type': 'polynomial'}
        w = sf.genfunc(x, 0., fit)
        arcm = interpolate.splev(w + p[-1], arcmodel)
        chi_arc = (arcm - arc)
        s = sky[w > 5100.]
        skym = interpolate.splev(w[w > 5100.], skymodel)
        skym *= scipy.median(s / skym)
        chi_sky = 5. * (skym - s)  #/abs(m)**0.5
        chi = scipy.concatenate((chi_arc, chi_sky))
        return chi

    newcoeff = coeff.tolist()
    newcoeff.append(0.)
    coeff, ier = optimize.leastsq(
        skycorrect,
        newcoeff, (arc, sky, arcmodel['matched'], skymodel['matched']),
        maxfev=100000)
    outcoeff = {'coeff': scipy.atleast_2d(coeff[:-1]).T, 'type': 'polynomial'}
    """
    wave = sf.genfunc(x,0.,outcoeff)
    sky = sky[wave>5000.]
    wave = wave[wave>5000.]

    m = interpolate.splev(wave,wavemodel['matched'])
    ratio = scipy.median(sky/m)
    import pylab
    pylab.plot(wave,sky)
    pylab.plot(wave,m*ratio)
    pylab.show()

    offset,ier = optimize.leastsq(skycorrect,[0.],
                        (wave,sky,wavemodel['matched']),maxfev=100000)
    print offset
    outcoeff['coeff'][0] += offset
    """

    return outcoeff
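The grid search above is, at heart, a sliding dot product between the broadened data and the resampled model; stripped of the resampling and the low-signal guard, the inner loop is a 'valid'-mode correlation. A minimal standalone sketch of that idea (toy data, plain numpy rather than the pipeline's own modules):

import numpy as np

np.random.seed(0)
m = np.random.randn(200)       # stand-in for the interpolated model
arc_wide = m[70:90].copy()     # fake data: a chunk cut out of the model
conv = np.correlate(m, arc_wide, mode='valid')
print(conv.argmax())           # -> 70, the offset where data and model align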
Example #49
0
 def _forwardImplementation(self, inbuf, outbuf):
     """ takes the state vector and return the discrete action with
         the maximum value over all actions for this state.
     """
     outbuf[0] = self.getMaxAction(asarray(inbuf))
Example #50
0
def wave_arcsky(arc, arcmodel, sky, solution):
    """
    First find the best solution with the skylines, then apply this solution
        to all arclines within the bounds of the lowest/highest wavelength
        skylines, solving for the delta_pixel offset between the sky and the
        arc. Then find the solution for all lines (sky and delta_pixel-offset
        arcs).
    """
    def clip(arr):
        a = arr.copy()
        m, s, l = a.mean(), a.std(), a.size
        while 1:
            a = a[abs(a - m) < 3. * s]
            if a.size == l:
                return m, s
            m, s, l = a.mean(), a.std(), a.size

    STD_LINES = [
        5197.928, 5200.286, 5202.977, 5460.735, 5577.345, 5867.5522, 5915.308,
        5932.864, 6257.970, 6300.320, 6363.810, 6533.040, 6553.610, 6863.971,
        6912.620, 6923.210, 6939.520, 7303.716, 7329.148, 7340.885, 7358.659,
        7392.198, 7586.093, 7808.467, 7821.510, 7841.266, 7993.332, 8310.719,
        8344.613, 8399.160, 8415.231, 8430.170, 8791.186, 8885.830, 8943.395,
        8988.384, 9038.059, 9337.854, 9375.977, 9419.746, 9439.670, 9458.524
    ]
    x = scipy.arange(sky.size).astype(scipy.float32)
    lines = get_lines(x, sky)

    global fit
    scale = solution['coeff'][1]
    order = solution['coeff'].size - 1

    if scale > 1.5:
        STD_LINES.insert(6, 5891.)

    w = sf.genfunc(lines, 0., solution)
    matches = []
    for i in range(w.size):
        diff = abs(w[i] - STD_LINES)
        if diff.min() < 5. * scale:
            matches.append([lines[i], STD_LINES[diff.argmin()]])
    fit = sf.lsqfit(scipy.asarray(matches), 'polynomial', order)

    w = sf.genfunc(lines, 0., fit)
    matches = []
    for i in range(w.size):
        diff = abs(w[i] - STD_LINES)
        if diff.min() < 5. * scale:
            matches.append([lines[i], STD_LINES[diff.argmin()]])
    fit = sf.lsqfit(scipy.asarray(matches), 'polynomial', order)

    lines = get_lines(x, sky, nstd=7.)
    w = sf.genfunc(lines, 0., fit)
    matches = []
    for i in range(w.size):
        diff = abs(w[i] - STD_LINES)
        if diff.min() < 3. * scale:
            matches.append([lines[i], STD_LINES[diff.argmin()]])
    matches = scipy.asarray(matches)
    fit = sf.lsqfit(matches, 'polynomial', order)
    revfit = sf.lsqfit(matches[:, ::-1], 'polynomial', order)

    ARCS = scipy.sort(arcmodel['lines'])
    alines = get_lines(x, arc)
    xmin, xmax = matches[0, 0], matches[-1, 0]
    arc_x = sf.genfunc(ARCS, 0., revfit)
    offset = []
    for i in range(arc_x.size):
        if arc_x[i] < xmin - 2 or arc_x[i] > xmax + 2:
            continue
        diff = arc_x[i] - alines
        if abs(diff).min() < 9.:
            offset.append(diff[abs(diff).argmin()])
    offset = scipy.asarray(offset)
    off, width = clip(offset)

    aw = sf.genfunc(alines + off, 0., fit)
    matches = []
    for i in range(w.size):
        diff = abs(w[i] - STD_LINES)
        if diff.min() < 3. * scale:
            matches.append([lines[i], STD_LINES[diff.argmin()]])
    k = len(matches)
    for i in range(aw.size):
        diff = abs(aw[i] - ARCS)
        if diff.min() < 3. * scale:
            matches.append([alines[i] + off, ARCS[diff.argmin()]])
    matches = scipy.asarray(matches)
    fit = sf.lsqfit(matches, 'polynomial', order)
    """
    # This code is to optimize the arc offset -- likely unnecessary.
    def opt(p):
        global fit
        off = p[0]
        w = sf.genfunc(lines,0.,fit)
        aw = sf.genfunc(alines+off,0.,fit)
        matches = []
        for i in range(w.size):
            diff = abs(w[i]-STD_LINES)
            if diff.min()<3.*scale:
                matches.append([lines[i],STD_LINES[diff.argmin()]])
        k = len(matches)
        for i in range(aw.size):
            diff = abs(aw[i]-ARCS)
            if diff.min()<3.*scale:
                matches.append([alines[i]+off,ARCS[diff.argmin()]])
        matches = scipy.asarray(matches)
        fit = sf.lsqfit(matches,'polynomial',order)
        return (matches[:,1]-sf.genfunc(matches[:,0],0.,fit))
    from scipy import optimize
    coeff,ier = optimize.leastsq(opt,[off],epsfcn=1e-15)
    """
    return fit
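The clip() helper above is plain iterative 3-sigma clipping. A standalone sketch of the same idea (sigma_clip is a hypothetical module-level rewrite of the nested helper, shown with toy data):

import scipy

def sigma_clip(a, nsig=3.):
    """Iteratively reject points more than nsig standard deviations from the mean."""
    m, s, l = a.mean(), a.std(), a.size
    while True:
        a = a[abs(a - m) < nsig * s]
        if a.size == l:
            return m, s
        m, s, l = a.mean(), a.std(), a.size

arr = scipy.concatenate((scipy.linspace(-1., 1., 100), [50.]))
print(sigma_clip(arr))   # the outlier at 50 is rejected: mean ~0, std ~0.58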
Example #51
0
import scipy
from scipy.spatial.distance import cdist


def __interpolateBetweenBinaryObjects(obj1, obj2, slices):
    """
    Takes two binary objects and inserts `slices` slices in-between them, each of
    which contains a smooth binary transition between the objects.
    @note private inner function
    """
    if not obj1.shape == obj2.shape:
        raise AttributeError(
            'The two supplied objects have to be of the same shape, not {} and {}.'
            .format(obj1.shape, obj2.shape))

    # constant
    offset = 0.5  # must be a value smaller than the minimal distance possible
    temporal_dimension = 3

    # get all voxel position
    obj1_voxel = scipy.nonzero(obj1)
    obj2_voxel = scipy.nonzero(obj2)

    # get smallest pairwise distances between all object voxels
    distances = cdist(scipy.transpose(obj1_voxel), scipy.transpose(obj2_voxel))

    # keep for each True voxel of obj1 only the smallest distance to a True voxel in obj2
    min_distances = distances.min(1)

    # test if all seems to work
    if len(min_distances) != len(obj1_voxel[0]):
        raise Exception('Invalid number of minimal distances received.')

    # replace True voxels in obj1 with their respective distances to the True voxels in obj2
    thr_obj = obj1.copy()
    thr_obj = thr_obj.astype(scipy.float_)
    thr_obj[obj1_voxel] = min_distances
    thr_obj[obj1_voxel] += offset  # the distances include zeros, so the offset keeps obj1 voxels non-zero

    # compute the step size for each slice that is added
    maximum = min_distances.max()
    step = maximum / float(slices + 1)
    threshold = maximum

    # control step: see if thr_obj really corresponds to obj1
    if not scipy.all(thr_obj.astype(scipy.bool_) == obj1.astype(scipy.bool_)):
        raise Exception('First created object does not correspond to obj1.')

    # assemble return volume
    return_volume = [thr_obj.astype(scipy.bool_)]  # corresponds to obj1
    for _ in range(slices):
        threshold -= step
        # remove all value higher than the threshold
        thr_obj[thr_obj > threshold] = 0
        # add binary volume to list (makes a copy)
        return_volume.append(thr_obj.astype(scipy.bool_))

    # add last slice (corresponds to the obj2 slice)
    thr_obj[thr_obj > offset] = 0
    return_volume.append(thr_obj.astype(scipy.bool_))

    # return binary scipy array
    return scipy.rollaxis(scipy.asarray(return_volume, dtype=scipy.bool_), 0,
                          temporal_dimension + 1)
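A quick usage sketch (hypothetical data; the trailing rollaxis implies 3-D binary volumes, so that is what is assumed here):

obj1 = scipy.ones((1, 1, 5), dtype=scipy.bool_)    # a 5-voxel line
obj2 = scipy.zeros((1, 1, 5), dtype=scipy.bool_)
obj2[0, 0, 0] = True                               # one end of that line
vol = __interpolateBetweenBinaryObjects(obj1, obj2, 3)
print(vol.shape)        # (1, 1, 5, 5): obj1, three transition slices, obj2
print(vol[0, 0, :, 1])  # the line shrinks toward obj2 slice by slice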
Example #52
0
import scipy as sp
from scipy.linalg import det, inv


def gauss_mixture_calculate(x, u, sigma):
    """Evaluate a multivariate Gaussian density at x with mean u and covariance sigma."""
    D = len(x)
    x, u = sp.asarray(x), sp.asarray(u)
    y = x - u
    return sp.exp(-sp.dot(y, sp.dot(inv(sigma), y)) / 2.0) / (
        (2 * sp.pi)**(D / 2.0) * det(sigma)**0.5)
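A quick sanity check: at its mean, a 2-D standard normal density evaluates to 1/(2*pi).

print(gauss_mixture_calculate([0., 0.], [0., 0.], sp.eye(2)))   # ~0.15915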
Example #53
0
from numpy import shape, size, eye, asarray, copy, dot, zeros
from numpy.linalg import inv
from control.exception import ControlSlycot, ControlArgument


def care(A, B, Q, R=None, S=None, E=None, stabilizing=True):
    """ (X, L, G) = care(A, B, Q, R=None) solves the continuous-time algebraic Riccati
    equation

        :math:`A^T X + X A - X B R^{-1} B^T X + Q = 0`

    where A and Q are square matrices of the same dimension. Further,
    Q and R are symmetric matrices. If R is None, it is set to the
    identity matrix. The function returns the solution X, the gain
    matrix G = R^{-1} B^T X and the closed loop eigenvalues L, i.e., the
    eigenvalues of A - B G.

    (X, L, G) = care(A, B, Q, R, S, E) solves the generalized continuous-time
    algebraic Riccati equation

        :math:`A^T X E + E^T X A - (E^T X B + S) R^{-1} (B^T X E + S^T) + Q = 0`

    where A, Q and E are square matrices of the same
    dimension. Further, Q and R are symmetric matrices. If R is None,
    it is set to the identity matrix. The function returns the
    solution X, the gain matrix G = R^{-1} (B^T X E + S^T) and the
    closed loop eigenvalues L, i.e., the generalized eigenvalues of
    (A - B G, E)."""

    # Make sure we can import required slycot routine
    try:
        from slycot import sb02md
    except ImportError:
        raise ControlSlycot("can't find slycot module 'sb02md'")

    try:
        from slycot import sb02mt
    except ImportError:
        raise ControlSlycot("can't find slycot module 'sb02mt'")

    # Make sure we can find the required slycot routine
    try:
        from slycot import sg02ad
    except ImportError:
        raise ControlSlycot("can't find slycot module 'sg02ad'")

    # Reshape 1-d arrays
    if len(shape(A)) == 1:
        A = A.reshape(1, A.size)

    if len(shape(B)) == 1:
        B = B.reshape(1, B.size)

    if len(shape(Q)) == 1:
        Q = Q.reshape(1, Q.size)

    if R is not None and len(shape(R)) == 1:
        R = R.reshape(1, R.size)

    if S is not None and len(shape(S)) == 1:
        S = S.reshape(1, S.size)

    if E is not None and len(shape(E)) == 1:
        E = E.reshape(1, E.size)

    # Determine main dimensions
    if size(A) == 1:
        n = 1
    else:
        n = size(A, 0)

    if size(B) == 1:
        m = 1
    else:
        m = size(B, 1)
    if R is None:
        R = eye(m, m)

    # Solve the standard algebraic Riccati equation
    if S is None and E is None:
        # Check input data for consistency
        if size(A) > 1 and shape(A)[0] != shape(A)[1]:
            raise ControlArgument("A must be a quadratic matrix.")

        if (size(Q) > 1 and shape(Q)[0] != shape(Q)[1]) or \
                (size(Q) > 1 and shape(Q)[0] != n) or \
                                size(Q) == 1 and n > 1:
            raise ControlArgument("Q must be a quadratic matrix of the same \
                dimension as A.")

        if (size(B) > 1 and shape(B)[0] != n) or \
                                size(B) == 1 and n > 1:
            raise ControlArgument("Incompatible dimensions of B matrix.")

        if not (asarray(Q) == asarray(Q).T).all():
            raise ControlArgument("Q must be a symmetric matrix.")

        if not (asarray(R) == asarray(R).T).all():
            raise ControlArgument("R must be a symmetric matrix.")

        # Create back-up of arrays needed for later computations
        R_ba = copy(R)
        B_ba = copy(B)

        # Solve the standard algebraic Riccati equation by calling Slycot
        # functions sb02mt and sb02md
        try:
            A_b, B_b, Q_b, R_b, L_b, ipiv, oufact, G = sb02mt(n, m, B, R)
        except ValueError as ve:
            if ve.info < 0:
                e = ValueError(str(ve))
                e.info = ve.info
            elif ve.info == m + 1:
                e = ValueError("The matrix R is numerically singular.")
                e.info = ve.info
            else:
                e = ValueError("The %i-th element of d in the UdU (LdL) \
                    factorization is zero." % ve.info)
                e.info = ve.info
            raise e

        try:
            if stabilizing:
                sort = 'S'
            else:
                sort = 'U'
            X, rcond, w, S_o, U, A_inv = sb02md(n, A, G, Q, 'C', sort=sort)
        except ValueError as ve:
            if ve.info < 0 or ve.info > 5:
                e = ValueError(str(ve))
                e.info = ve.info
            elif ve.info == 1:
                e = ValueError("The matrix A is (numerically) singular in \
                    continuous-time case.")
                e.info = ve.info
            elif ve.info == 2:
                e = ValueError("The Hamiltonian or symplectic matrix H cannot \
                    be reduced to real Schur form.")
                e.info = ve.info
            elif ve.info == 3:
                e = ValueError("The real Schur form of the Hamiltonian or \
                    symplectic matrix H cannot be appropriately ordered.")
                e.info = ve.info
            elif ve.info == 4:
                e = ValueError("The Hamiltonian or symplectic matrix H has \
                    less than n stable eigenvalues.")
                e.info = ve.info
            elif ve.info == 5:
                e = ValueError("The N-th order system of linear algebraic \
                         equations is singular to working precision.")
                e.info = ve.info
            raise e

        # Calculate the gain matrix G
        if size(R_b) == 1:
            G = dot(dot(1 / (R_ba), asarray(B_ba).T), X)
        else:
            G = dot(dot(inv(R_ba), asarray(B_ba).T), X)

        # Return the solution X, the closed-loop eigenvalues L and
        # the gain matrix G
        return (X, w[:n], G)

    # Solve the generalized algebraic Riccati equation
    elif S is not None and E is not None:
        # Check input data for consistency
        if size(A) > 1 and shape(A)[0] != shape(A)[1]:
            raise ControlArgument("A must be a quadratic matrix.")

        if (size(Q) > 1 and shape(Q)[0] != shape(Q)[1]) or \
                (size(Q) > 1 and shape(Q)[0] != n) or \
                                size(Q) == 1 and n > 1:
            raise ControlArgument("Q must be a quadratic matrix of the same \
                dimension as A.")

        if (size(B) > 1 and shape(B)[0] != n) or \
                                size(B) == 1 and n > 1:
            raise ControlArgument("Incompatible dimensions of B matrix.")

        if (size(E) > 1 and shape(E)[0] != shape(E)[1]) or \
                (size(E) > 1 and shape(E)[0] != n) or \
                                size(E) == 1 and n > 1:
            raise ControlArgument("E must be a quadratic matrix of the same \
                dimension as A.")

        if (size(R) > 1 and shape(R)[0] != shape(R)[1]) or \
                (size(R) > 1 and shape(R)[0] != m) or \
                                size(R) == 1 and m > 1:
            raise ControlArgument("R must be a quadratic matrix of the same \
                dimension as the number of columns in the B matrix.")

        if (size(S) > 1 and shape(S)[0] != n) or \
                (size(S) > 1 and shape(S)[1] != m) or \
                                size(S) == 1 and n > 1 or \
                                size(S) == 1 and m > 1:
            raise ControlArgument("Incompatible dimensions of S matrix.")

        if not (asarray(Q) == asarray(Q).T).all():
            raise ControlArgument("Q must be a symmetric matrix.")

        if not (asarray(R) == asarray(R).T).all():
            raise ControlArgument("R must be a symmetric matrix.")

        # Create back-up of arrays needed for later computations
        R_b = copy(R)
        B_b = copy(B)
        E_b = copy(E)
        S_b = copy(S)

        # Solve the generalized algebraic Riccati equation by calling the
        # Slycot function sg02ad
        try:
            if stabilizing:
                sort = 'S'
            else:
                sort = 'U'
            rcondu, X, alfar, alfai, beta, S_o, T, U, iwarn = \
                sg02ad('C', 'B', 'N', 'U', 'N', 'N', sort, 'R', n, m, 0, A, E, B, Q, R, S)
        except ValueError as ve:
            if ve.info < 0 or ve.info > 7:
                e = ValueError(str(ve))
                e.info = ve.info
            elif ve.info == 1:
                e = ValueError("The computed extended matrix pencil is \
                            singular, possibly due to rounding errors.")
                e.info = ve.info
            elif ve.info == 2:
                e = ValueError("The QZ algorithm failed.")
                e.info = ve.info
            elif ve.info == 3:
                e = ValueError("Reordering of the generalized eigenvalues \
                    failed.")
                e.info = ve.info
            elif ve.info == 4:
                e = ValueError("After reordering, roundoff changed values of \
                            some complex eigenvalues so that leading \
                            eigenvalues in the generalized Schur form no \
                            longer satisfy the stability condition; this \
                            could also be caused due to scaling.")
                e.info = ve.info
            elif ve.info == 5:
                e = ValueError("The computed dimension of the solution does \
                            not equal N.")
                e.info = ve.info
            elif ve.info == 6:
                e = ValueError("The spectrum is too close to the boundary of \
                            the stability domain.")
                e.info = ve.info
            elif ve.info == 7:
                e = ValueError("A singular matrix was encountered during the \
                            computation of the solution matrix X.")
                e.info = ve.info
            raise e

        # Calculate the closed-loop eigenvalues L
        L = zeros((n, 1), dtype='complex64')
        for i in range(n):
            L[i] = (alfar[i] + alfai[i] * 1j) / beta[i]

        # Calculate the gain matrix G
        if size(R_b) == 1:
            G = dot(1 / (R_b),
                    dot(asarray(B_b).T, dot(X, E_b)) + asarray(S_b).T)
        else:
            G = dot(inv(R_b),
                    dot(asarray(B_b).T, dot(X, E_b)) + asarray(S_b).T)

        # Return the solution X, the closed-loop eigenvalues L and
        # the gain matrix G
        return (X, L, G)

    # Invalid set of input parameters
    else:
        raise ControlArgument("Invalid set of input parameters.")
Example #54
0
def threshold_detection(data, th, min_dist=1, mode='gt', find_max=True):
    """detect events by applying a threshold to the data

    :type data: ndarray
    :param data: the 2d-data to apply the threshold on. channels are in the
        second dimension (columns).
        Required
    :type th: ndarray or list
    :param th: list of threshold values, one value per channel in the `data`
        Required
    :type min_dist: int
    :param min_dist: minimal distance two successive events have to be
        separated in samples, else the event is ignored.
        Default=1
    :type mode: str
    :param mode: one of 'gt' for greater than or 'lt' for less than. will
        determine how the threshold is applied.
        Default='gt'
    :type find_max: bool
    :param find_max: if True, will find the maximum for each event epoch, else
        will find the start for each event epoch.
        Default=True
    :rtype: ndarray
    :returns: event samples
    """

    # checks
    data = sp.asarray(data)
    if data.ndim != 2:
        if data.ndim == 1:
            data = sp.atleast_2d(data).T
        else:
            raise ValueError('data.ndim != 2')
    th = sp.asarray(th)
    if th.ndim != 1:
        raise ValueError('th.ndim != 1')
    if th.size != data.shape[1]:
        raise ValueError('thresholds have to match the data channel count')
    if mode not in ['gt', 'lt']:
        raise ValueError('unknown mode, use one of \'lt\' or \'gt\'')
    if min_dist < 1:
        min_dist = 1

    # inits
    rval = []
    ep_func = {
        'gt': lambda d, t: epochs_from_binvec(d > t).tolist(),
        'lt': lambda d, t: epochs_from_binvec(d < t).tolist(),
    }[mode]

    # per channel detection
    for c in xrange(data.shape[1]):
        epochs = ep_func(data[:, c], th[c])
        if len(epochs) == 0:
            continue
        for e in xrange(len(epochs)):
            rval.append(epochs[e][0])
            if find_max is True:
                rval[-1] += data[epochs[e][0]:epochs[e][1] + 1, c].argmax()
    rval = sp.asarray(rval, dtype=INDEX_DTYPE)

    # do we have events?
    if rval.size == 0:
        return rval

    # drop event duplicates by sorting and checking for min_dist
    rval.sort()
    rval = rval[sp.diff(sp.concatenate(([0], rval))) >= min_dist]

    # return
    return rval
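A minimal usage sketch (hypothetical data; epochs_from_binvec and INDEX_DTYPE are assumed to come from the snippet's parent module, so only the call itself is shown):

import scipy as sp

data = sp.zeros((100, 1))
data[10, 0] = data[50, 0] = 5.0
print(threshold_detection(data, [1.0], min_dist=5))   # -> [10 50]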
Example #55
0
    def __init__(self, shot, injections, debug_plots=False):
        self.shot = shot

        self.vuv_lines = collections.OrderedDict()
        self.vuv_signal = {}
        self.vuv_time = {}
        self.vuv_lam = {}
        self.vuv_uncertainty = {}

        self.load_vuv('XEUS')

        t = []
        y = []
        std_y = []
        y_norm = []
        std_y_norm = []

        for k, i in enumerate(injections):
            vuv_signals = []
            vuv_uncertainties = []
            vuv_times = []

            for s in self.vuv_lines.keys():
                i_start, i_stop = profiletools.get_nearest_idx(
                    [i.t_start, i.t_stop], self.vuv_time[s])
                for l in self.vuv_lines[s]:
                    if l.diagnostic_lines is not None:
                        vuv_signals.append(l.signal[i_start:i_stop + 1])
                        vuv_uncertainties.append(l.uncertainty[i_start:i_stop +
                                                               1])
                        vuv_times.append(self.vuv_time[s][i_start:i_stop + 1] -
                                         i.t_inj)
            vuv_signals = scipy.asarray(vuv_signals)
            vuv_uncertainties = scipy.asarray(vuv_uncertainties)
            vuv_times = scipy.asarray(vuv_times)

            # shift the entire signal such that the mean of the pre-injection
            # signal (the 20ms before the injection) is 0
            pre_inj_sig_bool = (vuv_times[0, :] > -0.02) & (vuv_times[0, :] < 0.0)
            vuv_signals = vuv_signals - np.mean(vuv_signals[0,
                                                            pre_inj_sig_bool])

            # We don't have a brightness cal for XEUS or LoWEUS, so normalize to
            # the peak:
            vuv_signals_norm = scipy.nan * scipy.zeros_like(vuv_signals)
            vuv_uncertainties_norm = scipy.nan * scipy.zeros_like(
                vuv_uncertainties)

            for j in xrange(0, vuv_signals.shape[0]):
                m, s = interp_max(
                    vuv_times[j, :],
                    vuv_signals[j, :],
                    err_y=vuv_uncertainties[j, :],
                    debug_plots=debug_plots,
                    s_max=100.0,
                    method='GP'  #added by FS
                )
                vuv_signals_norm[j, :] = vuv_signals[j, :] / m
                vuv_uncertainties_norm[j, :] = (
                    scipy.sqrt((vuv_uncertainties[j, :] / m)**2.0 +
                               ((vuv_signals[j, :] / m) * (s / m))**2.0))

            # Assume all are on the same timebase:
            t.append(vuv_times[0])
            y.append(vuv_signals)
            std_y.append(vuv_uncertainties)
            y_norm.append(vuv_signals_norm)
            std_y_norm.append(vuv_uncertainties_norm)

        blocks = []
        names = []
        pos = []
        i = 0
        for s in self.vuv_lines.keys():
            for l in self.vuv_lines[s]:
                if l.diagnostic_lines is not None:
                    blocks.append(i)
                    i += 1
                    names.append(s)
                    pos.append(XEUS_POS if s == 'XEUS' else LOWEUS_POS)

        self.signal = Signal(scipy.hstack(y).T,
                             scipy.hstack(std_y).T,
                             scipy.hstack(y_norm).T,
                             scipy.hstack(std_y_norm).T,
                             scipy.hstack(t),
                             names,
                             scipy.asarray(blocks, dtype=int) + 1,
                             pos=pos,
                             blocks=blocks)
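The uncertainty expression inside the loop above is first-order error propagation for the normalized signal \(y/m\), treating the peak estimate \(m\) (with uncertainty \(s\)) as independent of \(y\):

\[
\sigma_{y/m} = \sqrt{\left(\frac{\sigma_y}{m}\right)^{2}
                   + \left(\frac{y}{m}\,\frac{s}{m}\right)^{2}}
\]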
Example #56
0
 def test_boundary_stawiaski_borders(self):
     """Test the @link medpy.graphcut.test_boundary_stawiaski() border conditions."""
     # TEST1: test for a label image with not continuous label ids not starting from 0
     label = [[1, 4, 8], [1, 3, 10], [1, 3, 10]]
     expected_result = {
         (1, 3): (2.0, 2.0),
         (1, 4): (1.0, 1.0),
         (4, 8): (1.0, 1.0),
         (3, 4): (1.0, 1.0),
         (3, 10): (2.0, 2.0),
         (8, 10): (1.0, 1.0)
     }
     result = boundary_stawiaski(label, (scipy.zeros_like(label)))
     self.__compare_dictionaries(result, expected_result, 'Test1')
     # TEST2: test for a label image with negative labels
     label = [[-1, 4, 8], [-1, 3, 10], [1, -3, 10]]
     expected_result = {
         (-1, 1): (1.0, 1.0),
         (4, 8): (1.0, 1.0),
         (-1, 3): (1.0, 1.0),
         (3, 10): (1.0, 1.0),
         (-3, 10): (1.0, 1.0),
         (8, 10): (1.0, 1.0),
         (-3, 1): (1.0, 1.0),
         (-3, 3): (1.0, 1.0),
         (-1, 4): (1.0, 1.0),
         (3, 4): (1.0, 1.0)
     }
     result = boundary_stawiaski(label, (scipy.zeros_like(label)))
     self.__compare_dictionaries(result, expected_result, 'Test2')
     # TEST3: test for behavior on occurrence of very small (~0) and 1 weights
     gradient = [[0., 0., 0.], [0., 0., sys.float_info.max]]
     label = [[0, 1, 2], [0, 1, 3]]
     expected_result = {
         (0, 1): (2.0, 2.0),
         (1, 2): (1.0, 1.0),
         (1, 3): (sys.float_info.min, sys.float_info.min),
         (2, 3): (sys.float_info.min, sys.float_info.min)
     }
     result = boundary_stawiaski(label, (gradient))
     self.__compare_dictionaries(result, expected_result, 'Test3')
     # TEST4: check behavior for integer gradient image
     label = [[1, 4, 8], [1, 3, 10], [1, 3, 10]]
     label = scipy.asarray(label)
     expected_result = {
         (1, 3): (2.0, 2.0),
         (1, 4): (1.0, 1.0),
         (4, 8): (1.0, 1.0),
         (3, 4): (1.0, 1.0),
         (3, 10): (2.0, 2.0),
         (8, 10): (1.0, 1.0)
     }
     result = boundary_stawiaski(label,
                                 (scipy.zeros(label.shape, scipy.int_)))
     self.__compare_dictionaries(result, expected_result, 'Test4')
     # TEST5: reaction to different array orders
     label = [[1, 4, 8], [1, 3, 10], [1, 3, 10]]
     label = scipy.asarray(label, order='C')  # C-order, gradient same order
     expected_result = {
         (1, 3): (2.0, 2.0),
         (1, 4): (1.0, 1.0),
         (4, 8): (1.0, 1.0),
         (3, 4): (1.0, 1.0),
         (3, 10): (2.0, 2.0),
         (8, 10): (1.0, 1.0)
     }
     result = boundary_stawiaski(label, (scipy.zeros_like(label)))
     self.__compare_dictionaries(result, expected_result, 'Test5 (C,C)')
     label = scipy.asarray(label,
                           order='F')  # Fortran order, gradient same order
     expected_result = {
         (1, 3): (2.0, 2.0),
         (1, 4): (1.0, 1.0),
         (4, 8): (1.0, 1.0),
         (3, 4): (1.0, 1.0),
         (3, 10): (2.0, 2.0),
         (8, 10): (1.0, 1.0)
     }
     result = boundary_stawiaski(label, (scipy.zeros_like(label)))
     self.__compare_dictionaries(result, expected_result, 'Test5 (F, F)')
     label = scipy.asarray(label,
                           order='C')  # C-order, gradient different order
     expected_result = {
         (1, 3): (2.0, 2.0),
         (1, 4): (1.0, 1.0),
         (4, 8): (1.0, 1.0),
         (3, 4): (1.0, 1.0),
         (3, 10): (2.0, 2.0),
         (8, 10): (1.0, 1.0)
     }
     result = boundary_stawiaski(label,
                                 (scipy.zeros(label.shape, order='F')))
     self.__compare_dictionaries(result, expected_result, 'Test5 (C, F)')
     label = scipy.asarray(label,
                           order='F')  # F-order, gradient different order
     expected_result = {
         (1, 3): (2.0, 2.0),
         (1, 4): (1.0, 1.0),
         (4, 8): (1.0, 1.0),
         (3, 4): (1.0, 1.0),
         (3, 10): (2.0, 2.0),
         (8, 10): (1.0, 1.0)
     }
     result = boundary_stawiaski(label,
                                 (scipy.zeros(label.shape, order='C')))
     self.__compare_dictionaries(result, expected_result, 'Test5 (F, C)')
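These expected dictionaries can be read off the label images directly: with a zero gradient, every 4-neighborhood pixel pair straddling two labels contributes a weight of 1.0 to that label pair. A standalone count for TEST1's label image (a hypothetical check, independent of medpy):

import scipy

label = scipy.asarray([[1, 4, 8], [1, 3, 10], [1, 3, 10]])
pairs = {}
for a, b in zip(label[:, :-1].ravel(), label[:, 1:].ravel()):  # horizontal pairs
    if a != b:
        key = (min(a, b), max(a, b))
        pairs[key] = pairs.get(key, 0) + 1
for a, b in zip(label[:-1, :].ravel(), label[1:, :].ravel()):  # vertical pairs
    if a != b:
        key = (min(a, b), max(a, b))
        pairs[key] = pairs.get(key, 0) + 1
print(pairs)  # the counts match the expected weights of TEST1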
Example #57
0
    def __init__(self,
                 y,
                 std_y,
                 y_norm,
                 std_y_norm,
                 t,
                 name,
                 atomdat_idx,
                 pos=None,
                 sqrtpsinorm=None,
                 weights=None,
                 blocks=0,
                 m=None,
                 s=None):
        """Class to store the data from a given diagnostic.
        
        In the parameter descriptions, `n` is the number of signals (both
        spatial and temporal) contained in the instance.
        
        Parameters
        ----------
        y : array, (`n_time`, `n`)
            The unnormalized, baseline-subtracted data as a function of time and
            space. If `pos` is not None, "space" refers to the chords. Wherever
            there is a bad point, it should be set to NaN.
        std_y : array, (`n_time`, `n`)
            The uncertainty in the unnormalized, baseline-subtracted data as a
            function of time and space.
        y_norm : array, (`n_time`, `n`)
            The normalized, baseline-subtracted data.
        std_y_norm : array, (`n_time`, `n`)
            The uncertainty in the normalized, baseline-subtracted data.
        t : array, (`n_time`,)
            The time vector of the data.
        name : str
            The name of the signal.
        atomdat_idx : int or array of int, (`n`,)
            The index or indices of the signals in the atomdat file. If a single
            value is given, it is used for all of the signals. If a 1d array is
            provided, these are the indices for each of the signals in `y`. If
            `atomdat_idx` (or one of its entries) is -1, it will be treated as
            an SXR measurement.
        pos : array, (4,) or (`n`, 4), optional
            The POS vector(s) for line-integrated data. If not present, the data
            are assumed to be local measurements at the locations in
            `sqrtpsinorm`. If a 1d array is provided, it is used for all of the
            chords in `y`. Otherwise, there must be one pos vector for each of
            the chords in `y`.
        sqrtpsinorm : array, (`n`,), optional
            The square root of poloidal flux grid the (local) measurements are
            given on. If line-integrated measurements with the standard STRAHL
            grid for their quadrature points are to be used this should be left
            as None.
        weights : array, (`n`, `n_quadrature`), optional
            The quadrature weights to use. This can be left as None for a local
            measurement or can be set later.
        blocks : int or array of int, (`n`), optional
            A set of flags indicating which channels in the :py:class:`Signal`
            should be treated together as a block when normalizing. If a single
            int is given, all of the channels will be taken together. Otherwise,
            any channels sharing the same block number will be taken together.
        m : float
            maximum signal recorded across any chords and any time for this diagnostic.
            This value is used for normalization of the signals. 
        s : float
            uncertainty in m (see above)
        """
        self.y = scipy.asarray(y, dtype=float)
        if self.y.ndim != 2:
            raise ValueError("y must have two dimensions!")
        self.std_y = scipy.asarray(std_y, dtype=float)
        if self.y.shape != self.std_y.shape:
            raise ValueError("The shapes of y and std_y must match!")
        self.y_norm = scipy.asarray(y_norm, dtype=float)
        if self.y.shape != self.y_norm.shape:
            raise ValueError("The shapes of y and y_norm must match!")
        self.std_y_norm = scipy.asarray(std_y_norm, dtype=float)
        if self.std_y_norm.shape != self.y.shape:
            raise ValueError("The shapes of y and std_y_norm must match!")
        self.t = scipy.asarray(t, dtype=float)
        if self.t.ndim != 1:
            raise ValueError("t must have one dimension!")
        if len(self.t) != self.y.shape[0]:
            raise ValueError(
                "The length of t must equal the length of the leading dimension of y!"
            )
        if isinstance(name, str):
            name = [
                name,
            ] * self.y.shape[1]
        self.name = name
        try:
            iter(atomdat_idx)
        except TypeError:
            self.atomdat_idx = atomdat_idx * scipy.ones(self.y.shape[1],
                                                        dtype=int)
        else:
            self.atomdat_idx = scipy.asarray(atomdat_idx, dtype=int)
            if self.atomdat_idx.ndim != 1:
                raise ValueError(
                    "atomdat_idx must have at most one dimension!")
            if len(self.atomdat_idx) != self.y.shape[1]:
                raise ValueError(
                    "1d atomdat_idx must have the same number of elements as the second dimension of y!"
                )
        if pos is not None:
            pos = scipy.asarray(pos, dtype=float)
            if pos.ndim not in (1, 2):
                raise ValueError("pos must have one or two dimensions!")
            if pos.ndim == 1 and len(pos) != 4:
                raise ValueError("pos must have 4 elements!")
            if pos.ndim == 2 and (pos.shape[0] != self.y.shape[1]
                                  or pos.shape[1] != 4):
                raise ValueError("pos must have shape (n, 4)!")

        self.pos = pos
        self.sqrtpsinorm = sqrtpsinorm

        self.weights = weights

        try:
            iter(blocks)
        except TypeError:
            self.blocks = blocks * scipy.ones(self.y.shape[1], dtype=int)
        else:
            self.blocks = scipy.asarray(blocks, dtype=int)
            if self.blocks.ndim != 1:
                raise ValueError("blocks must have at most one dimension!")
            if len(self.blocks) != self.y.shape[1]:
                raise ValueError(
                    "1d blocks must have the same number of elements as the second dimension of y!"
                )

        if isinstance(m, float):
            self.m = m
        elif m is None:
            pass
        else:
            raise ValueError("maximum signal m must be a float!")
        if isinstance(s, float):
            self.s = s
        elif s is None:
            pass
        else:
            raise ValueError("uncertainty s must be a float!")
Example #58
0
#test colorspaces

from psychopy import misc
import scipy

testDKL1 = scipy.asarray([45, 90, 1.0])

print(testDKL1)
print(misc.dkl2rgb(testDKL1))
#print(misc.dkl2rgb(testDKL2))
#print(misc.dkl2rgb(testDKL3))
#print(misc.dkl2rgb(testDKL4))
Example #59
0
 def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
     """Evaluate the covariance between points `Xi` and `Xj` with derivative order `ni`, `nj`.
     
     Parameters
     ----------
     Xi : :py:class:`Matrix` or other Array-like, (`M`, `D`)
         `M` inputs with dimension `D`.
     Xj : :py:class:`Matrix` or other Array-like, (`M`, `D`)
         `M` inputs with dimension `D`.
     ni : :py:class:`Matrix` or other Array-like, (`M`, `D`)
         `M` derivative orders for set `i`.
     nj : :py:class:`Matrix` or other Array-like, (`M`, `D`)
         `M` derivative orders for set `j`.
     hyper_deriv : Non-negative int or None, optional
         The index of the hyperparameter to compute the first derivative
         with respect to. If None, no derivatives are taken. Default is None
          (no hyperparameter derivatives). Hyperparameter derivatives are not
          supported for `n` > 0 at this time.
     symmetric : bool, optional
         Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
         Default is False.
     
     Returns
     -------
     Kij : :py:class:`Array`, (`M`,)
         Covariances for each of the `M` `Xi`, `Xj` pairs.
     """
     only_first_order = (
         (scipy.asarray(ni, dtype=int) == 0).all() and
         (scipy.asarray(nj, dtype=int) == 0).all()
     )
     tau = scipy.asarray(Xi - Xj, dtype=float)
     r2l2, l_mat = self._compute_r2l2(tau, return_l=True)
     k = self.params[0]**2 * scipy.exp(-r2l2 / 2.0)
     if not only_first_order:
         # Account for derivatives:
         # Get total number of differentiations:
         n_tot_j = scipy.asarray(scipy.sum(nj, axis=1), dtype=int).flatten()
         n_combined = scipy.asarray(ni + nj, dtype=int)
         # Compute factor from the dtau_d/dx_d_j terms in the chain rule:
         j_chain_factors = (-1.0)**(n_tot_j)
         # Compute Hermite polynomial factor:
         hermite_factors = (
             (-1.0 / (scipy.sqrt(2.0) * l_mat))**(n_combined) *
             scipy.special.eval_hermite(n_combined, tau / (scipy.sqrt(2.0) * l_mat))
         )
         # Handle length scale hyperparameter derivatives:
         if hyper_deriv is not None and hyper_deriv > 0:
             t = (tau[:, hyper_deriv - 1])**2.0 / (self.params[hyper_deriv])**3.0
             mask = n_combined[:, hyper_deriv - 1] > 0
             t[mask] -= n_combined[mask, hyper_deriv - 1]  / self.params[hyper_deriv]
             mask = mask & (tau[:, hyper_deriv - 1] != 0.0)
             t[mask] -= (
                 scipy.sqrt(2.0) * n_combined[mask, hyper_deriv - 1] *
                 tau[mask, hyper_deriv - 1] / (self.params[hyper_deriv])**2.0 *
                 scipy.special.eval_hermite(
                     n_combined[mask, hyper_deriv - 1] - 1,
                     tau[mask, hyper_deriv - 1] / (scipy.sqrt(2.0) * self.params[hyper_deriv])
                 ) /
                 scipy.special.eval_hermite(
                     n_combined[mask, hyper_deriv - 1],
                     tau[mask, hyper_deriv - 1] / (scipy.sqrt(2.0) * self.params[hyper_deriv])
                 )
             )
             hermite_factors[:, hyper_deriv - 1] *= t
         
         k = j_chain_factors * scipy.prod(hermite_factors, axis=1) * k
     # Take care of hyperparameter derivatives:
     if hyper_deriv is None:
         return k
     elif hyper_deriv == 0:
         return 2.0 * k / self.params[0] if self.params[0] != 0.0 else scipy.zeros_like(k)
     else:
         # Keep efficient form for only_first_order:
         if only_first_order:
             return (tau[:, hyper_deriv - 1])**2.0 / (self.params[hyper_deriv])**3.0 * k
         else:
             # Was already computed above:
             return k
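The Hermite factor above is the standard derivative identity for a squared-exponential profile (physicists' convention, matching scipy.special.eval_hermite):

\[
\frac{d^{n}}{d\tau^{n}}\, e^{-\tau^{2}/(2\ell^{2})}
  = \left(\frac{-1}{\sqrt{2}\,\ell}\right)^{n}
    H_{n}\!\left(\frac{\tau}{\sqrt{2}\,\ell}\right)
    e^{-\tau^{2}/(2\ell^{2})}
\]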
Example #60
0
def magnitude_graph_cumu3(excel_file1, excel_file2, excel_file3):
    data1 = pd.read_excel(excel_file1)
    data2 = pd.read_excel(excel_file2)
    data3 = pd.read_excel(excel_file3)
    mag_column1 = data1.loc[:, 'Instrumental Magnitude']
    mag1 = sp.asarray(mag_column1.values)
    mag_column2 = data2.loc[:, 'Instrumental Magnitude']
    mag2 = sp.asarray(mag_column2.values)
    mag_column3 = data3.loc[:, 'Instrumental Magnitude']
    mag3 = sp.asarray(mag_column3.values)
    magn_counter = 0
    x = []
    xerr = sp.array([11, 12, 13, 14, 15, 16, 17, 18, 20])
    y1 = []
    y2 = []
    y3 = []
    yline = []

    y_fit1 = []
    y_fit2 = []
    y_fit3 = []
    x_fit = []
    while magn_counter < 20:
        number_count1 = sp.count_nonzero(mag1 < magn_counter)
        number_count2 = sp.count_nonzero(mag2 < magn_counter)
        number_count3 = sp.count_nonzero(mag3 < magn_counter)
        x.append(magn_counter)
        y1.append(sp.log10(number_count1))
        y2.append(sp.log10(number_count2))
        y3.append(sp.log10(number_count3))
        if 13 <= magn_counter <= 17:
            x_fit.append(magn_counter)
            y_fit1.append(sp.log10(number_count1))
            y_fit2.append(sp.log10(number_count2))
            y_fit3.append(sp.log10(number_count3))
        # simple Poisson statistics for now
        #error.append(sp.log10(sp.sqrt(number_count)))
        # Euclidean source-count prediction: slope 0.6 in log10 N(<m) vs m
        euclid_num = 0.6 * magn_counter - 5.5
        yline.append(euclid_num)
        magn_counter += 1

    fit1, cov1 = sp.polyfit(x_fit, y_fit1, deg=1, w=[1, 1, 1, 1, 1], cov=True)
    func1 = sp.poly1d(fit1)

    x = sp.asarray(x)
    y1 = sp.asarray(y1)
    y2 = sp.asarray(y2)
    y3 = sp.asarray(y3)
    x_fit = sp.asarray(x_fit)
    y_fit1 = sp.asarray(y_fit1)
    y_fit2 = sp.asarray(y_fit2)
    y_fit3 = sp.asarray(y_fit3)
    yline = sp.asarray(yline)
    error_up3 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.10, 0.061, 0.038, 0.023, 0.015,
        0.010, 0.006, 0.003, 0.0015
    ])
    error_down3 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.13, 0.071, 0.041, 0.0245, 0.0160,
        0.0104, 0.0064, 0.0032, 0.0015
    ])
    error_up4 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.11, 0.063, 0.04, 0.024, 0.0168,
        0.011, 0.006, 0.002, 0.0007
    ])
    error_down4 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.148, 0.078, 0.045, 0.0269, 0.0168,
        0.011, 0.0055, 0.002, 0.001
    ])
    error_up5 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.11, 0.066, 0.042, 0.025, 0.0165,
        0.01, 0.0045, 0.0013, 0.0005
    ])
    error_down5 = sp.array([
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.147, 0.078, 0.046, 0.0268, 0.0172,
        0.0102, 0.0046, 0.0013, 0.0005
    ])
    error3 = [error_down3, error_up3]
    error4 = [error_down4, error_up4]
    error5 = [error_down5, error_up5]

    #fig, axs = plt.subplots(1, 3,sharey=True)
    #plt.scatter(x,y1,marker='^',color='b',alpha=0.5)
    plt.errorbar(x, y1, yerr=error3, capsize=2, elinewidth=0.5, fmt='.b')
    plt.ylim(0.5, 4)
    plt.xlim(10, 20)
    plt.ylabel('log(N(<m))')
    plt.xlabel('Calibrated magnitude')
    plt.grid()
    #plt.scatter(x,y2,marker='o',color='g',alpha=0.5)
    plt.errorbar(x, y2, yerr=error4, capsize=2, elinewidth=0.5, fmt='.g')
    #plt.scatter(x,y3,marker='s',color='r',alpha=0.5)
    plt.errorbar(x, y3, yerr=error5, capsize=2, elinewidth=0.5, fmt='.r')
    plt.legend(['3σ threshold', '4σ threshold', '5σ threshold'], loc=4)
    plt.show()
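The 0.6 slope in euclid_num is the classical Euclidean source-count law: for sources distributed uniformly in flat space, \(N(<m)\propto 10^{0.6m}\), i.e.

\[
\log_{10} N(<m) = 0.6\,m + \mathrm{const},
\]

which is the straight line that yline tabulates for comparison with the measured counts.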