def test_mul(Poly):
    c1 = list(random((4, )) + .5)
    c2 = list(random((3, )) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = p1 * p2
    assert_poly_almost_equal(p2 * p1, p3)
    assert_poly_almost_equal(p1 * c2, p3)
    assert_poly_almost_equal(c2 * p1, p3)
    assert_poly_almost_equal(p1 * tuple(c2), p3)
    assert_poly_almost_equal(tuple(c2) * p1, p3)
    assert_poly_almost_equal(p1 * np.array(c2), p3)
    assert_poly_almost_equal(np.array(c2) * p1, p3)
    assert_poly_almost_equal(p1 * 2, p1 * Poly([2]))
    assert_poly_almost_equal(2 * p1, p1 * Poly([2]))
    assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1))
    if Poly is Polynomial:
        assert_raises(TypeError, op.mul, p1, Chebyshev([0]))
    else:
        assert_raises(TypeError, op.mul, p1, Polynomial([0]))
Example #2
def test_normalize_parts():
    w = np.arange(4000., 5000., 10.)
    coeff = [1., 0.1, 0.1]
    pol = Polynomial(coeff)
    obs_spectrum = Spectrum1D.from_array(w, pol(w), dispersion_unit=u.AA)
    parts = [slice(0, 30), slice(30, None)]
    norm_parts = NormalizeParts(obs_spectrum, parts, npol=[3, 3])
    model = Spectrum1D.from_array(w, np.ones_like(w), dispersion_unit=u.AA)
    fit = norm_parts(model)
    nptesting.assert_allclose(fit.flux.value, obs_spectrum.flux.value)
    for normalizer in norm_parts.normalizers:
        nptesting.assert_allclose(normalizer.polynomial.convert().coef,
                            np.array(coeff + [0.]), rtol=1e-3, atol=1.e-5)
    # try also with boolean arrays
    parts = [np.where(w < 4400.), np.where(w >= 4400.)]
    norm_parts = NormalizeParts(obs_spectrum, parts, npol=[3, 3])
    model = Spectrum1D.from_array(w, np.ones_like(w), dispersion_unit=u.AA)
    fit = norm_parts(model)
    nptesting.assert_allclose(fit.flux, obs_spectrum.flux)
    for normalizer in norm_parts.normalizers:
        nptesting.assert_allclose(normalizer.polynomial.convert().coef,
                            np.array(coeff + [0.]), rtol=1e-3, atol=1.e-5)
Example #3
def env_residual_rms(env, xs, ys):
    """Calculate the RMS of the envelope fit residuals

    Parameters
    ----------
    env : np.ndarray
        envelope parameters (polynomial coefficients)
    xs : list
        x values for the envelope fit
    ys : list
        y values for the envelope fit

    Returns
    -------
    float
        Standard deviation of the fit residuals.
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    resids = Polynomial(env)(xs) - ys

    return np.std(resids)
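
A minimal usage sketch (the coefficients and data points below are made up for illustration): evaluate the envelope polynomial at the sample positions and check the scatter of the residuals.

import numpy as np
from numpy.polynomial import Polynomial

# hypothetical quadratic envelope c0 + c1*x + c2*x**2 and noisy samples around it
env = [1.0, 0.5, -0.2]
xs = [0.0, 1.0, 2.0, 3.0]
ys = [1.05, 1.28, 1.22, 0.65]   # close to Polynomial(env)(xs)

print(env_residual_rms(env, xs, ys))   # small value -> tight envelope fit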
Example #4
def approx_legendre_poly(Moments):
    n_moments = Moments.shape[0] - 1

    exp_coef = np.zeros(1)

    # For method description see, for instance:
    # Chapter 3 of "The Problem of Moments", James Alexander Shohat, Jacob David Tamarkin
    for i in range(n_moments + 1):
        # i-th Legendre polynomial mapped onto [0, 1], written in the monomial basis
        p = Legendre.basis(i).convert(window=[0.0, 1.0], kind=Polynomial)

        # projection coefficient of the underlying density on that basis element
        q = (2*i + 1) * np.sum(Moments[0:(i + 1)] * p.coef)

        pq = p.coef * q

        exp_coef = polynomial.polyadd(exp_coef, pq)

    expansion = Polynomial(exp_coef)

    return expansion
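
A quick sanity check, assuming Moments are raw moments over [0, 1] (and that the Legendre/Polynomial/polynomial imports used by the function are in scope): for the uniform density the k-th moment is 1/(k + 1), so the expansion should come back as approximately the constant polynomial 1.

import numpy as np

moments = np.array([1.0 / (k + 1) for k in range(6)])   # E[x^k] for U(0, 1)
density = approx_legendre_poly(moments)
print(density.coef)                  # ~[1, 0, 0, 0, 0, 0]
print(density(0.25), density(0.75))  # both ~1.0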
Example #5
def parabola(initial_trajectory, speed):
    """
    Find the quadratic form of the parabolic path

    Parameters
    ----------
    initial_trajectory : float
        The initial angular trajectory of the path (degrees), measured from
        the x-axis. A positive angle is in an anticlockwise direction.
    speed : float
        The initial speed of the object (m/s)

    Returns
    -------
    eqn : np.polynomial.Polynomial object
        Equation of parabolic path
    """
    traj_rad = np.radians(initial_trajectory)
    eqn = Polynomial([
        0,
        np.tan(traj_rad), -constants.g / 2. / (speed * np.cos(traj_rad))**2.
    ])
    return eqn
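
A small usage sketch, assuming constants refers to scipy.constants (so constants.g is about 9.81 m/s**2): the positive root of the returned polynomial is the horizontal range, which for a 45 degree launch should equal v**2 * sin(2*theta) / g.

import numpy as np
from scipy import constants

eqn = parabola(45.0, 10.0)     # 45 degrees, 10 m/s
print(max(eqn.roots()))        # ~10.2 m
print(10.0**2 * np.sin(np.radians(90.0)) / constants.g)   # closed-form range, same value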
Example #6
def gradeSchoolParallel(poly1, poly2):
    pRes = Polynomial(
        [0 for _ in range(len(poly1.coef) + len(poly2.coef) - 1)])
    nrWorkers = 4
    step = len(poly1.coef) // nrWorkers
    stepRemainder = len(poly1.coef) % nrWorkers
    threads = []
    for x in range(nrWorkers):
        if stepRemainder != 0:
            t = Thread(target=singleMultiplication,
                       args=(poly1, poly2, pRes, x * (step + 1),
                             (x + 1) * (step + 1)))
            stepRemainder -= 1
        else:
            t = Thread(target=singleMultiplication,
                       args=(poly1, poly2, pRes,
                             x * step + len(poly1.coef) % nrWorkers,
                             (x + 1) * step + len(poly1.coef) % nrWorkers))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    return pRes
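
The helper singleMultiplication is not shown in this snippet; a plausible sketch consistent with how it is called (each worker multiplies a slice of poly1's coefficients by all of poly2 and accumulates into the shared result) could be:

def singleMultiplication(poly1, poly2, pRes, start, end):
    # grade-school multiplication restricted to poly1.coef[start:end]
    for i in range(start, min(end, len(poly1.coef))):
        for j in range(len(poly2.coef)):
            pRes.coef[i + j] += poly1.coef[i] * poly2.coef[j]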
Example #7
    def evaluate(self, wavelength, flux):
        # V[:,0]=mfi/e, Vp[:,1]=mfi/e*w, .., Vp[:,npol]=mfi/e*w**npol

        V = self._Vp * (flux / self.observed.uncertainty.value)[:, np.newaxis]
        # normalizes different powers
        scl = np.sqrt((V * V).sum(0))
        if np.isfinite(scl[0]):  # check for validity before evaluating
            sol, resids, rank, s = np.linalg.lstsq(V / scl,
                                                   self.signal_to_noise,
                                                   self._rcond)
            sol = (sol.T / scl).T
            if rank != self._Vp.shape[-1] - 1:
                msg = "The fit may be poorly conditioned"
                warnings.warn(msg)

            fit = np.dot(V, sol) * self.observed.uncertainty.value
            # keep coefficients in case the outside wants to look at it
            self.polynomial = Polynomial(sol,
                                         domain=self.domain.value,
                                         window=self.window.value)
            return wavelength, fit
        else:
            return wavelength, flux
Example #8
def check_floordiv(Poly):
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    c3 = list(random((2,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = Poly(c3)
    p4 = p1 * p2 + p3
    c4 = list(p4.coef)
    assert_poly_almost_equal(p4 // p2, p1)
    assert_poly_almost_equal(p4 // c2, p1)
    assert_poly_almost_equal(c4 // p2, p1)
    assert_poly_almost_equal(p4 // tuple(c2), p1)
    assert_poly_almost_equal(tuple(c4) // p2, p1)
    assert_poly_almost_equal(p4 // np.array(c2), p1)
    assert_poly_almost_equal(np.array(c4) // p2, p1)
    assert_poly_almost_equal(2 // p2, Poly([0]))
    assert_poly_almost_equal(p2 // 2, 0.5*p2)
    assert_raises(TypeError, p1.__floordiv__, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, p1.__floordiv__, Poly([0], window=Poly.window + 1))
    if Poly is Polynomial:
        assert_raises(TypeError, p1.__floordiv__, Chebyshev([0]))
    else:
        assert_raises(TypeError, p1.__floordiv__, Polynomial([0]))
Example #9
def build_deriv_integrals_l2(q: int,
                             h: float,
                             B: List[Polynomial],
                             p: int,
                             left: int = 0,
                             right: int = None):
    if right is None:
        right = q

    x = Polynomial([0., 1.])

    c = []
    for i in range(-q, q + 1):
        curr = 0
        for j in range(max(left, i), min(right, i + q) + 1):
            l, r = j * h, j * h + h
            first = B[j - i](x - i * h)
            second = B[j]
            integ = (first.deriv(p) * second.deriv(p)).integ()

            curr += integ(r) - integ(l)
        c.append(curr)

    return c
Example #10
def karatsubaSeq(poly1, poly2):
    threshold = 20
    if poly1.degree() <= threshold or poly2.degree() <= threshold:
        return gradeSchoolSeq(poly1, poly2)

    mid = max(len(poly1.coef), len(poly2.coef)) // 2

    low1 = Polynomial(poly1.coef[:mid])
    low2 = Polynomial(poly2.coef[:mid])
    high1 = Polynomial(poly1.coef[mid:])
    high2 = Polynomial(poly2.coef[mid:])

    res1 = karatsubaThreaded(low1, low2)
    res2 = karatsubaThreaded(low1 + high1, low2 + high2)
    res3 = karatsubaThreaded(high1, high2)

    return gradeSchoolSeq(res3, Polynomial([1] + [0 for _ in range(2 * mid)])) + \
           gradeSchoolSeq((res2 - res1 - res3), Polynomial([1] + [0 for _ in range(mid)])) + \
           res1
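
Both Karatsuba variants fall back to gradeSchoolSeq, which is not included in this snippet; a minimal sequential version consistent with that usage might be:

def gradeSchoolSeq(poly1, poly2):
    # plain O(n*m) coefficient-by-coefficient multiplication
    res = [0.0] * (len(poly1.coef) + len(poly2.coef) - 1)
    for i, a in enumerate(poly1.coef):
        for j, b in enumerate(poly2.coef):
            res[i + j] += a * b
    return Polynomial(res)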
Example #11
def karatsubaThreaded(poly1, poly2):
    threshold = 20
    if poly1.degree() <= threshold or poly2.degree() <= threshold:
        return gradeSchoolSeq(poly1, poly2)

    mid = max(len(poly1.coef), len(poly2.coef)) // 2

    low1 = Polynomial(poly1.coef[:mid])
    low2 = Polynomial(poly2.coef[:mid])
    high1 = Polynomial(poly1.coef[mid:])
    high2 = Polynomial(poly2.coef[mid:])

    tp = ThreadPool(processes=3)
    res1 = tp.apply_async(func=karatsubaThreaded, args=(low1, low2)).get()
    res2 = tp.apply_async(func=karatsubaThreaded,
                          args=(low1 + high1, low2 + high2)).get()
    res3 = tp.apply_async(func=karatsubaThreaded, args=(high1, high2)).get()

    return gradeSchoolSeq(res3, Polynomial([1] + [0 for _ in range(2*mid)])) + \
        gradeSchoolSeq((res2 - res1 - res3), Polynomial([1] + [0 for _ in range(mid)])) + \
        res1
Example #12
    def hypergeometric_lyap_exp(self,
                                nb_vectors=None,
                                nb_experiments=10,
                                nb_iterations=10**4,
                                verbose=False,
                                output_file=None,
                                return_error=False):
        r"""
        Compute the Lyapunov exponents of the geodesic flow in the hypergeometric function
        space.

        INPUT:

        - ``nb_vectors`` -- the number of vectors to use

        - ``nb_experiments`` -- number of experiments

        - ``nb_iterations`` -- the number of iterations of the induction to perform

        - ``output_file`` -- place where we print the results

        - ``verbose`` -- whether to print the results together with extensive additional information

        """
        import time
        import lyapunov_exponents  # the cython bindings
        from math import sqrt
        from cmath import exp, pi
        from numpy.polynomial import Polynomial

        if output_file is None:
            from sys import stdout
            output_file = stdout
            closed = True
        elif isinstance(output_file, str):
            output_file = open(output_file, "w")
            closed = False
        else:
            closed = True

        if nb_vectors is not None and nb_vectors <= 0:
            raise ValueError("the number of vectors must be positive")
        if nb_experiments <= 0:
            raise ValueError("the number of experiments must be positive")
        if nb_iterations <= 0:
            raise ValueError("the number of iterations must be positive")

        # recall that the lyapunov exponents are symmetric
        if nb_vectors is None:
            nb_vectors = self._dimension // 2

        if max(self.hodge_numbers()) == self._dimension:
            if verbose:
                output_file.write(
                    "by signature of the invariant hermitian form, all lyapunov exponents are 0\n"
                )
            if return_error:
                return [0] * nb_vectors, [0] * nb_vectors, 0, 0
            else:
                return [0] * nb_vectors

        n = self._dimension
        alpha, beta = self._alpha, self._beta

        p_zero, p_infinity = Polynomial(1 + 0 * 1j), Polynomial(1 + 0 * 1j)
        for i in range(n):
            p_zero *= Polynomial([-E(-alpha[i]), 1])
            p_infinity *= Polynomial([-E(-beta[i]), 1])

        t0 = time.time()
        res = lyapunov_exponents.lyapunov_exponents(p_zero.coef,
                                                    p_infinity.coef,
                                                    self._dimension,
                                                    nb_vectors, nb_experiments,
                                                    nb_iterations)
        t1 = time.time()

        res_final = []
        std_final = []
        s_m, s_d = 0, 0

        if verbose:
            from math import floor, log
            output_file.write("sample of %d experiments\n" % nb_experiments)
            output_file.write(
                "%d iterations (~2**%d)\n" %
                (nb_iterations, floor(log(nb_iterations) / log(2))))
            output_file.write("ellapsed time %s\n" %
                              time.strftime("%H:%M:%S", time.gmtime(t1 - t0)))
        for i in range(nb_vectors):
            m, d = mean_and_std_dev(res[i])
            s_m += m
            s_d += d**2
            if verbose:
                output_file.write(
                    "theta%d           : %f (std. dev. = %f, conf. rad. 0.01 = %f)\n"
                    % (i, m, d, 2.576 * d / sqrt(nb_experiments)))
            res_final.append(m)
            std_final.append(2.576 * d / sqrt(nb_experiments))

        s_d = sqrt(s_d)
        s_d_final = 2.576 * s_d / sqrt(nb_experiments)
        if verbose:
            output_file.write(
                "sum_theta        : %f (std. dev. = %f, conf. rad. 0.01 = %f)\n\n"
                % (s_m, s_d, 2.576 * s_d / sqrt(nb_experiments)))

        if not closed:
            output_file.close()
            print("file closed")

        if return_error:
            return (res_final, std_final, s_m, s_d_final)
        else:
            return res_final
Example #13
    # psr = 'B1919+21'
    # psr = 'B2016+28'
    # psr = 'B1957+20'
    # psr = 'B0329+54'
    psr = 'J1810+1744'

    dm_dict = {
        'B0329+54': 26.833 * u.pc / u.cm**3,
        'J1810+1744': 39.659298 * u.pc / u.cm**3,
        'B1919+21': 12.455 * u.pc / u.cm**3,
        'B1957+20': 29.11680 * 1.001 * u.pc / u.cm**3,
        'B2016+28': 14.172 * u.pc / u.cm**3
    }
    phasepol_dict = {
        'B0329+54':
        Polynomial([0., 1.399541538720]),
        'J1810+1744':
        Polynomial([
            5123935.3179235281, 601.3858344512422, -6.8670334150772988e-06,
            1.6851467436247837e-10, 1.4924190280848832e-13,
            3.681791676784501e-18, 3.4408214917205562e-22,
            2.3962705401172674e-25, 2.7467843239802234e-29,
            1.3396130966170961e-33, 3.0840132342990634e-38,
            2.7633775352567796e-43
        ]),
        'B1919+21':
        Polynomial([0.5, 0.7477741603725]),
        'B2016+28':
        Polynomial([0., 1.7922641135652])
    }
Example #14
def g2(x):
    p = Polynomial([2.0, 1.0, 0.0, -2.0, 2.5, 1.0])
    return np.sqrt(p(x) / 6)
Example #15
    def from_endf(cls, ev, file_obj, items):
        """Create Reich-Moore resonance data from an ENDF evaluation.

        Parameters
        ----------
        ev : endf.Evaluation
            ENDF evaluation
        file_obj : file-like object
            ENDF file positioned at the second record of a resonance range
            subsection in MF=2, MT=151
        items : list
            Items from the CONT record at the start of the resonance range
            subsection

        Returns
        -------
        ReichMoore
            Reich-Moore resonance parameters

        """
        # Read energy-dependent scattering radius if present
        energy_min, energy_max = items[0:2]
        nro, naps = items[4:6]
        if nro != 0:
            params, ape = get_tab1_record(file_obj)

        # Other scatter radius parameters
        items = get_cont_record(file_obj)
        target_spin = items[0]
        ap = Polynomial((items[1], ))
        angle_distribution = (items[3] == 1)  # Flag for angular distribution
        NLS = items[4]  # Number of l-values
        num_l_convergence = items[5]  # Number of l-values for convergence

        # Read resonance widths, J values, etc
        channel_radius = {}
        scattering_radius = {}
        records = []
        for i in range(NLS):
            items, values = get_list_record(file_obj)
            apl = Polynomial((items[1], )) if items[1] != 0.0 else ap
            l_value = items[2]
            awri = items[0]

            # Calculate channel radius from ENDF-102 equation D.14
            a = Polynomial((0.123 * (NEUTRON_MASS * awri)**(1. / 3.) + 0.08, ))

            # Construct scattering and channel radius
            if nro == 0:
                scattering_radius[l_value] = apl
                if naps == 0:
                    channel_radius[l_value] = a
                elif naps == 1:
                    channel_radius[l_value] = apl
            elif nro == 1:
                if naps == 0:
                    channel_radius[l_value] = a
                    scattering_radius[l_value] = ape
                elif naps == 1:
                    channel_radius[l_value] = scattering_radius[l_value] = ape
                elif naps == 2:
                    channel_radius[l_value] = apl
                    scattering_radius[l_value] = ape

            energy = values[0::6]
            spin = values[1::6]
            gn = values[2::6]
            gg = values[3::6]
            gfa = values[4::6]
            gfb = values[5::6]

            for i, E in enumerate(energy):
                records.append([
                    energy[i], l_value, spin[i], gn[i], gg[i], gfa[i], gfb[i]
                ])

        # Create pandas DataFrame with resonance data
        columns = [
            'energy', 'L', 'J', 'neutronWidth', 'captureWidth',
            'fissionWidthA', 'fissionWidthB'
        ]
        parameters = pd.DataFrame.from_records(records, columns=columns)

        # Create instance of ReichMoore
        rm = cls(target_spin, energy_min, energy_max, channel_radius,
                 scattering_radius)
        rm.parameters = parameters
        rm.angle_distribution = angle_distribution
        rm.num_l_convergence = num_l_convergence
        rm.atomic_weight_ratio = awri

        return rm
Example #16
def get_boundaries(bounddict_file, slitlet_number):
    """Read the bounddict json file and return the polynomial boundaries.

    Parameters
    ----------
    bounddict_file : file handler
        File containing the bounddict JSON data.
    slitlet_number : int
        Number of slitlet.

    Returns
    -------
    pol_lower_boundary : numpy polynomial
        Polynomial defining the lower boundary of the slitlet.
    pol_upper_boundary : numpy polynomial
        Polynomial defining the upper boundary of the slitlet.
    xmin_lower : float
        Minimum abscissae for the lower boundary.
    xmax_lower : float
        Maximum abscissae for the lower boundary.
    xmin_upper : float
        Minimum abscissae for the upper boundary.
    xmax_upper : float
        Maximum abscissae for the upper boundary.
    csu_bar_slit_center : float
        CSU bar slit center (in mm)

    """

    bounddict = json.loads(open(bounddict_file.name).read())

    # return values in case the requested slitlet number is not defined
    pol_lower_boundary = None
    pol_upper_boundary = None
    xmin_lower = None
    xmax_lower = None
    xmin_upper = None
    xmax_upper = None
    csu_bar_slit_center = None

    # search the slitlet number in bounddict
    slitlet_label = "slitlet" + str(slitlet_number).zfill(2)
    if slitlet_label in bounddict['contents'].keys():
        list_date_obs = list(bounddict['contents'][slitlet_label].keys())
        list_date_obs.sort()
        num_date_obs = len(list_date_obs)
        if num_date_obs == 1:
            date_obs = list_date_obs[0]
            tmp_dict = bounddict['contents'][slitlet_label][date_obs]
            pol_lower_boundary = Polynomial(tmp_dict['boundary_coef_lower'])
            pol_upper_boundary = Polynomial(tmp_dict['boundary_coef_upper'])
            xmin_lower = tmp_dict['boundary_xmin_lower']
            xmax_lower = tmp_dict['boundary_xmax_lower']
            xmin_upper = tmp_dict['boundary_xmin_upper']
            xmax_upper = tmp_dict['boundary_xmax_upper']
            csu_bar_slit_center = tmp_dict['csu_bar_slit_center']
        else:
            raise ValueError("num_date_obs =", num_date_obs, " (must be 1)")
    else:
        print("WARNING: slitlet number " + str(slitlet_number) +
              " is not available in " + bounddict_file.name)

    # return result
    return pol_lower_boundary, pol_upper_boundary, \
           xmin_lower, xmax_lower, xmin_upper, xmax_upper, \
           csu_bar_slit_center
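
A small usage sketch showing the JSON layout the function expects; the slitlet label, observation date and numeric values below are invented for illustration.

import json
import tempfile

bounddict = {
    "contents": {
        "slitlet03": {
            "2016-01-01T00:00:00": {
                "boundary_coef_lower": [10.0, 0.01],
                "boundary_coef_upper": [40.0, 0.01],
                "boundary_xmin_lower": 1.0,
                "boundary_xmax_lower": 2048.0,
                "boundary_xmin_upper": 1.0,
                "boundary_xmax_upper": 2048.0,
                "csu_bar_slit_center": 100.0
            }
        }
    }
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(bounddict, fh)

pol_lower, pol_upper, *limits, csu_center = get_boundaries(open(fh.name), 3)
print(pol_lower(1024.0), csu_center)   # 20.24 100.0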
Example #17
    def setUp(self):
        self.poly = Polynomial([1, 3, 2], [-3, 2])
        self.inter = [-2, .0]
        self.tol = 0.2
Example #18
File: predictor.py  Project: mhvk/pulsar
    def polynomial(self,
                   index,
                   rphase=None,
                   deriv=0,
                   t0=None,
                   time_unit=u.min,
                   out_unit=None,
                   convert=False):
        """Prediction polynomial set up for times in MJD

        Parameters
        ----------
        index : int or float
            index into the polyco table (or MJD for finding closest)
        rphase : None or 'fraction' or float
            phase zero point; if None, use the one stored in polyco.
            (Those are typically large, so one loses some precision.)
            Can also set 'fraction' to use the stored one modulo 1, which is
            fine for folding, but breaks cycle count continuity between sets.
        deriv : int
            derivative of phase to take (1=frequency, 2=fdot, etc.); default 0

        Returns
        -------
        polynomial : Polynomial
            set up for MJDs between mjd_mid ± span

        Notes
        -----
        Units for the polynomial are cycles/second**deriv.  Taking a derivative
        outside will be per day (e.g., self.polynomial(1).deriv() gives
        frequencies in cycles/day)
        """

        out_unit = out_unit or time_unit

        try:
            index = index.__index__()
        except (AttributeError, TypeError):
            index = self.searchclosest(index)
        window = np.array([-1, 1]) * self['span'][index] / 2 * u.min

        polynomial = Polynomial(self['coeff'][index], window.value,
                                window.value)
        polynomial.coef[1] += self['f0'][index] * 60.

        if deriv == 0:
            if rphase is None:
                polynomial.coef[0] += self['rphase'][index]
            elif rphase == 'fraction':
                polynomial.coef[0] += self['rphase'][index] % 1
            else:
                polynomial.coef[0] = rphase
        else:
            polynomial = polynomial.deriv(deriv)
            polynomial.coef /= u.min.to(out_unit)**deriv

        if t0 is None:
            dt = 0. * time_unit
        elif not hasattr(t0, 'jd1') and t0 == 0:
            dt = (-self['mjd_mid'][index] * u.day).to(time_unit)
        else:
            dt = ((t0 - Time(self['mjd_mid'][index], format='mjd',
                             scale='utc')).jd * u.day).to(time_unit)

        polynomial.domain = (window.to(time_unit) - dt).value

        if convert:
            return polynomial.convert()
        else:
            return polynomial
Example #19
    def from_endf(cls, file_obj, items, fission_widths):
        """Read unresolved resonance data from an ENDF evaluation.

        Parameters
        ----------
        file_obj : file-like object
            ENDF file positioned at the second record of a resonance range
            subsection in MF=2, MT=151
        items : list
            Items from the CONT record at the start of the resonance range
            subsection
        fission_widths : bool
            Whether fission widths are given

        Returns
        -------
        openmc.data.Unresolved
            Unresolved resonance region parameters

        """
        # Read energy-dependent scattering radius if present
        energy_min, energy_max = items[0:2]
        nro, naps = items[4:6]
        if nro != 0:
            params, ape = get_tab1_record(file_obj)

        # Get SPI, AP, and LSSF
        formalism = items[3]
        if not (fission_widths and formalism == 1):
            items = get_cont_record(file_obj)
            target_spin = items[0]
            if nro == 0:
                ap = Polynomial((items[1], ))
            add_to_background = (items[2] == 0)

        if not fission_widths and formalism == 1:
            # Case A -- fission widths not given, all parameters are
            # energy-independent
            NLS = items[4]
            columns = ['L', 'J', 'd', 'amun', 'gn0', 'gg']
            records = []
            for ls in range(NLS):
                items, values = get_list_record(file_obj)
                awri = items[0]
                l = items[2]
                NJS = items[5]
                for j in range(NJS):
                    d, j, amun, gn0, gg = values[6 * j:6 * j + 5]
                    records.append([l, j, d, amun, gn0, gg])
            parameters = pd.DataFrame.from_records(records, columns=columns)
            energies = None

        elif fission_widths and formalism == 1:
            # Case B -- fission widths given, only fission widths are
            # energy-dependent
            items, energies = get_list_record(file_obj)
            target_spin = items[0]
            if nro == 0:
                ap = Polynomial((items[1], ))
            add_to_background = (items[2] == 0)
            NE, NLS = items[4:6]
            records = []
            columns = ['L', 'J', 'E', 'd', 'amun', 'amuf', 'gn0', 'gg', 'gf']
            for ls in range(NLS):
                items = get_cont_record(file_obj)
                awri = items[0]
                l = items[2]
                NJS = items[4]
                for j in range(NJS):
                    items, values = get_list_record(file_obj)
                    muf = items[3]
                    d = values[0]
                    j = values[1]
                    amun = values[2]
                    gn0 = values[3]
                    gg = values[4]
                    gfs = values[6:]
                    for E, gf in zip(energies, gfs):
                        records.append([l, j, E, d, amun, muf, gn0, gg, gf])
            parameters = pd.DataFrame.from_records(records, columns=columns)

        elif formalism == 2:
            # Case C -- all parameters are energy-dependent
            NLS = items[4]
            columns = [
                'L', 'J', 'E', 'd', 'amux', 'amun', 'amuf', 'gx', 'gn0', 'gg',
                'gf'
            ]
            records = []
            for ls in range(NLS):
                items = get_cont_record(file_obj)
                awri = items[0]
                l = items[2]
                NJS = items[4]
                for j in range(NJS):
                    items, values = get_list_record(file_obj)
                    ne = items[5]
                    j = items[0]
                    amux = values[2]
                    amun = values[3]
                    amuf = values[5]
                    energies = []
                    for k in range(1, ne + 1):
                        E = values[6 * k]
                        d = values[6 * k + 1]
                        gx = values[6 * k + 2]
                        gn0 = values[6 * k + 3]
                        gg = values[6 * k + 4]
                        gf = values[6 * k + 5]
                        energies.append(E)
                        records.append(
                            [l, j, E, d, amux, amun, amuf, gx, gn0, gg, gf])
            parameters = pd.DataFrame.from_records(records, columns=columns)

        # Calculate channel radius from ENDF-102 equation D.14
        a = Polynomial((0.123 * (NEUTRON_MASS * awri)**(1. / 3.) + 0.08, ))

        # Determine scattering and channel radius
        if nro == 0:
            scattering_radius = ap
            if naps == 0:
                channel_radius = a
            elif naps == 1:
                channel_radius = ap
        elif nro == 1:
            scattering_radius = ape
            if naps == 0:
                channel_radius = a
            elif naps == 1:
                channel_radius = ape
            elif naps == 2:
                channel_radius = ap

        urr = cls(target_spin, energy_min, energy_max, channel_radius,
                  scattering_radius)
        urr.parameters = parameters
        urr.add_to_background = add_to_background
        urr.atomic_weight_ratio = awri
        urr.energies = energies

        return urr
Example #20
"""
from __future__ import division, absolute_import, print_function

import operator as op
from numbers import Number

import numpy as np
from numpy.polynomial import (Polynomial, Legendre, Chebyshev, Laguerre,
                              Hermite, HermiteE)
from numpy.testing import (assert_almost_equal, assert_raises, assert_equal,
                           assert_, run_module_suite)
from numpy.compat import long

classes = (Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)

np.negative(Polynomial([0]))


def test_class_methods():
    for Poly1 in classes:
        for Poly2 in classes:
            yield check_conversion, Poly1, Poly2
            yield check_cast, Poly1, Poly2
    for Poly in classes:
        yield check_call, Poly
        yield check_identity, Poly
        yield check_basis, Poly
        yield check_fromroots, Poly
        yield check_fit, Poly
        yield check_equal, Poly
        yield check_not_equal, Poly
Example #21
                                            poly_order)

### Reconstruct data
bb_reconstructed = gs.wien_approximation(wl_sub_vec,Tave,bb_eps)
eps_vec_reconstructed = np.exp(filtered_data)/bb_reconstructed
# Since we get epsilon from the filtered data, "reconstructed_data" will be
# exactly like "filtered_data"
reconstructed_data = bb_reconstructed * eps_vec_reconstructed # exactly filtered   

# Alternative using the polynomial from optimization
reconstructed_alt = gs.wien_approximation(wl_sub_vec,Tave,bb_eps)
wl_min = np.min(wl_sub_vec)
wl_max = np.max(wl_sub_vec)

if poly_order > 0:
    cheb = Polynomial(sol.x,[wl_min,wl_max])
    eps_vec = polynomial.polyval(wl_sub_vec,cheb.coef)
    
else:
    eps_ave = np.average(eps_vec_reconstructed)
    eps_vec = eps_ave * np.ones(len(wl_sub_vec))
    
reconstructed_alt *= eps_vec


#### Plots
data = np.genfromtxt('data/duvaut-1995/tantalum-irradiance.csv', delimiter=',',skip_header=1)
radiance = data[:,1] / 1e7
wl_vec_radiance = data[:,0] * 1000 # Wavelengths are in micro-meter

fig, ax = plt.subplots(2,1)
Example #22
                    0.0016072823189784409,
                    1337.21932367595014])
    gates = np.zeros((4,4))
    gates[1] = [59,60,61,62]
    gates[2] = [1,8,9,16]
    gates[3] = [224,226,227,231]

    # Fiddle with DM of 1957
    dm0[2] *= 1.001
    # select pulsar
    psr = psrname[ipsr]
    dm = dm0[ipsr]
    t0 = -p00[ipsr]/3.
    f0 = 622.1678503154773  # 1./p00[ipsr]
    f1 = 1.45706137397e-06/2.  # 0.
    phasepol = Polynomial([f0, f1]).integ(1, 0., t0)
    igate = gates[ipsr]
    fndir1 = '/mnt/raid-project/gmrt/pen/B1937/1957+20/b'

    file1 = fndir1 + psr + '_pa.raw0.Pol-L1.dat'
    file2 = fndir1 + psr + '_pa.raw0.Pol-L2.dat'

    nhead = 0*32*1024*1024
    # frequency samples in a block; every sample is two bytes: real, imag
    nblock = 512
    # nt=45 for 1508, 180 for 0809 and 156 for 0531
    nt = 1024//2*8*2  # number of sets to fold  -> //128 for quick try
    ntint = 1024*32*1024//(nblock*2)//4  # total # of blocks per set
    ngate = 32//2  # number of bins over the pulsar period
    ntbin = 16*1  # number of bins the time series is split into for folding
    ntw = min(10000, nt*ntint)  # number of samples to combine for waterfall
Example #23
def polydiv_coef(p: Polynomial, mod: int) -> Polynomial:
    return Polynomial([c % mod for c in p.coef])
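
For example, reducing the coefficients of 10 + 3x + 8x**2 modulo 7:

from numpy.polynomial import Polynomial

p = Polynomial([10, 3, 8])
print(polydiv_coef(p, 7).coef)   # [3. 3. 1.]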
Example #24
    d_correct = f.deriv(2)

    for h_i in h:
        f_error = np.append(f_error, [((h_i * Y_correct) / 2.0)])
        b_error = np.append(b_error, [((h_i * Y_correct) / 2.0)])
        c_error = np.append(c_error, [((h_i * h_i * d_correct(x)) / 6)])

    return f_error, b_error, c_error


##################################

fig, ax = plt.subplots()
ax.axhline(y=0, color='k')

p = Polynomial([2.0, 1.0, -6.0, -2.0, 2.5, 1.0])
data = p.linspace(domain=[-2.4, 1.5])
ax.plot(data[0], data[1], label='Function')

p_prime = p.deriv(1)
data2 = p_prime.linspace(domain=[-2.4, 1.5])
ax.plot(data2[0], data2[1], label='Derivative')

ax.legend()

##################################

h = 1
fig, bx = plt.subplots()
bx.axhline(y=0, color='k')
Example #25
    def from_endf(cls, ev, file_obj, items):
        """Create MLBW data from an ENDF evaluation.

        Parameters
        ----------
        ev : endf.Evaluation
            ENDF evaluation
        file_obj : file-like object
            ENDF file positioned at the second record of a resonance range
            subsection in MF=2, MT=151
        items : list
            Items from the CONT record at the start of the resonance range
            subsection

        Returns
        -------
        MultiLevelBreitWigner
            Multi-level Breit-Wigner resonance parameters

        """

        # Read energy-dependent scattering radius if present
        energy_min, energy_max = items[0:2]
        nro, naps = items[4:6]
        if nro != 0:
            params, ape = get_tab1_record(file_obj)

        # Other scatter radius parameters
        items = get_cont_record(file_obj)
        target_spin = items[0]
        ap = Polynomial((items[1], ))  # energy-independent scattering-radius
        NLS = items[4]  # number of l-values

        # Read resonance widths, J values, etc
        channel_radius = {}
        scattering_radius = {}
        q_value = {}
        records = []
        for l in range(NLS):
            items, values = get_list_record(file_obj)
            l_value = items[2]
            awri = items[0]
            q_value[l_value] = items[1]
            competitive = items[3]

            # Calculate channel radius from ENDF-102 equation D.14
            a = Polynomial((0.123 * (NEUTRON_MASS * awri)**(1. / 3.) + 0.08, ))

            # Construct scattering and channel radius
            if nro == 0:
                scattering_radius[l_value] = ap
                if naps == 0:
                    channel_radius[l_value] = a
                elif naps == 1:
                    channel_radius[l_value] = ap
            elif nro == 1:
                scattering_radius[l_value] = ape
                if naps == 0:
                    channel_radius[l_value] = a
                elif naps == 1:
                    channel_radius[l_value] = ape
                elif naps == 2:
                    channel_radius[l_value] = ap

            energy = values[0::6]
            spin = values[1::6]
            gt = np.asarray(values[2::6])
            gn = np.asarray(values[3::6])
            gg = np.asarray(values[4::6])
            gf = np.asarray(values[5::6])
            if competitive > 0:
                gx = gt - (gn + gg + gf)
            else:
                gx = np.zeros_like(gt)

            for i, E in enumerate(energy):
                records.append([
                    energy[i], l_value, spin[i], gt[i], gn[i], gg[i], gf[i],
                    gx[i]
                ])

        columns = [
            'energy', 'L', 'J', 'totalWidth', 'neutronWidth', 'captureWidth',
            'fissionWidth', 'competitiveWidth'
        ]
        parameters = pd.DataFrame.from_records(records, columns=columns)

        # Create instance of class
        mlbw = cls(target_spin, energy_min, energy_max, channel_radius,
                   scattering_radius)
        mlbw.q_value = q_value
        mlbw.atomic_weight_ratio = awri
        mlbw.parameters = parameters

        return mlbw
Example #26
    def polynomial(self,
                   index,
                   rphase=None,
                   deriv=0,
                   t0=None,
                   time_unit=u.min,
                   out_unit=None,
                   convert=False):
        """Prediction polynomial set up for times in MJD

        Parameters
        ----------
        index : int or float
            index into the polyco table (or MJD for finding closest)
        rphase : None or 'fraction' or 'ignore' or float
            Phase zero point; if None, use the one stored in polyco.
            (Those are typically large, so one loses some precision.)
            Can also set 'fraction' to use the stored one modulo 1, which is
            fine for folding, but breaks cycle count continuity between sets,
            'ignore' for just keeping the value stored in the coefficients,
            or a value that should replace the zero point.
        deriv : int
            derivative of phase to take (1=frequency, 2=fdot, etc.); default 0

        Returns
        -------
        polynomial : Polynomial
            set up for MJDs between mjd_mid +/- span

        Notes
        -----
        Units for the polynomial are cycles/second**deriv.  Taking a derivative
        outside will be per day (e.g., self.polynomial(1).deriv() gives
        frequencies in cycles/day)
        """

        out_unit = out_unit or time_unit

        try:
            index = index.__index__()
        except (AttributeError, TypeError):
            index = self.searchclosest(index)
        window = np.array([-1, 1]) * self['span'][index] / 2

        polynomial = Polynomial(self['coeff'][index], window.value,
                                window.value)
        polynomial.coef[1] += self['f0'][index].to_value(u.cycle / u.minute)

        if deriv == 0:
            if rphase is None:
                polynomial.coef[0] += self['rphase'][index].value
            elif rphase == 'fraction':
                polynomial.coef[0] += self['rphase']['frac'][index].value % 1
            elif rphase != 'ignore':
                polynomial.coef[0] = rphase
        else:
            polynomial = polynomial.deriv(deriv)
            polynomial.coef /= u.min.to(out_unit)**deriv

        if t0 is not None:
            dt = Time(t0, format='mjd') - self['mjd_mid'][index]
            polynomial.domain = (window - dt).to(time_unit).value

        if convert:
            return polynomial.convert()
        else:
            return polynomial
Example #27
# -*- coding: utf-8 -*-
"""lab6B.py

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/13GdeGlRUzt1Naw5YFzzsALPKT1dLmrut
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.polynomial import Polynomial

f = Polynomial([2.0, 1.0, -6.0, -2.0, 2.5, 1.0])
g1 = Polynomial([-2.0, 0.0, 6.0, 2.0, -2.5, -1.0])


def g2(x):
    p = Polynomial([2.0, 1.0, 0.0, -2.0, 2.5, 1.0])
    return np.sqrt(p(x) / 6)


def g3(x):
    p = Polynomial([-2.0, -1.0, 6.0, 2.0, 0.0, -1.0])
    return np.power(p(x) / 2.5, 1.0 / 4.0)


a1 = 0.8
g1_a = []
Example #28
def plot_polynomial(f, x, c=4):
        y = [f(x_i) for x_i in x]

        if ii == 0:
            plt.plot(x, [y_i - c for y_i in y], ls='-',
                     c="black", label=round(ii, 1))
            plt.plot(x, [y_i + c for y_i in y], ls='-', c="black")
        else:
            plt.plot(x, [y_i - c for y_i in y], ls='--', label=round(ii, 1))


plt.subplot(2, 3, 1)
plt.ylim((-10, 14))
plt.title(r'$f(x) = a \cdot x + c$')
for ii in seq(-0.5, 0.2, by=0.1):
    plot_polynomial(Polynomial([0., 1. + ii]), seq(0, 4))

plt.subplot(2, 3, 2)
plt.title(r'$f(x) = a \cdot x^2 + c$')
for ii in seq(-0.5, 0.2, by=0.1):
    plot_polynomial(Polynomial([0., 0., 1. + ii]), seq(0, 4))

plt.subplot(2, 3, 3)
plt.title(r'$f(x) = a \cdot x^3 + c$')
for ii in seq(-0.5, 0.2, by=0.1):
    plot_polynomial(Polynomial([0., 0., 0., 1. + ii]), seq(0, 4))

lgd = plt.legend(title=r'$\delta = a - a_{ref}$',
                 bbox_to_anchor=(1.04, 1.04),
                 loc="upper left")
plt.savefig('../plots/examples.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight',
Example #29
def g3(x):
    p = Polynomial([-2.0, -1.0, 6.0, 2.0, 0.0, -1.0])
    return np.power(p(x) / 2.5, 1.0 / 4.0)
Example #30
import numpy as np
import sympy
from numpy.polynomial import Polynomial
from numpy.polynomial import Chebyshev
from sympy.abc import x
import scipy.optimize


def T(n_):
    return Chebyshev(np.append(np.zeros(n_), 1))


a = 0
b = 2


P_n = Polynomial([0, 1, 0, 1])  # 0 + 1*x + 0*x^2 + 1*x^3
n = P_n.degree()

# Zero approximation
P_n_min = scipy.optimize.fminbound(P_n, a, b, full_output=True)[1]
P_n_max = -scipy.optimize.fminbound(-P_n, a, b, full_output=True)[1]
Q0 = (P_n_max + P_n_min) / 2

# First approximation
alpha1 = (P_n(b) - P_n(a)) / (b - a)
r = (P_n.deriv() - Polynomial([alpha1])).roots()
d = next(t for t in r if a < t < b)
alpha0 = (P_n(a) + P_n(d) - alpha1 * (a + d)) / 2
Q1 = alpha0 + alpha1 * x
Q1 = sympy.lambdify(x, Q1)