Example #1
0
def specwindow_lsp_value(times, mags, errs, omega):
    '''
    This calculates the peak associated with the spectral window function
    for times and at the specified omega.

    '''

    norm_times = times - times.min()

    tau = ((1.0 / (2.0 * omega)) * nparctan(
        npsum(npsin(2.0 * omega * norm_times)) /
        npsum(npcos(2.0 * omega * norm_times))))

    lspval_top_cos = (npsum(1.0 * npcos(omega * (norm_times - tau))) *
                      npsum(1.0 * npcos(omega * (norm_times - tau))))
    lspval_bot_cos = npsum((npcos(omega * (norm_times - tau))) *
                           (npcos(omega * (norm_times - tau))))

    lspval_top_sin = (npsum(1.0 * npsin(omega * (norm_times - tau))) *
                      npsum(1.0 * npsin(omega * (norm_times - tau))))
    lspval_bot_sin = npsum((npsin(omega * (norm_times - tau))) *
                           (npsin(omega * (norm_times - tau))))

    lspval = 0.5 * ((lspval_top_cos / lspval_bot_cos) +
                    (lspval_top_sin / lspval_bot_sin))

    return lspval
Example #2
0
def townsend_lombscargle_value(times, mags, omega):
    '''
    This calculates the periodogram value at a single omega (= 2*pi*f). Mags
    must be normalized to zero with variance scaled to unity.

    '''
    cos_omegat = npcos(omega * times)
    sin_omegat = npsin(omega * times)

    xc = npsum(mags * cos_omegat)
    xs = npsum(mags * sin_omegat)

    cc = npsum(cos_omegat * cos_omegat)
    ss = npsum(sin_omegat * sin_omegat)

    cs = npsum(cos_omegat * sin_omegat)

    tau = nparctan(2 * cs / (cc - ss)) / (2 * omega)

    ctau = npcos(omega * tau)
    stau = npsin(omega * tau)

    leftsumtop = (ctau * xc + stau * xs) * (ctau * xc + stau * xs)
    leftsumbot = ctau * ctau * cc + 2.0 * ctau * stau * cs + stau * stau * ss
    leftsum = leftsumtop / leftsumbot

    rightsumtop = (ctau * xs - stau * xc) * (ctau * xs - stau * xc)
    rightsumbot = ctau * ctau * ss - 2.0 * ctau * stau * cs + stau * stau * cc
    rightsum = rightsumtop / rightsumbot

    pval = 0.5 * (leftsum + rightsum)

    return pval
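
A minimal usage sketch (not from the source): assuming the `np*` aliases above are the usual NumPy functions and the function above is in scope, the value can be scanned over a frequency grid to recover the period of a synthetic, unit-variance sinusoid.

import numpy as np
from numpy import sin as npsin, cos as npcos, sum as npsum, arctan as nparctan

# synthetic, unevenly sampled sinusoid; normalize as the docstring requires
rng = np.random.default_rng(42)
times = np.sort(rng.uniform(0.0, 30.0, size=300))
mags = np.sin(2.0 * np.pi * times / 3.0) + 0.1 * rng.normal(size=300)
mags = (mags - mags.mean()) / mags.std()

freqs = np.linspace(0.05, 2.0, 2000)              # test frequencies (cycles per unit time)
pgram = np.array([townsend_lombscargle_value(times, mags, 2.0 * np.pi * f)
                  for f in freqs])
best_period = 1.0 / freqs[pgram.argmax()]         # should land near 3.0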
Example #3
0
    def robot_curve(self, curve_type: CurveType, side: RobotSide):
        """
        Calculates the given curve for the given side of the robot.
        :param curve_type: The type of the curve to calculate
        :param side: The side to use in the calculation
        :return: The points of the calculated curve
        """
        coeff = (self.robot.robot_info[3] /
                 2) * (1 if side == RobotSide.LEFT else -1)
        cp = self.control_points()

        t = linspace(0, 1, samples=Trajectory.SAMPLE_SIZE + 1)
        curves = [
            Curve(control_points=points,
                  spline_type=SplineType.QUINTIC_HERMITE) for points in cp
        ]

        dx, dy = npconcat([c.calculate(t, CurveType.VELOCITY)
                           for c in curves]).T
        theta = nprads(angle_from_slope(dx, dy))

        points = npconcat([c.calculate(t, curve_type) for c in curves])
        normals = coeff * nparray([-npsin(theta), npcos(theta)]).T

        return points + normals
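
The offsetting step above is just `points + coeff * [-sin(theta), cos(theta)]`. A self-contained sketch of that normal-offset idea in plain NumPy, with an illustrative half track width (not from the source project):

import numpy as np

# a toy centerline: quarter circle sampled at 50 points
t = np.linspace(0.0, np.pi / 2.0, 50)
points = np.stack([np.cos(t), np.sin(t)], axis=1)   # (N, 2) curve points
dx, dy = -np.sin(t), np.cos(t)                      # velocity components along the curve
theta = np.arctan2(dy, dx)                          # heading angle at each sample

half_width = 0.35                                   # assumed half track width (robot_info[3] / 2)
coeff = half_width * 1                              # +1 -> left side, -1 -> right side
normals = coeff * np.array([-np.sin(theta), np.cos(theta)]).T

left_curve = points + normals                       # the side curve, offset along the normals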
Example #4
0
def specwindow_lsp_value(times, mags, errs, omega):
    '''This calculates the peak associated with the spectral window function
    for times and at the specified omega.

    NOTE: this is classical Lomb-Scargle, not the Generalized
    Lomb-Scargle. `mags` and `errs` are silently ignored since we're calculating
    the periodogram of the observing window function. These are kept to present
    a consistent external API so the `pgen_lsp` function below can call this
    transparently.

    Parameters
    ----------

    times,mags,errs : np.array
        The time-series to calculate the periodogram value for.

    omega : float
        The frequency to calculate the periodogram value at.

    Returns
    -------

    periodogramvalue : float
        The normalized periodogram at the specified test frequency `omega`.

    '''

    norm_times = times - times.min()

    tau = ((1.0 / (2.0 * omega)) * nparctan(
        npsum(npsin(2.0 * omega * norm_times)) /
        npsum(npcos(2.0 * omega * norm_times))))

    lspval_top_cos = (npsum(1.0 * npcos(omega * (norm_times - tau))) *
                      npsum(1.0 * npcos(omega * (norm_times - tau))))
    lspval_bot_cos = npsum((npcos(omega * (norm_times - tau))) *
                           (npcos(omega * (norm_times - tau))))

    lspval_top_sin = (npsum(1.0 * npsin(omega * (norm_times - tau))) *
                      npsum(1.0 * npsin(omega * (norm_times - tau))))
    lspval_bot_sin = npsum((npsin(omega * (norm_times - tau))) *
                           (npsin(omega * (norm_times - tau))))

    lspval = 0.5 * ((lspval_top_cos / lspval_bot_cos) +
                    (lspval_top_sin / lspval_bot_sin))

    return lspval
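
A minimal usage sketch (not from the source): since `mags` and `errs` are ignored, only the sampling pattern matters; assuming the `np*` aliases above are NumPy functions, the window periodogram can be scanned over a frequency grid.

import numpy as np
from numpy import sin as npsin, cos as npcos, sum as npsum, arctan as nparctan

rng = np.random.default_rng(0)
times = np.sort(rng.uniform(0.0, 100.0, size=500))   # uneven sampling pattern
mags = np.zeros_like(times)                           # ignored
errs = np.ones_like(times)                            # ignored

freqs = np.linspace(0.01, 5.0, 2000)
window = np.array([specwindow_lsp_value(times, mags, errs, 2.0 * np.pi * f)
                   for f in freqs])
# peaks in `window` mark frequencies imposed by the sampling itself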
Example #5
0
def wind_components(wind_speed, wind_direction):
    '''
    Return the U and V wind components from wind speed and
    wind direction (in degrees).
    '''
    U = wind_speed * npsin(npradians(wind_direction)) * -1
    V = wind_speed * npcos(npradians(wind_direction)) * -1
    return U, V
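
A short usage sketch (assuming `npsin`, `npcos`, and `npradians` are NumPy's `sin`, `cos`, and `radians`): the `-1` factor implements the meteorological convention, i.e. the direction is where the wind blows *from*.

import numpy as np
from numpy import sin as npsin, cos as npcos, radians as npradians

# a 10 m/s wind from the north (0 deg) gives U ~ 0, V ~ -10
u, v = wind_components(10.0, 0.0)

# vectorized over several directions: N, E, S, W
speeds = np.array([5.0, 5.0, 5.0, 5.0])
dirs = np.array([0.0, 90.0, 180.0, 270.0])
U, V = wind_components(speeds, dirs)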
Example #6
0
        def overlap_value(x, y, r, th):
            """
			Find the overlap area between a cartesian and a polar bin.
			"""

            thmin = max(th - dth / 2, atan2(y - 0.5, x + 0.5))
            thmax = min(th + dth / 2, atan2(y + 0.5, x - 0.5))

            rin = lambda theta: maximum(
                r - dr / 2,
                maximum((x - 0.5) / npcos(theta), (y - 0.5) / npsin(theta)))
            rout = lambda theta: minimum(
                r + dr / 2,
                minimum((x + 0.5) / npcos(theta), (y + 0.5) / npsin(theta)))

            integrand = lambda theta: maximum(
                rout(theta)**2 - rin(theta)**2, 0)

            return 0.5 * quad(integrand, thmin, thmax)[0]
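
The helper above closes over `dr`, `dth`, `quad` (from `scipy.integrate`), and NumPy's `maximum`/`minimum`. A self-contained sketch with those made explicit (valid for first-quadrant bins, which is what the divisions by `cos`/`sin` assume; not from the source):

import numpy as np
from math import atan2
from scipy.integrate import quad

def overlap_value_standalone(x, y, r, th, dr, dth):
    # angular extent shared by the polar bin and the unit pixel centred on (x, y)
    thmin = max(th - dth / 2, atan2(y - 0.5, x + 0.5))
    thmax = min(th + dth / 2, atan2(y + 0.5, x - 0.5))

    # inner/outer radius of the overlap region at a given angle
    rin = lambda theta: np.maximum(r - dr / 2,
                                   np.maximum((x - 0.5) / np.cos(theta),
                                              (y - 0.5) / np.sin(theta)))
    rout = lambda theta: np.minimum(r + dr / 2,
                                    np.minimum((x + 0.5) / np.cos(theta),
                                               (y + 0.5) / np.sin(theta)))

    # polar area element: integrate (rout^2 - rin^2) / 2 over the angular overlap
    integrand = lambda theta: np.maximum(rout(theta)**2 - rin(theta)**2, 0)
    return 0.5 * quad(integrand, thmin, thmax)[0]

area = overlap_value_standalone(x=2, y=1, r=2.0, th=0.5, dr=0.5, dth=0.2)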
Example #7
0
def sine_series_sum(fourierparams, times, mags, errs):
    '''This generates a sinusoidal light curve using a sine series.

    The series is generated using the coefficients provided in
    fourierparams. This is a sequence like so:

    [period,
     epoch,
     [ampl_1, ampl_2, ampl_3, ..., ampl_X],
     [pha_1, pha_2, pha_3, ..., pha_X]]

    where X is the Fourier order.

    '''

    period, epoch, famps, fphases = fourierparams

    # figure out the order from the length of the Fourier param list
    forder = len(famps)

    # phase the times with this period
    iphase = (times - epoch) / period
    iphase = iphase - npfloor(iphase)

    phasesortind = npargsort(iphase)
    phase = iphase[phasesortind]
    ptimes = times[phasesortind]
    pmags = mags[phasesortind]
    perrs = errs[phasesortind]

    # calculate all the individual terms of the series
    fseries = [
        famps[x] * npsin(2.0 * MPI * x * phase + fphases[x])
        for x in range(forder)
    ]

    # this is the zeroth order coefficient - a constant equal to median mag
    modelmags = npmedian(mags)

    # sum the series
    for fo in fseries:
        modelmags += fo

    return modelmags, phase, ptimes, pmags, perrs
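
A minimal usage sketch (not from the source): assuming `npsin`, `npfloor`, `npargsort`, `npmedian`, and `MPI` map to NumPy's `sin`, `floor`, `argsort`, `median`, and `pi`, a second-order series can be evaluated like this.

import numpy as np
from numpy import (sin as npsin, floor as npfloor, argsort as npargsort,
                   median as npmedian, pi as MPI)

times = np.linspace(0.0, 10.0, 500)
mags = np.full_like(times, 12.0)            # flat light curve, median 12.0 mag
errs = np.full_like(times, 0.01)

fourierparams = [
    2.5,                                     # period
    0.0,                                     # epoch
    [0.05, 0.02],                            # ampl_1, ampl_2
    [0.0, 1.5],                              # pha_1, pha_2
]

modelmags, phase, ptimes, pmags, perrs = sine_series_sum(
    fourierparams, times, mags, errs)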
Example #8
0
    s = session.run(merged)
    writer.add_summary(s, k)
    if k % 200 == 0:
        print("k =", k, "A1 =", session.run(A1[0]), "A2 =", session.run(A2[0]),
              "f1 =", session.run(f1[0]), "f2 =", session.run(f2[0]),
              "loss =", session.run(loss))

AA1,ff1,AA2,ff2 = session.run([A1,f1,A2,f2])
AA1 = AA1[0]
AA2 = AA2[0]
ff1 = ff1[0]
ff2 = ff2[0]

HW.write("QUESTION 2: The wave equation is: y = " + str(AA1) + "sin(" + str(ff1) + "x) + " + str(AA2) + "sin(" + str(ff2) + "x)\n\n")

#Answer to question 3
plt.plot(x_data,y_data, alpha=0.2)
plt.plot(x_data,AA1*npsin(ff1*x_data) + AA2*npsin(ff2*x_data))
plt.legend(['Raw_data', 'Result'])
plt.show()

HW.write("QUESTION 3: ATTACHED\n\n")

#Answer to Question 4
HW.write("QUESTION 4 : Value of regression at x=0.6pi: " + str(AA1*npsin(ff1*.6*pi) + AA2*npsin(ff2*.6*pi)) + '\n\n')

tb = program.TensorBoard()
tb.configure(argv=[None, '--logdir', "./logs/2/train"])
url = tb.launch()

#Answer to Question 5
HW.write("QUESTION 5 : One obviously sees that we could have gotten away with 600 iterations\n\n")
HW.write("REMARK: Another interesting observation is that we could have gotten away with a much larger training step since the loss function is highly convergent.\n\n")
Example #9
0
def phrt_plan(im, energy, distance, pixsize, regpar, thresh, method, padding):
    """Pre-compute data to save time in further execution of phase_retrieval.

	Parameters
	----------
	im : array_like
		Image data as numpy array. Only image size (shape) is actually used.
	
	energy [KeV]: double
		Energy in KeV of the incident X-ray beam.
	
	distance [mm]: double
		Sample-to-detector distance in mm.
	
	pixsize [mm]: double
		Size in mm of the detector element.
	
	regpar: double
		Regularization parameter: RegPar is - log10 of the constant to be added to the denominator
		to regularize the singularity at zero frequency, i.e. 1/sin(x) -> 1/(sin(x)+10^-RegPar). 
		Typical values in the range [2.0, 3.0]. (Suggestion for default: 2.5).
	
	thresh: double
		Parameter for Quasiparticle phase retrieval which defines the width of the rings to be cropped 
		around the zero crossing of the CTF denominator in Fourier space. Typical values in the range
		[0.01, 0.1]. (Suggestion for default: 0.1).
	
	method : int 
		Phase retrieval algorithm {1 = TIE (default), 2 = CTF, 3 = CTF first-half sine, 4 = Quasiparticle, 
		5 = Quasiparticle first half sine}.
	
	padding : bool
		Apply image padding to better process the boundary of the image.

	References
	----------


	Credits
	-------
	Julian Moosmann, KIT (Germany) is acknowledged for this code
	
	"""
    # Adapt input values:
    distance = distance / 1000.0  # Conversion to m
    pixsize = pixsize / 1000.0  # Conversion to m

    # Get additional values:
    lam = 6.62606896e-34 * 299792458 / (energy * 1.60217733e-16)
    k = 2 * pi * lam * distance / (pixsize**2)

    # Replicate pad image:
    dim0_o = im.shape[0]
    dim1_o = im.shape[1]
    if (padding):
        n_pad0 = im.shape[0] + im.shape[0] // 2
        n_pad1 = im.shape[1] + im.shape[1] // 2
    else:
        n_pad0 = dim0_o
        n_pad1 = dim1_o

    # Ensure even size:
    if (n_pad0 % 2 == 1):
        n_pad0 = n_pad0 + 1
    if (n_pad1 % 2 == 1):
        n_pad1 = n_pad1 + 1

    # Create coordinates grid:
    xi = concatenate((arange(0, ceil((n_pad1 - 1) / 2.0) + 1),
                      arange(-floor((n_pad1 - 1) / 2.0), 0))) / n_pad1
    eta = concatenate((arange(0, ceil((n_pad0 - 1) / 2.0) + 1),
                       arange(-floor((n_pad0 - 1) / 2.0), 0))) / n_pad0

    [u, v] = meshgrid(xi, eta)
    u = k * (u * u + v * v) / 2.0

    # Filter:
    if method == 1:  # TIE:
        filter = 0.5 / (u + 10**-regpar)

    elif method == 2:  # CTF:
        v = npsin(u)
        filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)

    elif method == 3:  # CTF first-half sine:
        v = npsin(u)
        filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)
        filter[u >= pi] = 0

    elif method == 4:  # Quasiparticle:
        v = npsin(u)
        filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)
        filter[logical_and(u > pi / 2.0, fabs(v) < thresh)] = 0

    elif method == 5:  # Quasiparticle first half sine:
        v = npsin(u)
        filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)
        filter[logical_and(u > pi / 2.0, fabs(v) < thresh)] = 0
        filter[u >= pi] = 0

    elif method == 6:  #	Projected CTF (alternative implementation):
        v = npsin(u)
        filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)
        tmp = sign(filter) / (2 * (thresh + 10**-regpar))
        msk = logical_and(u > pi / 2.0, fabs(v) < thresh)
        filter = filter * (1 - msk) + tmp * msk
        #filter[ u >= pi ] = 0

    # Restore zero frequency component:
    filter[0, 0] = 0.5 * 10**regpar

    return {
        'dim0': dim0_o,
        'dim1': dim1_o,
        'npad0': n_pad0,
        'npad1': n_pad1,
        'filter': filter
    }
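
A usage sketch (not from the source): assuming the unqualified names (`pi`, `sign`, `fabs`, `concatenate`, `arange`, `ceil`, `floor`, `meshgrid`, `logical_and`) are NumPy imports, a TIE filter plan for a 256x256 projection could be built as follows; the beam and geometry values are illustrative only.

import numpy as np
from numpy import (pi, sign, fabs, concatenate, arange, ceil, floor,
                   meshgrid, logical_and, sin as npsin)

im = np.zeros((256, 256), dtype=np.float32)   # only the shape is used
plan = phrt_plan(im, energy=20.0, distance=150.0, pixsize=0.0025,
                 regpar=2.5, thresh=0.1, method=1, padding=True)

print(plan['npad0'], plan['npad1'], plan['filter'].shape)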
Example #10
0
def sin(x):
    return npsin(x)
Example #11
0
def sin(x):
    r"""Sine
    """
    return npsin(x)
Example #12
0
def aovhm_theta(times, mags, errs, frequency, nharmonics, magvariance):
    '''This calculates the harmonic AoV theta statistic for a frequency.

    This is a mostly faithful translation of the inner loop in `aovper.f90`. See
    the following for details:

    - http://users.camk.edu.pl/alex/
    - Schwarzenberg-Czerny (`1996
      <http://iopscience.iop.org/article/10.1086/309985/meta>`_)

    Schwarzenberg-Czerny (1996) equation 11::

        theta_prefactor = (K - 2N - 1)/(2N)
        theta_top = sum(c_n*c_n) (from n=0 to n=2N)
        theta_bot = variance(timeseries) - sum(c_n*c_n) (from n=0 to n=2N)

        theta = theta_prefactor * (theta_top/theta_bot)

        N = number of harmonics (nharmonics)
        K = length of time series (times.size)

    Parameters
    ----------

    times,mags,errs : np.array
        The input time-series to calculate the test statistic for. These should
        all be free of nans/infs and be normalized to zero.

    frequency : float
        The test frequency to calculate the statistic for.

    nharmonics : int
        The number of harmonics to calculate up to. The recommended range is
        4 to 8.

    magvariance : float
        This is the (weighted by errors) variance of the magnitude time
        series. We provide it as a pre-calculated value here so we don't have to
        re-calculate it for every worker.

    Returns
    -------

    aov_harmonic_theta : float
        The value of the harmonic AoV theta for the specified test `frequency`.

    '''

    period = 1.0 / frequency

    ndet = times.size
    two_nharmonics = nharmonics + nharmonics

    # phase with test period
    phasedseries = phase_magseries_with_errs(times,
                                             mags,
                                             errs,
                                             period,
                                             times[0],
                                             sort=True,
                                             wrap=False)

    # get the phased quantities
    phase = phasedseries['phase']
    pmags = phasedseries['mags']
    perrs = phasedseries['errs']

    # this is sqrt(1.0/errs^2) -> the weights
    pweights = 1.0 / perrs

    # multiply by 2.0*PI (for omega*time)
    phase = phase * 2.0 * pi_value

    # this is the z complex vector
    z = npcos(phase) + 1.0j * npsin(phase)

    # multiply phase with N
    phase = nharmonics * phase

    # this is the psi complex vector
    psi = pmags * pweights * (npcos(phase) + 1j * npsin(phase))

    # this is the initial value of z^n
    zn = 1.0 + 0.0j

    # this is the initial value of phi
    phi = pweights + 0.0j

    # initialize theta to zero
    theta_aov = 0.0

    # go through all the harmonics now up to 2N
    for _ in range(two_nharmonics):

        # this is <phi, phi>
        phi_dot_phi = npsum(phi * phi.conjugate())

        # this is the alpha_n numerator
        alpha = npsum(pweights * z * phi)

        # this is <phi, psi>. make sure to use npvdot and NOT npdot to get
        # complex conjugate of first vector as expected for complex vectors
        phi_dot_psi = npvdot(phi, psi)

        # make sure phi_dot_phi is not zero
        phi_dot_phi = npmax([phi_dot_phi, 10.0e-9])

        # this is the expression for alpha_n
        alpha = alpha / phi_dot_phi

        # update theta_aov for this harmonic
        theta_aov = (theta_aov +
                     npabs(phi_dot_psi) * npabs(phi_dot_psi) / phi_dot_phi)

        # use the recurrence relation to find the next phi
        phi = phi * z - alpha * zn * phi.conjugate()

        # update z^n
        zn = zn * z

    # done with all harmonics, calculate the theta_aov for this freq
    # the max below makes sure that magvariance - theta_aov > zero
    theta_aov = ((ndet - two_nharmonics - 1.0) * theta_aov /
                 (two_nharmonics * npmax([magvariance - theta_aov, 1.0e-9])))

    return theta_aov
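
A usage sketch (not from the source): it assumes the `np*` aliases and `pi_value` map to NumPy names and that `phase_magseries_with_errs` is the phasing helper from `astrobase.lcmath`; the weighted-variance expression below is one plausible definition of `magvariance`.

import numpy as np
from numpy import (pi as pi_value, sum as npsum, cos as npcos, sin as npsin,
                   vdot as npvdot, max as npmax, abs as npabs)
from astrobase.lcmath import phase_magseries_with_errs  # assumed helper

rng = np.random.default_rng(7)
times = np.sort(rng.uniform(0.0, 60.0, size=400))
mags = 0.1 * np.sin(2.0 * np.pi * times / 2.0) + 0.01 * rng.normal(size=400)
errs = np.full_like(times, 0.01)

weights = 1.0 / (errs * errs)
wmean = np.average(mags, weights=weights)
magvariance = np.sum(weights * (mags - wmean)**2) / np.sum(weights)

theta_peak = aovhm_theta(times, mags, errs, frequency=0.5, nharmonics=4,
                         magvariance=magvariance)
theta_off = aovhm_theta(times, mags, errs, frequency=0.123, nharmonics=4,
                        magvariance=magvariance)
# theta should be far larger at the true frequency (0.5 c/d) than off-peak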
Example #13
0
def generalized_lsp_value_notau(times, mags, errs, omega):
    '''
    This is the simplified version not using tau.

    W = sum (1.0/(errs*errs) )
    w_i = (1/W)*(1/(errs*errs))

    Y = sum( w_i*y_i )
    C = sum( w_i*cos(wt_i) )
    S = sum( w_i*sin(wt_i) )

    YY = sum( w_i*y_i*y_i ) - Y*Y
    YC = sum( w_i*y_i*cos(wt_i) ) - Y*C
    YS = sum( w_i*y_i*sin(wt_i) ) - Y*S

    CpC = sum( w_i*cos(w_t_i)*cos(w_t_i) )
    CC = CpC - C*C
    SS = (1 - CpC) - S*S
    CS = sum( w_i*cos(w_t_i)*sin(w_t_i) ) - C*S

    D(omega) = CC*SS - CS*CS
    P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)

    '''

    one_over_errs2 = 1.0 / (errs * errs)

    W = npsum(one_over_errs2)
    wi = one_over_errs2 / W

    sin_omegat = npsin(omega * times)
    cos_omegat = npcos(omega * times)

    sin2_omegat = sin_omegat * sin_omegat
    cos2_omegat = cos_omegat * cos_omegat
    sincos_omegat = sin_omegat * cos_omegat

    # calculate some more sums and terms
    Y = npsum(wi * mags)
    C = npsum(wi * cos_omegat)
    S = npsum(wi * sin_omegat)

    YpY = npsum(wi * mags * mags)

    YpC = npsum(wi * mags * cos_omegat)
    YpS = npsum(wi * mags * sin_omegat)

    CpC = npsum(wi * cos2_omegat)
    # SpS = npsum( wi*sin2_omegat )

    CpS = npsum(wi * sincos_omegat)

    # the final terms
    YY = YpY - Y * Y
    YC = YpC - Y * C
    YS = YpS - Y * S
    CC = CpC - C * C
    SS = 1 - CpC - S * S  # use SpS = 1 - CpC
    CS = CpS - C * S

    # P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)
    # D(omega) = CC*SS - CS*CS
    Domega = CC * SS - CS * CS
    lspval = (SS * YC * YC + CC * YS * YS - 2.0 * CS * YC * YS) / (YY * Domega)

    return lspval
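
A usage sketch (not from the source): with the `np*` aliases bound to NumPy, the no-tau value can be scanned over a frequency grid to recover a known period.

import numpy as np
from numpy import sum as npsum, sin as npsin, cos as npcos

rng = np.random.default_rng(1)
times = np.sort(rng.uniform(0.0, 100.0, size=300))
mags = 0.05 * np.sin(2.0 * np.pi * times / 7.0) + 0.01 * rng.normal(size=300)
errs = np.full_like(times, 0.01)

test_freqs = np.linspace(0.01, 1.0, 3000)
pgram = np.array([generalized_lsp_value_notau(times, mags, errs, 2.0 * np.pi * f)
                  for f in test_freqs])
best_period = 1.0 / test_freqs[pgram.argmax()]   # should land near 7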
Example #14
0
def generalized_lsp_value(times, mags, errs, omega):
    '''Generalized LSP value for a single omega.

    The relations used are::

        P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS)

        where: YC, YS, CC, and SS are all calculated at T

        and where: tan 2omegaT = 2*CS/(CC - SS)

        and where:

        Y = sum( w_i*y_i )
        C = sum( w_i*cos(wT_i) )
        S = sum( w_i*sin(wT_i) )

        YY = sum( w_i*y_i*y_i ) - Y*Y
        YC = sum( w_i*y_i*cos(wT_i) ) - Y*C
        YS = sum( w_i*y_i*sin(wT_i) ) - Y*S

        CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) )
        CC = CpC - C*C
        SS = (1 - CpC) - S*S
        CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S

    Parameters
    ----------

    times,mags,errs : np.array
        The time-series to calculate the periodogram value for.

    omega : float
        The frequency to calculate the periodogram value at.

    Returns
    -------

    periodogramvalue : float
        The normalized periodogram at the specified test frequency `omega`.

    '''

    one_over_errs2 = 1.0 / (errs * errs)

    W = npsum(one_over_errs2)
    wi = one_over_errs2 / W

    sin_omegat = npsin(omega * times)
    cos_omegat = npcos(omega * times)

    sin2_omegat = sin_omegat * sin_omegat
    cos2_omegat = cos_omegat * cos_omegat
    sincos_omegat = sin_omegat * cos_omegat

    # calculate some more sums and terms
    Y = npsum(wi * mags)
    C = npsum(wi * cos_omegat)
    S = npsum(wi * sin_omegat)

    CpS = npsum(wi * sincos_omegat)
    CpC = npsum(wi * cos2_omegat)
    CS = CpS - C * S
    CC = CpC - C * C
    SS = 1 - CpC - S * S  # use SpS = 1 - CpC

    # calculate tau
    tan_omega_tau_top = 2.0 * CS
    tan_omega_tau_bottom = CC - SS
    tan_omega_tau = tan_omega_tau_top / tan_omega_tau_bottom
    tau = nparctan(tan_omega_tau) / (2.0 * omega)

    YpY = npsum(wi * mags * mags)

    YpC = npsum(wi * mags * cos_omegat)
    YpS = npsum(wi * mags * sin_omegat)

    # SpS = npsum( wi*sin2_omegat )

    # the final terms
    YY = YpY - Y * Y
    YC = YpC - Y * C
    YS = YpS - Y * S

    periodogramvalue = (YC * YC / CC + YS * YS / SS) / YY

    return periodogramvalue
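
A usage sketch (not from the source): assuming the `np*` aliases are NumPy functions, the value at the true frequency of a strong, low-noise sinusoid should approach 1 and sit well above an off-peak frequency.

import numpy as np
from numpy import sum as npsum, sin as npsin, cos as npcos, arctan as nparctan

rng = np.random.default_rng(2)
times = np.sort(rng.uniform(0.0, 50.0, size=250))
mags = 0.08 * np.sin(2.0 * np.pi * times / 4.0) + 0.01 * rng.normal(size=250)
errs = np.full_like(times, 0.01)

p_true = generalized_lsp_value(times, mags, errs, 2.0 * np.pi / 4.0)
p_off = generalized_lsp_value(times, mags, errs, 2.0 * np.pi / 11.0)
# p_true should be close to 1; p_off should be much smaller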
Example #15
0
def generalized_lsp_value_notau(times, mags, errs, omega):
    '''
    This is the simplified version not using tau.

    The relations used are::

        W = sum (1.0/(errs*errs) )
        w_i = (1/W)*(1/(errs*errs))

        Y = sum( w_i*y_i )
        C = sum( w_i*cos(wt_i) )
        S = sum( w_i*sin(wt_i) )

        YY = sum( w_i*y_i*y_i ) - Y*Y
        YC = sum( w_i*y_i*cos(wt_i) ) - Y*C
        YS = sum( w_i*y_i*sin(wt_i) ) - Y*S

        CpC = sum( w_i*cos(w_t_i)*cos(w_t_i) )
        CC = CpC - C*C
        SS = (1 - CpC) - S*S
        CS = sum( w_i*cos(w_t_i)*sin(w_t_i) ) - C*S

        D(omega) = CC*SS - CS*CS
        P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)

    Parameters
    ----------

    times,mags,errs : np.array
        The time-series to calculate the periodogram value for.

    omega : float
        The frequency to calculate the periodogram value at.

    Returns
    -------

    periodogramvalue : float
        The normalized periodogram at the specified test frequency `omega`.

    '''

    one_over_errs2 = 1.0 / (errs * errs)

    W = npsum(one_over_errs2)
    wi = one_over_errs2 / W

    sin_omegat = npsin(omega * times)
    cos_omegat = npcos(omega * times)

    sin2_omegat = sin_omegat * sin_omegat
    cos2_omegat = cos_omegat * cos_omegat
    sincos_omegat = sin_omegat * cos_omegat

    # calculate some more sums and terms
    Y = npsum(wi * mags)
    C = npsum(wi * cos_omegat)
    S = npsum(wi * sin_omegat)

    YpY = npsum(wi * mags * mags)

    YpC = npsum(wi * mags * cos_omegat)
    YpS = npsum(wi * mags * sin_omegat)

    CpC = npsum(wi * cos2_omegat)
    # SpS = npsum( wi*sin2_omegat )

    CpS = npsum(wi * sincos_omegat)

    # the final terms
    YY = YpY - Y * Y
    YC = YpC - Y * C
    YS = YpS - Y * S
    CC = CpC - C * C
    SS = 1 - CpC - S * S  # use SpS = 1 - CpC
    CS = CpS - C * S

    # P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)
    # D(omega) = CC*SS - CS*CS
    Domega = CC * SS - CS * CS
    lspval = (SS * YC * YC + CC * YS * YS - 2.0 * CS * YC * YS) / (YY * Domega)

    return lspval
Example #16
0
def exact_u(x):
    L = xf - xi
    return p * L**2 * npsin(pi * x / L) / (100.0 * pi**2) + (p * L /
                                                             (100.0 * pi)) * x
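
`exact_u` reads `xi`, `xf`, and `p` from module scope. A sketch with illustrative values for those globals (assumptions, not from the source):

import numpy as np
from numpy import sin as npsin, pi

xi, xf = 0.0, 1.0     # assumed domain endpoints
p = 10.0              # assumed load parameter

x = np.linspace(xi, xf, 11)
u = exact_u(x)        # analytic solution sampled on the grid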
Example #17
0
def generalized_lsp_value(times, mags, errs, omega):
    '''Generalized LSP value for a single omega.

    P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS)

    where: YC, YS, CC, and SS are all calculated at T

    and where: tan 2omegaT = 2*CS/(CC - SS)

    and where:

    Y = sum( w_i*y_i )
    C = sum( w_i*cos(wT_i) )
    S = sum( w_i*sin(wT_i) )

    YY = sum( w_i*y_i*y_i ) - Y*Y
    YC = sum( w_i*y_i*cos(wT_i) ) - Y*C
    YS = sum( w_i*y_i*sin(wT_i) ) - Y*S

    CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) )
    CC = CpC - C*C
    SS = (1 - CpC) - S*S
    CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S

    '''

    one_over_errs2 = 1.0/(errs*errs)

    W = npsum(one_over_errs2)
    wi = one_over_errs2/W

    sin_omegat = npsin(omega*times)
    cos_omegat = npcos(omega*times)

    sin2_omegat = sin_omegat*sin_omegat
    cos2_omegat = cos_omegat*cos_omegat
    sincos_omegat = sin_omegat*cos_omegat

    # calculate some more sums and terms
    Y = npsum( wi*mags )
    C = npsum( wi*cos_omegat )
    S = npsum( wi*sin_omegat )

    YpY = npsum( wi*mags*mags)

    YpC = npsum( wi*mags*cos_omegat )
    YpS = npsum( wi*mags*sin_omegat )

    CpC = npsum( wi*cos2_omegat )
    # SpS = npsum( wi*sin2_omegat )

    CpS = npsum( wi*sincos_omegat )

    # the final terms
    YY = YpY - Y*Y
    YC = YpC - Y*C
    YS = YpS - Y*S
    CC = CpC - C*C
    SS = 1 - CpC - S*S # use SpS = 1 - CpC
    CS = CpS - C*S

    # calculate tau
    tan_omega_tau_top = 2.0*CS
    tan_omega_tau_bottom = CC - SS
    tan_omega_tau = tan_omega_tau_top/tan_omega_tau_bottom
    tau = nparctan(tan_omega_tau)/(2.0*omega)

    periodogramvalue = (YC*YC/CC + YS*YS/SS)/YY

    return periodogramvalue
Example #18
0
def generalized_lsp_value_notau(times, mags, errs, omega):
    '''
    This is the simplified version not using tau.

    W = sum (1.0/(errs*errs) )
    w_i = (1/W)*(1/(errs*errs))

    Y = sum( w_i*y_i )
    C = sum( w_i*cos(wt_i) )
    S = sum( w_i*sin(wt_i) )

    YY = sum( w_i*y_i*y_i ) - Y*Y
    YC = sum( w_i*y_i*cos(wt_i) ) - Y*C
    YS = sum( w_i*y_i*sin(wt_i) ) - Y*S

    CpC = sum( w_i*cos(w_t_i)*cos(w_t_i) )
    CC = CpC - C*C
    SS = (1 - CpC) - S*S
    CS = sum( w_i*cos(w_t_i)*sin(w_t_i) ) - C*S

    D(omega) = CC*SS - CS*CS
    P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)

    '''

    one_over_errs2 = 1.0/(errs*errs)

    W = npsum(one_over_errs2)
    wi = one_over_errs2/W

    sin_omegat = npsin(omega*times)
    cos_omegat = npcos(omega*times)

    sin2_omegat = sin_omegat*sin_omegat
    cos2_omegat = cos_omegat*cos_omegat
    sincos_omegat = sin_omegat*cos_omegat

    # calculate some more sums and terms
    Y = npsum( wi*mags )
    C = npsum( wi*cos_omegat )
    S = npsum( wi*sin_omegat )

    YpY = npsum( wi*mags*mags)

    YpC = npsum( wi*mags*cos_omegat )
    YpS = npsum( wi*mags*sin_omegat )

    CpC = npsum( wi*cos2_omegat )
    # SpS = npsum( wi*sin2_omegat )

    CpS = npsum( wi*sincos_omegat )

    # the final terms
    YY = YpY - Y*Y
    YC = YpC - Y*C
    YS = YpS - Y*S
    CC = CpC - C*C
    SS = 1 - CpC - S*S # use SpS = 1 - CpC
    CS = CpS - C*S

    # P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)
    # D(omega) = CC*SS - CS*CS
    Domega = CC*SS - CS*CS
    lspval = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*Domega)

    return lspval
Example #19
0
def prepare_plan(im, beta, delta, energy, distance, pixsize, method=1, padding=False):
	"""Pre-compute data to save time in further execution of phase_retrieval

	Parameters
	----------
	im : array_like
		Image data as numpy array. Only image size (shape) is actually used.
	beta : double
		Imaginary part of the complex X-ray refraction index.
	delta : double
		Decrement from unity of the complex X-ray refraction index.
	energy [KeV]: double
		Energy in KeV of the incident X-ray beam.
	distance [mm]: double
		Sample-to-detector distance in mm.
	pixsize [mm]: double
		Size in mm of the detector element.
	method : int 
		Phase retrieval algorithm {1 = TIE (default), 2 = Born, 3 = Rytov, 4 = Wu}
	padding : bool
		Apply image padding to better process the boundary of the image
	
	"""
	# Get additional values:
	lam = (12.398424 * 10 ** (-7)) / energy # in mm
	mu = 4 * pi * beta / lam
		
	# Replicate-pad the image:
	dim0_o = im.shape[0]
	dim1_o = im.shape[1]
	if (padding):
		n_pad0 = dim0_o
		n_pad1 = dim1_o + dim1_o // 2
	else:
		n_pad0 = dim0_o
		n_pad1 = dim1_o

	# Set the transformed frequencies according to pixelsize:
	rows = n_pad0
	cols = n_pad1
	ulim = arange(-(cols) / 2, (cols) / 2)
	ulim = ulim * (2 * pi / (cols * pixsize))
	vlim = arange(-(rows) / 2, (rows) / 2)  
	vlim = vlim * (2 * pi / (rows * pixsize))
	u,v = meshgrid(ulim, vlim)

	# Apply formula:
	if method == 1:    
		den = 1 + distance * delta / mu * (u * u + v * v) + finfo(float32).eps # Avoids division by zero		
	elif method == 2:
		chi = pi * lam * distance * (u * u + v * v)
		den = (beta / delta) * npcos(chi) + npsin(chi) + finfo(float32).eps # Avoids division by zero		
	elif method == 3:
		chi = pi * lam * distance * (u * u + v * v)
		den = (beta / delta) * npcos(chi) + npsin(chi) + finfo(float32).eps # Avoids division by zero		
	elif method == 4:
		den = 1 + pi * (delta / beta) * lam * distance * (u * u + v * v) + finfo(float32).eps        
		
	# Shift the denominator now:
	den = fftshift(den)

	return {'dim0':dim0_o, 'dim1':dim1_o ,'npad0':n_pad0, 'npad1':n_pad1, 'den':den , 'mu':mu }
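
A usage sketch (not from the source): assuming the unqualified names (`pi`, `arange`, `meshgrid`, `finfo`, `float32`, `fftshift`, plus the `np*` trig aliases) are NumPy imports, a TIE plan could be built like this; `beta`, `delta`, and the geometry are illustrative values only.

import numpy as np
from numpy import pi, arange, meshgrid, finfo, float32, sin as npsin, cos as npcos
from numpy.fft import fftshift

im = np.zeros((512, 512), dtype=np.float32)   # only the shape is used
plan = prepare_plan(im, beta=1e-9, delta=1e-7, energy=20.0,
                    distance=150.0, pixsize=0.0025, method=1, padding=False)
print(plan['den'].shape, plan['mu'])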
Example #20
0
def generalized_lsp_value(times, mags, errs, omega):
    '''Generalized LSP value for a single omega.

    P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS)

    where: YC, YS, CC, and SS are all calculated at T

    and where: tan 2omegaT = 2*CS/(CC - SS)

    and where:

    Y = sum( w_i*y_i )
    C = sum( w_i*cos(wT_i) )
    S = sum( w_i*sin(wT_i) )

    YY = sum( w_i*y_i*y_i ) - Y*Y
    YC = sum( w_i*y_i*cos(wT_i) ) - Y*C
    YS = sum( w_i*y_i*sin(wT_i) ) - Y*S

    CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) )
    CC = CpC - C*C
    SS = (1 - CpC) - S*S
    CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S

    '''

    one_over_errs2 = 1.0 / (errs * errs)

    W = npsum(one_over_errs2)
    wi = one_over_errs2 / W

    sin_omegat = npsin(omega * times)
    cos_omegat = npcos(omega * times)

    sin2_omegat = sin_omegat * sin_omegat
    cos2_omegat = cos_omegat * cos_omegat
    sincos_omegat = sin_omegat * cos_omegat

    # calculate some more sums and terms
    Y = npsum(wi * mags)
    C = npsum(wi * cos_omegat)
    S = npsum(wi * sin_omegat)

    YpY = npsum(wi * mags * mags)

    YpC = npsum(wi * mags * cos_omegat)
    YpS = npsum(wi * mags * sin_omegat)

    CpC = npsum(wi * cos2_omegat)
    # SpS = npsum( wi*sin2_omegat )

    CpS = npsum(wi * sincos_omegat)

    # the final terms
    YY = YpY - Y * Y
    YC = YpC - Y * C
    YS = YpS - Y * S
    CC = CpC - C * C
    SS = 1 - CpC - S * S  # use SpS = 1 - CpC
    CS = CpS - C * S

    # calculate tau
    tan_omega_tau_top = 2.0 * CS
    tan_omega_tau_bottom = CC - SS
    tan_omega_tau = tan_omega_tau_top / tan_omega_tau_bottom
    tau = nparctan(tan_omega_tau) / (2.0 * omega)

    periodogramvalue = (YC * YC / CC + YS * YS / SS) / YY

    return periodogramvalue
Example #21
0
def phrt_plan(im, energy, distance, pixsize, regpar, thresh, method, padding):
	"""Pre-compute data to save time in further execution of phase_retrieval.

	Parameters
	----------
	im : array_like
		Image data as numpy array. Only image size (shape) is actually used.
	
	energy [KeV]: double
		Energy in KeV of the incident X-ray beam.
	
	distance [mm]: double
		Sample-to-detector distance in mm.
	
	pixsize [mm]: double
		Size in mm of the detector element.
	
	regpar: double
		Regularization parameter: RegPar is - log10 of the constant to be added to the denominator
		to regularize the singularity at zero frequency, i.e. 1/sin(x) -> 1/(sin(x)+10^-RegPar). 
		Typical values in the range [2.0, 3.0]. (Suggestion for default: 2.5).
	
	thresh: double
		Parameter for Quasiparticle phase retrieval which defines the width of the rings to be cropped 
		around the zero crossing of the CTF denominator in Fourier space. Typical values in the range
		[0.01, 0.1]. (Suggestion for default: 0.1).
	
	method : int 
		Phase retrieval algorithm {1 = TIE (default), 2 = CTF, 3 = CTF first-half sine, 4 = Quasiparticle, 
		5 = Quasiparticle first half sine}.
	
	padding : bool
		Apply image padding to better process the boundary of the image.

	References
	----------


	Credits
	-------
	Julian Moosmann, KIT (Germany) is acknowledged for this code
	
	"""
	# Adapt input values:
	distance = distance / 1000.0 # Conversion to m
	pixsize = pixsize / 1000.0 # Conversion to m

	# Get additional values:
	lam = 6.62606896e-34*299792458/(energy*1.60217733e-16)
	k	= 2*pi*lam*distance/(pixsize**2)
			
	# Replicate pad image:
	dim0_o = im.shape[0]
	dim1_o = im.shape[1]
	if (padding):		
		n_pad0 = im.shape[0] + im.shape[0] // 2
		n_pad1 = im.shape[1] + im.shape[1] // 2
	else:
		n_pad0 = dim0_o
		n_pad1 = dim1_o

	# Ensure even size:
	if (n_pad0 % 2 == 1):
		n_pad0 = n_pad0 + 1
	if (n_pad1 % 2 == 1):
		n_pad1 = n_pad1 + 1

	# Create coordinates grid:
	xi  = concatenate((arange(0, ceil((n_pad1 - 1)/2.0) + 1) , arange(-(floor((n_pad1 - 1)/2.0)),0))) / n_pad1 
	eta = concatenate((arange(0, ceil((n_pad0 - 1)/2.0) + 1) , arange(-(floor((n_pad0 - 1)/2.0)),0))) / n_pad0

	[u, v] = meshgrid(xi,eta)	
	u      = k*(u*u + v*v)/2.0

	# Filter:
	if method == 1:	# TIE:
		filter = 0.5 / (u + 10**-regpar)
		
	elif method == 2: # CTF:
		v   = npsin(u)
		filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)

	elif method == 3: # CTF first-half sine:
		v   = npsin(u)
		filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)
		filter[ u >= pi ] = 0		

	elif method == 4: # Quasiparticle:
		v   = npsin(u)
		filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)
		filter[ logical_and( u > pi/2.0, fabs(v) < thresh) ] = 0
		
	elif method == 5: # Quasiparticle first half sine:
		v   = npsin(u)
		filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)
		filter[ logical_and(u > pi/2.0, fabs(v) < thresh) ] = 0
		filter[ u >= pi ] = 0

	elif method == 6: #	Projected CTF (alternative implementation):
		v   = npsin(u)
		filter = 0.5 * sign(v) / (fabs(v) + 10**-regpar)		
		tmp    = sign(filter) / (2*(thresh + 10**-regpar))
		msk    = logical_and( u > pi/2.0, fabs(v) < thresh)
		filter = filter*(1 - msk) + tmp*msk
		#filter[ u >= pi ] = 0
		
	# Restore zero frequency component:
	filter[0,0] = 0.5 *10**regpar
	
	return {'dim0':dim0_o, 'dim1':dim1_o ,'npad0':n_pad0, 'npad1':n_pad1, 'filter':filter }