Example #1
def _space_grid(Sm, spacing):
    '''
    Zero everywhere, except for the points at a certain grid interval.
    '''
    Ix, Iy, Iz = np.indices(Sm.shape)
    return Sm * (np.fmod(Ix, spacing) == 0) * (np.fmod(Iy, spacing) == 0) * \
           (np.fmod(Iz, spacing) == 0)
def compute_lscore_over_time(events, historyScores, run, outputfn):
    runlen = max(np.fmod(historyScores[:, 0], Global_BINWIDTH)) + 1
    myrun = run
    myeventids = ["total"]
    myruncounts = [1]
    hidmin = myrun * Global_BINWIDTH
    hidmax = hidmin + runlen
    tmpi = np.where((historyScores[:, 0] >= hidmin) & (historyScores[:, 0] <= hidmax))[0]
    costsarray = np.mean(historyScores[tmpi, 1:3], axis=1)
    totalscores = np.cumsum(np.exp(-1 * Global_K * costsarray))
    mydata = totalscores / totalscores[runlen - 1]
    for event in events:
        event.histories = listout_ranges(event.histRanges)
        myeventids.append(event.id)
        sys.stderr.write("working on event: %s\n" % (event.id))
        myruncounts.append(event.numsims)
        lscores = np.zeros(runlen)
        hids = np.array(event.histories)
        myhids = hids[np.where((hids <= hidmax) & (hids >= hidmin))]
        myhis = np.fmod(myhids, Global_BINWIDTH)  # within-run indices for the filtered ids
        for i in range(myhis.size):
            totalscore = totalscores[myhis[i]]
            lscore = compute_likelihood_histories(myhids[: (i + 1)], historyScores, totalscore)
            lscores[myhis[i]] = lscore
        for i in range(max(myhis), runlen):  # fill the remaining within-run bins
            totalscore = totalscores[i]
            lscore = compute_likelihood_histories(myhids, historyScores, totalscore)
            lscores[i] = lscore
        mydata = np.vstack((mydata, lscores))
    np.savetxt(
        outputfn, mydata.T, delimiter="\t", header="\t".join(myeventids) + "\n" + "\t".join(map(str, myruncounts))
    )
def historyids_to_indices(historyids, historyScores):
    hids = np.array(historyids, dtype=int)
    itr = np.fmod(hids, Global_BINWIDTH)
    sim = hids // Global_BINWIDTH  # integer run index (floor division)
    runlen = max(np.fmod(historyScores[:, 0], Global_BINWIDTH)) + 1
    newi = itr + sim * runlen
    return newi.astype(int)
        def test_infix(lhs, rhs):
            ovl0 = no_op(lhs)

            def test_np(fcn):
                ovl_l = fcn(ovl0, rhs)
                ovl_r = fcn(rhs, ovl0)
                ovl_l, ovl_r = evaluate([ovl_l, ovl_r])
                np_l = fcn(lhs, rhs)
                np_r = fcn(rhs, lhs)

                assert np.all(np.equal(ovl_l, np_l))
                assert np.all(np.equal(ovl_r, np_r))

            test_np(lambda x, y: x + y)
            test_np(lambda x, y: x - y)
            test_np(lambda x, y: x * y)
            test_np(lambda x, y: x / y)
            test_np(lambda x, y: x == y)
            test_np(lambda x, y: x != y)
            test_np(lambda x, y: x < y)
            test_np(lambda x, y: x <= y)
            test_np(lambda x, y: x > y)
            test_np(lambda x, y: x >= y)

            # OVL uses c-style fmod, not python style mod, so use numpy fmod function for test
            # see : http://docs.scipy.org/doc/numpy/reference/generated/numpy.fmod.html
            ovl_left, ovl_right = evaluate([ovl0 % rhs, rhs % ovl0])
            np_left = np.fmod(lhs, rhs)
            np_right = np.fmod(rhs, lhs)

            assert np.all(np.equal(ovl_left, np_left))
            assert np.all(np.equal(ovl_right, np_right))

            ovl_neg = evaluate([-ovl0])
            assert np.all(np.equal(ovl_neg, -lhs))
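A quick way to see the C-style/Python-style distinction the comment above refers to: np.fmod keeps the sign of the dividend, while Python's % operator (and np.mod) keep the sign of the divisor. A minimal standalone check:

import numpy as np

# np.fmod truncates toward zero: the result takes the sign of the dividend.
assert np.fmod(-7, 3) == -1
# Python-style modulo floors: the result takes the sign of the divisor.
assert (-7) % 3 == 2
assert np.mod(-7, 3) == 2
# The two conventions agree whenever dividend and divisor share a sign.
assert np.fmod(7, 3) == (7 % 3) == 1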
 def rotate(self, angle, mask=None):
     """Rotate the grids (arena centered)
     
     Grids to be rotated can be optionally specified by bool/index array
     *mask*; otherwise the whole population is rotated. Specified *angle* can be a
     scalar value to be applied to the population or a population- or
     mask-sized array depending on whether *mask* is specified.
     """
     rot2D = lambda psi: [[cos(psi), sin(psi)], [-sin(psi),  cos(psi)]]
     if mask is not None and type(mask) is np.ndarray:
         if mask.dtype.kind == 'b':
             mask = mask.nonzero()[0]
         if type(angle) is np.ndarray and angle.size == mask.size:
             for i,ix in enumerate(mask):
                 self._phi[ix] = np.dot(self._phi[ix], rot2D(angle[i]))
         elif type(angle) in (int, float, np.float64):
             angle = float(angle)
             self._phi[mask] = np.dot(self._phi[mask], rot2D(angle))
         else:
             raise TypeError('angle must be a mask-sized array or float')
         self._psi[mask] = np.fmod(self._psi[mask]+angle, 2*pi)
     elif mask is None:
         if type(angle) is np.ndarray and angle.size == self.num_maps:
             for i in range(self.num_maps):
                 self._phi[i] = np.dot(self._phi[i], rot2D(angle[i]))
         elif type(angle) in (int, float, np.float64):
             angle = float(angle)
             self._phi = np.dot(self._phi, rot2D(angle))
         else:
             raise TypeError('angle must be a num_maps-sized array or float')
         self._psi = np.fmod(self._psi+angle, 2*pi)
     else:
         raise TypeError('mask must be a bool/index array')
 def _visiting(self, step):
     """
     Assignement of components values based on visiting distribution.
     The way of exploring space depends on the Markov chain stepping
     """
     # It it is the first part of the markov chain
     # Changing all components at the same time
     if step < self._x.size:
         visits = np.array([self._visita() for _ in range(self._x.size)])
         visits[visits > 1.e8] = 1.e8 * self._random_state.random_sample()
         visits[visits < -1e8] = -1.e8 * self._random_state.random_sample()
         self._x = visits + self._xbackup
         a = self._x - self._lower
         b = np.fmod(a, self._xrange) + self._xrange
         self._x = np.fmod(b, self._xrange) + self._lower
         self._x[np.fabs(self._x - self._lower) < 1.e-10] += 1.e-10
     else:
         # Second part of the Markov chain:
         # now change only one component at a time
         visit = self._visita()
         if visit > 1.e8:
             visit = 1.e8 * self._random_state.random_sample()
         elif visit < -1e8:
             visit = -1.e8 * self._random_state.random_sample()
         index = step - self._x.size
         self._x[index] = visit + self._xbackup[index]
         a = self._x[index] - self._lower[index]
         b = np.fmod(
             a, self._xrange[index]) + self._xrange[index]
         self._x[index] = np.fmod(
             b, self._xrange[index]) + self._lower[index]
         if np.fabs(self._x[index] - self._lower[
                 index]) < 1.e-10:
             self._x[index] += 1.e-10
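The two nested fmod calls above implement a common fold-into-bounds idiom: np.fmod alone can return negative values, so adding the range and applying fmod again guarantees a result in [0, range), which is then shifted by the lower bound. A minimal sketch of the same idiom in isolation (the function name is ours, not from the code above):

import numpy as np

def fold_into_bounds(x, lower, upper):
    """Map x periodically into the half-open interval [lower, upper)."""
    width = upper - lower
    a = x - lower
    b = np.fmod(a, width) + width   # strictly positive, possibly > width
    return np.fmod(b, width) + lower

x = np.array([-3.5, 0.0, 2.5, 11.0])
print(fold_into_bounds(x, 0.0, 10.0))  # -> [6.5 0.  2.5 1. ]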
Example #7
def is_leap_year(year, gregorian=True):
    """Return True if this is a leap year in the Julian or Gregorian calendars

    Arguments:
      - `year` : (int) year

    Keywords:
      - `gregorian` : (bool, default=True) If True, use Gregorian calendar,
        else use Julian calendar

    Returns:
      - (bool) True if this is a leap year, else False.

    """
    year = np.atleast_1d(year).astype(np.int64)
    x = np.fmod(year, 4)
    if gregorian:
        y = np.fmod(year, 100)
        z = np.fmod(year, 400)
        return _scalar_if_one(
            np.logical_and(np.logical_not(x),
                           np.logical_or(y, np.logical_not(z))))
    else:
        return _scalar_if_one(x == 0)
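A short usage sketch (assuming the _scalar_if_one helper simply unwraps single-element results): century years are Gregorian leap years only when divisible by 400, and fmod supplies all three divisibility tests.

years = np.array([1900, 2000, 2004, 2100])
print(is_leap_year(years))                   # -> [False  True  True False]
print(is_leap_year(years, gregorian=False))  # Julian rule: divisible by 4 -> all True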
Example #8
def linearinterpolation(nstep, ndata, dt):
    """Used for linear interpolation between transportation matrices.
    Returns weights alpha, beta and indices of matrices.
    Parameters
    -------
    nstep   : int Number of timesteps
    ndata   : int Number of matrices
    dt      : float Timestep (as a fraction of one cycle)
    Returns
    -------
    alpha,beta : array
                    coefficients for interpolation
    jalpha,jbeta : array
                    indices for interpolation
    """
    # Fractional position in the cycle for each step, wrapped into [0, 1)
    t = np.fmod(np.arange(nstep) * dt, 1.0)

    w      = t * ndata + 0.5
    beta   = np.fmod(w, 1.0)
    alpha  = 1.0 - beta
    jalpha = np.fmod(np.floor(w) + ndata - 1.0, ndata).astype(int)
    jbeta  = np.fmod(np.floor(w), ndata).astype(int)

    return alpha, beta, jalpha, jbeta
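A small worked call: the weights always satisfy alpha + beta == 1, and jalpha/jbeta pick circularly adjacent matrices (the concrete numbers below assume the function as written above).

alpha, beta, jalpha, jbeta = linearinterpolation(nstep=4, ndata=12, dt=0.25)
print(alpha + beta)   # -> [1. 1. 1. 1.]
print(jalpha, jbeta)  # -> [11  2  5  8] [0 3 6 9], neighbours modulo ndata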
def fundamental_arguments(t):

    """Compute the fundamental arguments (mean elements) of Sun and Moon.

    `t` - TDB time in Julian centuries since J2000.0, as float or NumPy array

    Outputs fundamental arguments, in radians:
          a[0] = l (mean anomaly of the Moon)
          a[1] = l' (mean anomaly of the Sun)
          a[2] = F (mean argument of the latitude of the Moon)
          a[3] = D (mean elongation of the Moon from the Sun)
          a[4] = Omega (mean longitude of the Moon's ascending node);
                 from Simon section 3.4(b.3),
                 precession = 5028.8200 arcsec/cy

    """
    a = fa4 * t
    a += fa3
    a *= t
    a += fa2
    a *= t
    a += fa1
    a *= t
    a += fa0
    fmod(a, ASEC360, out=a)
    a *= ASEC2RAD
    if getattr(t, 'shape', ()):
        return a
    return a[:,0]
def mod180deg(x):
    if x >= 0:
        retval = fmod(x+180.0, 360.0)-180.0
    else:
        retval = -(fmod(-x+180.0, 360.0)-180.0)
    assert -180.0 <= retval <= 180.0
    return retval
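mod180deg above is scalar because of the if/else branch; a hedged vectorized equivalent (our naming) keeps the same symmetric treatment of negative inputs via np.where:

import numpy as np

def mod180deg_vec(x):
    """Vectorized sketch of mod180deg: wrap angles into [-180, 180]."""
    x = np.asarray(x, dtype=float)
    pos = np.fmod(x + 180.0, 360.0) - 180.0
    neg = -(np.fmod(-x + 180.0, 360.0) - 180.0)
    return np.where(x >= 0, pos, neg)

print(mod180deg_vec([190.0, -190.0, 720.0]))  # -> [-170.  170.    0.]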
def rebin_counts_2D_indexing_new(x, y, I, xo, yo, Io):
    # use this one for 2D.
    csx = np.empty((len(x), len(y)-1))
    csx[0, :] = 0.0
    csx[1:, :] = np.cumsum(I, axis=0)

    xindices = np.interp(xo, x, np.arange(len(x), dtype='float'))
    xindices_whole = np.floor(xindices).astype(int)
    xindices_frac = np.fmod(xindices, 1.0)
    # the only way an index will match the highest bin edge is for lookups at or outside the range of the
    # source.  In this case the fractional portion will always == 0.0, so clipping this to the bin edge below
    # is safe and allows us to use the source weights unmodified
    xintegral = csx[xindices_whole, :] + I[xindices_whole.clip(max=len(x)-2), :]*xindices_frac[:, None]
    # rebinned over x
    ix = np.diff(xintegral, axis=0)

    csy = np.empty((len(xo)-1, len(y)))
    csy[:, 0] = 0.0
    csy[:, 1:] = np.cumsum(ix, axis=1)

    yindices = np.interp(yo, y, np.arange(len(y), dtype='float'))
    yindices_whole = np.floor(yindices).astype(int)
    yindices_frac = np.fmod(yindices, 1.0)
    yintegral = csy[:, yindices_whole] + ix[:, yindices_whole.clip(max=len(y)-2)]*yindices_frac[None, :]
    # rebinned over x and y
    ixy = np.diff(yintegral, axis=1)

    Io += ixy
def plot_fft_results(freq, pow, cum, angles, x, y, freqi, powi, angi):
	plt.figure(figsize=(10,15))
	plt.subplot(5,1,1)
	plt.plot(freq, pow, 'x')
	plt.gca().set_xscale('log')
	plt.xlabel('frequency')
	plt.ylabel('power')
	plt.hlines(powi, plt.xlim()[0], plt.xlim()[1], alpha=0.3)
	plt.subplot(5,1,2)
	plt.plot(freq, cum, 'x')
	plt.gca().set_xscale('log')
	plt.ylabel('cumulative probability')
	plt.xlabel('frequency')
	plt.vlines(freqi, 0, 1, alpha=0.3)
	plt.subplot(5,1,3)
	plt.plot(cum, pow**0.5, 'x')
	plt.xlabel('cumulative probability')
	plt.ylabel('amplitude')
	plt.hlines(powi**0.5, plt.xlim()[0], plt.xlim()[1], alpha=0.3)
	plt.subplot(5,1,4)
	plt.plot(cum, numpy.fmod(angles+10*numpy.pi, 2*numpy.pi), 'x')
	plt.xlabel('cumulative probability')
	plt.ylabel('phase')
	plt.hlines(numpy.fmod(angi+10*numpy.pi, 2*numpy.pi), plt.xlim()[0], plt.xlim()[1], alpha=0.3)
	plt.subplot(5,1,5)
	plt.plot(x, y, 'x')
	plt.xlabel('time')
	plt.ylabel('value')
	xnew = numpy.linspace(x.min(), x.max(), 200)
	ynew = powi**0.5 * numpy.cos(xnew * freqi * numpy.pi * 2 + angi)
	plt.plot(xnew, ynew, '-', alpha=0.5, lw=3, 
		label="freq=%.5f\npow=%.3f\nang=%.3f" % (freqi, powi, angi))
	plt.legend(loc='best', prop=dict(size=10))
Example #13
def fix_ps1_coord_bugs(cols, file, hduname):
	# FIXME: Work around PS1 bugs
	ra, dec = cols['ra'], cols['dec']

	if np.any(ra < 0):
		ra[ra < 0] = np.fmod(np.fmod(ra[ra < 0], 360) + 360, 360)

	if np.any(ra >= 360):
		ra[ra >= 360] = np.fmod(np.fmod(ra[ra >= 360], 360) + 360, 360)

	if np.any(np.abs(dec) > 90):
		logger.warning("Encountered %d instances of dec > +/- 90 in file %s, %s HDU. Truncating to +/-90." % (sum(np.abs(dec) > 90), file, hduname))
		dec[dec > 90] = 90
		dec[dec < -90] = -90

	cols['ra'], cols['dec'] = ra, dec

	# Remove any NaN rows
	if np.isnan(cols['ra'].sum()):
		logger.warning("Encountered %d instances of ra == NaN in file %s, %s HDU. Removing those rows." % (sum(np.isnan(ra)), file, hduname))
		keep = ~np.isnan(cols['ra'])
		for name in cols: cols[name] = cols[name][keep]

	if np.isnan(cols['dec'].sum()):
		logger.warning("Encountered %d instances of dec == NaN in file %s, %s HDU. Removing those rows." % (sum(np.isnan(dec)), file, hduname))
		keep = ~np.isnan(cols['dec'])
		for name in cols: cols[name] = cols[name][keep]
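The nested fmod pattern used for ra above is the standard wrap-to-[0, 360) idiom: the inner fmod may return a negative remainder, and adding 360 before the outer fmod makes the result non-negative. A one-line check of both out-of-range directions:

ra = np.array([-30.0, 370.0, 359.9])
print(np.fmod(np.fmod(ra, 360) + 360, 360))  # -> [330.   10.  359.9]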
Example #14
def _level1_xfm_no_highpass(X, h0o, h1o, ext_mode):
    """Perform level 1 of the 3d transform discarding highpass subbands.

    """
    # Check shape of input according to ext_mode. Note that shape of X is
    # double original input in each direction.
    if ext_mode == 4 and np.any(np.fmod(X.shape, 2) != 0):
        raise ValueError('Input shape should be a multiple of 2 in each direction when self.ext_mode == 4')
    elif ext_mode == 8 and np.any(np.fmod(X.shape, 4) != 0):
        raise ValueError('Input shape should be a multiple of 4 in each direction when self.ext_mode == 8')

    out = np.zeros_like(X)

    # Loop over 2nd dimension extracting 2D slice from first and 3rd dimensions
    for f in range(X.shape[1]):
        # extract slice
        y = X[:, f, :].T
        out[:, f, :] = colfilter(y, h0o).T

    # Loop over 3rd dimension extracting 2D slice from first and 2nd dimensions
    for f in range(X.shape[2]):
        y = colfilter(out[:, :, f].T, h0o).T
        out[:, :, f] = colfilter(y, h0o)

    return out
Example #15
    def render(self, model, params, frame):

        # Scalar animation parameter, based on height and distance
        d = model.edgeCenters[:,2] + 0.5 * model.edgeDistances
        numpy.multiply(d, 1/self.height, d)

        # Add global offset for Z scrolling over time
        numpy.add(d, params.time * self.speed, d)

        # Add an offset that depends on which tree we're in
        numpy.add(d, numpy.choose(model.edgeTree, self.offsets), d)

        # Periodic animation, stored in our color table. Linearly interpolate.
        numpy.fmod(d, self.period, d)
        color = numpy.empty((model.numLEDs, 3))
        color[:,0] = numpy.interp(d, self.colorX, self.colorY[:,0])
        color[:,1] = numpy.interp(d, self.colorX, self.colorY[:,1])
        color[:,2] = numpy.interp(d, self.colorX, self.colorY[:,2])

        # Random flickering noise
        noise = numpy.random.rand(model.numLEDs).reshape(-1, 1)
        numpy.multiply(noise, 0.25, noise)
        numpy.add(noise, 0.75, noise)

        numpy.multiply(color, noise, color)
        numpy.add(frame, color, frame)
Example #16
 def test_impulse_negative(self):
     """ check the transform of a negative impulse at a random place """
     win_s = 256
     i = int(floor(random()*win_s))
     impulse = -.1
     f = fft(win_s)
     timegrain = fvec(win_s)
     timegrain[0] = 0
     timegrain[i] = impulse
     fftgrain = f ( timegrain )
     #self.plot_this ( fftgrain.phas )
     assert_almost_equal ( fftgrain.norm, abs(impulse), decimal = 5 )
     if impulse < 0:
         # phase can be pi or -pi, as it is not unwrapped
         #assert_almost_equal ( abs(fftgrain.phas[1:-1]) , pi, decimal = 6 )
         assert_almost_equal ( fftgrain.phas[0], pi, decimal = 6)
         assert_almost_equal ( np.fmod(fftgrain.phas[-1], pi), 0, decimal = 6)
     else:
         #assert_equal ( fftgrain.phas[1:-1] == 0, True)
         assert_equal ( fftgrain.phas[0], 0)
         assert_almost_equal ( np.fmod(fftgrain.phas[-1], pi), 0, decimal = 6)
     # now check the resynthesis
     synthgrain = f.rdo ( fftgrain )
     #self.plot_this ( fftgrain.phas.T )
     assert_equal ( fftgrain.phas <= pi, True)
     assert_equal ( fftgrain.phas >= -pi, True)
     #self.plot_this ( synthgrain - timegrain )
     assert_almost_equal ( synthgrain, timegrain, decimal = 6 )
Example #17
    def transit_depths(self, p):
        p = self.to_params(p)

        dt = self.times[1] - self.times[0]

        depths = np.zeros(self.times.shape)

        left_bounds = self.times - dt/2.0
        right_bounds = self.times + dt/2.0

        left_time_since_transit = np.fmod(left_bounds - p['t0'], p['P'])
        right_time_since_transit = np.fmod(right_bounds - p['t0'], p['P'])

        left_in_transit = (left_time_since_transit > 0) & (left_time_since_transit < p['T'])
        right_in_transit = (right_time_since_transit > 0) & (right_time_since_transit < p['T'])

        depths[left_in_transit & right_in_transit] = 1.0

        entries = right_in_transit & (~left_in_transit)
        exits = left_in_transit & (~right_in_transit)

        depths[entries] = right_time_since_transit[entries] / dt
        depths[exits] = (p['T']-left_time_since_transit[exits])/dt        

        return depths
Example #18
 def visiting(self, x, step, temperature):
     dim = x.size
     if step < dim:
         # Changing all coordinates with a new visiting value
         visits = np.array([self.visit_fn(
             temperature) for _ in range(dim)])
         upper_sample = self.rs.random_sample()
         lower_sample = self.rs.random_sample()
         visits[visits > self.tail_limit] = self.tail_limit * upper_sample
         visits[visits < -self.tail_limit] = -self.tail_limit * lower_sample
         x_visit = visits + x
         a = x_visit - self.lower
         b = np.fmod(a, self.b_range) + self.b_range
         x_visit = np.fmod(b, self.b_range) + self.lower
         x_visit[np.fabs(
             x_visit - self.lower) < self.min_visit_bound] += 1.e-10
     else:
         # Changing only one coordinate at a time based on Markov chain step
         x_visit = np.copy(x)
         visit = self.visit_fn(temperature)
         if visit > self.tail_limit:
             visit = self.tail_limit * self.rs.random_sample()
         elif visit < -self.tail_limit:
             visit = -self.tail_limit * self.rs.random_sample()
         index = step - dim
         x_visit[index] = visit + x[index]
         a = x_visit[index] - self.lower[index]
         b = np.fmod(a, self.b_range[index]) + self.b_range[index]
         x_visit[index] = np.fmod(b, self.b_range[
             index]) + self.lower[index]
         if np.fabs(x_visit[index] - self.lower[
                 index]) < self.min_visit_bound:
             x_visit[index] += self.min_visit_bound
     return x_visit
def analytic_phase_reset_old(phi, dx=0., dy=0.,
        a=0., l_ccw=0.2, l_cw=-1., X=1., Y=1.):

    # this is an older expression derived with several assumptions
    assert(X == 1.)
    assert(Y == 1.)
    assert(l_cw == -1.)

    r0 = iris_fixedpoint(a, l_ccw, l_cw, X, Y)
    T = iris_period(a, l_ccw, l_cw, X, Y)
    if r0 is None:
        raise RuntimeError("No limit cycle found")
    else:
        quad1or2 = np.fmod(phi, 2 * math.pi) < math.pi
        quad1or3 = np.fmod(phi, math.pi) < math.pi/2
        du = (quad1or3 * dy + (1 - quad1or3) * dx) * (1 - 2*quad1or2)
        ds = (quad1or3 * dx + (1 - quad1or3) * dy) * (
                1 - 2*quad1or2*quad1or3 - 2*(1 - quad1or2)*(1 - quad1or3))
        t = np.fmod(phi, math.pi/2)/(math.pi/2) * T/4
        Q = 1/(l_ccw * r0) * np.exp(-l_ccw * t)
        dt0 = -Q * du
        dr = np.exp(-T/4) * (X * -dt0 + np.exp(t) * ds)
        return (
                (dt0
                + -1./(l_ccw * r0)
                * 1./(1 - 1./(l_ccw * r0) * (r0/Y)**(1/l_ccw)) * dr)
                / T * 2*math.pi
                )
Example #20
	def FindPosition(self, pos, dx, dy):
		"""Finds which index the walker belongs to.
		Implements periodic boundary conditions on the walk-area.
		Should work in 1d as well now."""
		indx = [-1, -1]
		if self.d == 1:
			for i in range(len(self.x) + 1):
				# This test must be implemented in 2D, and in self.checkpos()!
				if np.abs(pos - self.x[i]) < dx / 2.0:
					return i
		elif self.d == 2:
			if pos[0] > self.x1_ or pos[0] < self.x0_:
				print('this should not happen now! pos[x] =', pos[0])
				pos[0] = np.fmod(pos[0], (self.X[0, -1] - self.X[0, 0]) + dx) + self.X[0, 0]
				pos[0] *= ((self.X[0, -1] - self.X[0, 0]) + dx) if pos[0] < 0 else 1
			for i in range(len(self.X)):
				if np.abs(pos[0] - self.X[i, i]) < dx / 2.0:
					indx[0] = i
					break
			if pos[1] > self.y1_ or pos[1] < self.y0_:
				print('this should not happen now, pos[y] =', pos[1])
				pos[1] = np.fmod(pos[1], (self.Y[-1, 0] - self.Y[0, 0]) + dy) + self.Y[0, 0]
				pos[1] *= ((self.Y[-1, 0] - self.Y[0, 0]) + dx) if pos[1] < 0 else 1
			for j in range(len(self.Y)):
				if np.abs(pos[1] - self.Y[j, j]) < dy / 2.0:
					indx[1] = j
					break
			if indx[0] == -1 or indx[-1] == -1:
				print('warning: walker position was not assigned to a grid cell')
			return indx
Example #21
def MJD_to_Gregorian(mjd):
    """Convert Modified Julian Date to the Gregorian calender.

    :param mjd: Modified Julian Date
    :type mjd: float

    :returns: date and time
    :rtype: :func:`tuple` of :func:`str`
    """
    tt = np.fmod(mjd,1)
    hh = tt*24.
    mm = np.fmod(hh,1)*60.
    ss = np.fmod(mm,1)*60.
    ss = "%08.5f"%(ss)
    j = mjd + 2400000.5
    j = int(j)
    j = j - 1721119
    # Floor division (//) keeps this calendar arithmetic integral under Python 3
    y = (4 * j - 1) // 146097
    j = 4 * j - 1 - 146097 * y
    d = j // 4
    j = (4 * d + 3) // 1461
    d = 4 * d + 3 - 1461 * j
    d = (d + 4) // 4
    m = (5 * d - 3) // 153
    d = 5 * d - 3 - 153 * m
    d = (d + 5) // 5
    y = 100 * y + j
    if m < 10:
        m = m + 3
    else:
        m = m - 9
        y = y + 1
    return("%02d/%02d/%02d"%(d,m,y),"%02d:%02d:%s"%(hh,mm,ss))
Example #22
def fold(spike_trains, period):
    """Fold `spike_trains` by `period`."""

    # data = {key:[] for key in spike_trains.dtype.names}

    rows = []
    for i,row in spike_trains.iterrows():
        period_num = int( np.ceil(row['duration'] / period) )
        last_period = np.fmod(row['duration'], period)

        spikes = row['spikes']
        for idx in range(period_num):
            lo = idx * period
            hi = (idx+1) * period
            sec = spikes[(spikes>=lo) & (spikes<hi)]
            sec = np.fmod(sec, period)

            r = row.copy()
            r['spikes'] = sec
            r['duration'] = period

            rows.append(r)

        if last_period > 0:
            rows[-1]['duration'] = last_period


    folded_trains = pd.DataFrame(rows)
    folded_trains = folded_trains.reset_index(drop=True)

    return folded_trains
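A minimal usage sketch with a made-up one-row DataFrame (pandas as pd and numpy as np assumed): spikes at 0.2, 1.3 and 2.1 s folded by a 1 s period land at phases 0.2, 0.3 and 0.1, and the trailing partial period keeps its true duration.

spike_trains = pd.DataFrame([
    {'spikes': np.array([0.2, 1.3, 2.1]), 'duration': 2.5}
])
folded = fold(spike_trains, period=1.0)
print(folded['duration'].values)  # -> [1.  1.  0.5]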
Example #23
 def store_delta_theta(self):
     """Store relative phase vectors for each external cue around the track
     
     This per-timestep method keeps track of the first clock-wise lap of the
     circle-track trajectory, setting *first_lap* to False once the lap is
     completed. At that point, the modulation traits *local_cue_intensity* 
     and *distal_cue_intensity* are activated, effectively switching on the
     phase feedback mechanism.
     
     During the first lap, initial clock-wise cue crossings trigger the 
     storage of the current reference phase-difference vector in association 
     with the corresponding external cue.
     """
     # Set initial alpha value for trajectory on first timestep
     if self._alpha0 == -1:
         self._alpha0 = self.alpha
         self._alpha_halfwave = float(radian(self._alpha0 - pi))
     else:
         # Binary flag logic for determining when the first lap has passed:
         #   Clockwise -> less-than radian comparisons 
         if not self._alpha_hw_flag:
             if self._alpha_prev > self._alpha_halfwave and \
                 self.alpha <= self._alpha_halfwave:
                 self._alpha_hw_flag = True
         elif self._alpha_prev > self._alpha0 and \
             self.alpha <= self._alpha0:
             self.first_lap = False
             self.out('First lap completed at t=%.2fs'%self.t)
     
         # Store phase reference vectors for local cues
         for i in range(self.N_cues_local):
             cue_alpha = \
                 np.fmod(self.local_offset + i*self.cue_spacing_local, 
                     TWO_PI)
             if self._alpha_prev > cue_alpha and \
                 (self.alpha <= cue_alpha or self.alpha-self._alpha_prev > pi):                
                 if self.dtheta_local[i,0] != -99:
                     break
                 self.dtheta_local[i] = \
                     circle_diff_vec(self.theta, TWO_PI*self.omega*self.t)
                 self.cue_t_local[i] = self.t
                 self.out('Stored local cue #%d at t=%.2fs'%(i+1, self.t))
             
         # Store phase reference vectors for distal cues
         for i in range(self.N_cues_distal):
             cue_alpha = \
                 np.fmod(self.distal_offset + i*self.cue_spacing_distal,
                     TWO_PI)
             if self._alpha_prev > cue_alpha and \
                 (self.alpha <= cue_alpha or self.alpha-self._alpha_prev > pi):                
                 if self.dtheta_distal[i,0] != -99:
                     break
                 self.dtheta_distal[i] = \
                     circle_diff_vec(self.theta, TWO_PI*self.omega*self.t)
                 self.cue_t_distal[i] = self.t
                 self.out('Stored distal cue #%d at t=%.2fs'%(i+1, self.t))                
     
     # Store current track alpha for next comparison
     self._alpha_prev = self.alpha
Example #24
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    bandit = base.Bandit({
                'loss': hp_loguniform('lu', -2, 2) +
                    hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
                    hp_quniform('qu', -4.999, 5, 1) +
                    hp_uniform('u', 0, 10)})
    algo = base.Random(bandit)
    trials = base.Trials()
    exp = base.Experiment(trials, algo)
    exp.catch_bandit_exceptions = False
    N = 1000
    exp.run(N)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(idxs.keys())

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example #25
    def filter_times(self, ts, tsundown):
        tsmod = np.fmod(ts, 24.0)

        tsmod = tsmod - np.fmod(tsundown, 24.0)

        tsmod[tsmod < 0] += 24.0

        return ts[tsmod < 12.0]
Example #26
 def sync(self):
     while np.abs(np.fmod(time.time(), 1) - 0.25) > 0.1:
         pass
     while np.abs(np.fmod(time.time(), 1) - 0.25) < 0.1:
         pass
     stt = int(np.ceil(time.time()))
     self._sendget('pps x%x' % stt)
     print("armed at:", stt)
Example #27
 def _rot2lamina(self,rotangle):
     '''Transform an angle from the rotator system of reference into the
     lamina one.'''
     if self.dirRot != 'CW':
         langle = np.fmod(-rotangle+self.zero,360)
     else:
         langle = np.fmod(rotangle-self.zero,360)
     return langle
Example #28
 def _lamina2rot(self,langle):
     '''Transform an angle from the lamina system of reference into the
     rotator one.'''
     if self.dirRot != 'CW':
         rotangle = np.fmod(-langle+self.zero,360)
     else:
         rotangle = np.fmod(langle+self.zero,360)
     return rotangle
 def add_matrix_plot(subp, M, name):
     """Add the specified matrix pcolor"""
     ax = f.add_subplot(subp)
     ax.pcolor(M, cmap=cm.hot)
     axis('image')
     if N.fmod(N.fmod(subp, 230), 3) == 1:
         ax.set_ylabel('A --> B')
     ax.set_xlabel('A --> B [%s]'%name)
Example #30
def shape_context(p, median_dist=None,
                  r_inner=1./8, r_outer=2.,
                  nbins_r=5, nbins_theta=12, nbins_phi=6,
                  outliers=None,
                  make2d=True,
                  sparse=False):
    """
    Computes the shape-context log-polar histograms at each point in p -- the point cloud.
    p is a Nxd matrix of points.
    """
    N, d = p.shape
    assert d==3, "shape_context is implemented only for three dimensions"

    p_mean = np.mean(p, axis=0)
    p_centered = p - p_mean
    T = pca_frame(p)
    R = T[0:3,0:3]
    pt_nd      = np.dot(p_centered, R)
    #pt_nd      = p_centered

    # compute the coordinates : r,theta, phi
    dists    = ssd.pdist(pt_nd, 'euclidean')
    if median_dist is None:
        median_dist = np.median(dists) 
    dists       = dists/median_dist
    dists_nn    = ssd.squareform(dists)

    # theta_nn are in [0,2pi)
    dx_nn, dy_nn, dz_nn  = pt_nd.T[:,None,:]-pt_nd.T[:,:,None]
    theta_nn             = np.arctan2(dy_nn, dx_nn)
    theta_nn             = np.fmod(np.fmod(theta_nn,2*np.pi)+2*np.pi,2*np.pi)

    # phi_nn are in [-pi/2, pi/2]
    dist_xy_nn    = np.sqrt(np.square(dx_nn) + np.square(dy_nn))
    phi_nn        = np.arctan2(dz_nn, dist_xy_nn)

    # define histogram edges
    r_edges     = np.concatenate(([0], loglinspace(r_inner, r_outer, nbins_r)))
    theta_edges = np.linspace(0, 2*np.pi, nbins_theta+1)
    phi_edges   = np.linspace(-np.pi/2, np.pi/2, nbins_phi+1) 

    combined_3nn = np.array([dists_nn, theta_nn, phi_nn])
    
    # compute the bins : 4 dimensional matrix.
    # r,t,p are the number of bins of radius, theta, phi
    sc_nrtp = np.zeros((N, nbins_r, nbins_theta, nbins_phi))
    for i in range(N):
        hist, edges = np.histogramdd(combined_3nn[:,i,:].T, bins=[r_edges, theta_edges, phi_edges])
        sc_nrtp[i,:,:,:] = hist

    if make2d:
        sc_nrtp = sc_nrtp.reshape(N, nbins_r*nbins_theta*nbins_phi)

    if sparse:  # convert to sparse representation
        import scipy.sparse as ssp  # the boolean argument `sparse` shadows any module of that name
        sc_nrtp = sc_nrtp.reshape(N, nbins_r*nbins_theta*nbins_phi)
        sc_nrtp = ssp.csc_matrix(sc_nrtp)

    return sc_nrtp 
Example #31
def avanceEtTourneEtAvance(t):
    mod = np.fmod(t, 1)
    if mod < 0.5:
        return avance(t)
    else:
        return avanceEtTourne(t)
Example #32
def u_v_2_wd_ws(u, v):
    ws = np.sqrt(u * u + v * v)
    tmp = 270.0 - np.arctan2(v, u) * 180 / np.pi
    wd = np.fmod(tmp, 360.)
    return wd, ws
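A usage sketch under the usual meteorological convention (wind *from* the north is 0/360 degrees, from the east 90 degrees):

import numpy as np

u = np.array([0.0, -3.0])   # eastward wind components
v = np.array([-2.0, 0.0])   # northward wind components
wd, ws = u_v_2_wd_ws(u, v)
print(wd)  # -> [ 0. 90.]  northerly, then easterly
print(ws)  # -> [2. 3.]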
Example #33
def phase_exposure(start_time, stop_time, period, nbin=16, gtis=None):
    """Calculate the exposure on each phase of a pulse profile.

    Parameters
    ----------
    start_time, stop_time : float
        Starting and stopping time (or phase if ``period==1``)
    period : float
        The pulse period (if 1, equivalent to phases)

    Returns
    -------
    expo : array of floats
        The normalized exposure of each bin in the pulse profile (1 is the
        highest exposure, 0 the lowest)

    Other parameters
    ----------------
    nbin : int, optional, default 16
        The number of bins in the profile
    gtis : [[gti00, gti01], [gti10, gti11], ...], optional, default None
        Good Time Intervals
    """
    if gtis is None:
        gtis = np.array([[start_time, stop_time]])

    # Use precise floating points -------------
    start_time = np.longdouble(start_time)
    stop_time = np.longdouble(stop_time)
    period = np.longdouble(period)
    gtis = np.array(gtis, dtype=np.longdouble)
    # -----------------------------------------

    expo = np.zeros(nbin)
    phs = np.linspace(0, 1, nbin + 1)
    phs = np.array(list(zip(phs[0:-1], phs[1:])))

    # Discard gtis outside [start, stop]
    good = np.logical_and(gtis[:, 0] < stop_time, gtis[:, 1] > start_time)
    gtis = gtis[good]

    for g in gtis:
        g0 = g[0]
        g1 = g[1]
        if g0 < start_time:
            # If the start of the fold is inside a gti, start from there
            g0 = start_time
        if g1 > stop_time:
            # If the end of the fold is inside a gti, end there
            g1 = stop_time
        length = g1 - g0
        # How many periods inside this length?
        nraw = length / period
        # How many integer periods?
        nper = nraw.astype(int)

        # First raw exposure: the number of periods
        expo += nper / nbin

        # FRACTIONAL PART =================
        # What remains is additional exposure for part of the profile.
        start_phase = np.fmod(g0 / period, 1)
        end_phase = nraw - nper + start_phase

        limits = [[start_phase, end_phase]]
        # start_phase is always < 1. end_phase not always. In this case...
        if end_phase > 1:
            limits = [[0, end_phase - 1], [start_phase, 1]]

        for l in limits:
            l0 = l[0]
            l1 = l[1]
            # Discards bins untouched by these limits
            goodbins = np.logical_and(phs[:, 0] <= l1, phs[:, 1] >= l0)
            idxs = np.arange(len(phs), dtype=int)[goodbins]
            for i in idxs:
                start = np.max([phs[i, 0], l0])
                stop = np.min([phs[i, 1], l1])
                w = stop - start
                expo[i] += w

    return expo / np.max(expo)
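A quick sanity check (our example, not from the source): with gap-free coverage every phase bin accumulates the same exposure, so the normalized profile is flat.

expo = phase_exposure(start_time=0.0, stop_time=10.0, period=1.0, nbin=4)
print(expo)  # -> [1. 1. 1. 1.] when there are no GTI gaps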
Example #34
NumBeads = int(sys.argv[4])

ncol = naxis + (natom - 1) * 3  # We have considered costheta, phi, chi
print(str(ncol + 1) + 'th column')

#Import the data files here
file1 = "/work/tapas/linear-rotors-PIMC/PIMC-RotDOFs-Rpt10.05Angstrom-DipoleMoment" + str(
    DipoleMoment
) + "Debye-beta0.05Kinv-Blocks20000-Passes200-System2HF-e0vsbeads" + str(
    NumBeads) + "/results/output_instant.dof"

#Data processing with NumPy
x1 = loadtxt(file1, unpack=True, usecols=[ncol])

if (naxis != 0):
    x1 = np.fabs(np.fmod(x1, 2.0 * pi))

#pyplot calling first
fig = plt.figure()
plt.grid(True)

#X and Y labels
if (naxis != 0):
    plt.xlabel('bins of ' + r'$\phi$')
    fname = 'Phi'
else:
    plt.xlabel('bins of ' + r'$\cos(\theta)$')
    fname = 'CosTheta'

plt.ylabel('Density')
Example #35
def plot_sky_binned(ra, dec, weights=None, data=None, plot_type='grid',
                    max_bin_area=5, clip_lo=None, clip_hi=None, verbose=False,
                    cmap='viridis', colorbar=True, label=None, basemap=None):
    """Show objects on the sky using a binned plot.

    Bin values either show object counts per unit sky area or, if an array
    of associated data values is provided, mean data values within each bin.
    Objects can have associated weights.

    Requires that matplotlib and basemap are installed. When plot_type is
    "healpix", healpy must also be installed.

    Parameters
    ----------
    ra : array
        Array of object RA values in degrees. Must have the same shape as
        dec and will be flattened if necessary.
    dec : array
        Array of object DEC values in degrees. Must have the same shape as
        ra and will be flattened if necessary.
    weights : array or None
        Optional array of weights associated with each object.  All objects
        are assumed to have equal weight when this is None.
    data : array or None
        Optional array of scalar values associated with each object. The
        resulting plot shows the mean data value per bin when data is
        specified.  Otherwise, the plot shows counts per unit sky area.
    plot_type : str
        Must be either 'grid' or 'healpix', and selects whether data is
        binned in healpix or in (sin(DEC), RA).
    max_bin_area : float
        The bin size will be chosen automatically to be as close as
        possible to this value but not exceeding it.
    clip_lo : float or str
        Clipping is applied to the plot data calculated as counts / area
        or the mean data value per bin. See :func:`prepare_data` for
        details.
    clip_hi : float or str
        Clipping is applied to the plot data calculated as counts / area
        or the mean data value per bin. See :func:`prepare_data` for
        details.
    verbose : bool
        Print information about the automatic bin size calculation.
    cmap : colormap name or object
        Matplotlib colormap to use for mapping data values to colors.
    colorbar : bool
        Draw a colorbar below the map when True.
    label : str or None
        Label to display under the colorbar.  Ignored unless colorbar is True.
    basemap : Basemap object or None
        Use the specified basemap or create a default basemap using
        :func:`init_sky` when None.

    Returns
    -------
    basemap
        The basemap used for the plot, which will match the input basemap
        provided, or be a newly created basemap if None was provided.
    """
    ra = np.asarray(ra).reshape(-1)
    dec = np.asarray(dec).reshape(-1)
    if len(ra) != len(dec):
        raise ValueError('Arrays ra,dec must have same size.')

    plot_types = ('grid', 'healpix',)
    if plot_type not in plot_types:
        raise ValueError(
            'Invalid plot_type, should be one of {0}.'
            .format(', '.join(plot_types)))

    if data is not None and weights is None:
        weights = np.ones_like(data)

    if plot_type == 'grid':
        # Convert the maximum pixel area to steradians.
        max_bin_area = max_bin_area * (np.pi / 180.) ** 2

        # Pick the number of bins in cos(DEC) and RA to use.
        n_cos_dec = int(np.ceil(2 / np.sqrt(max_bin_area)))
        n_ra = int(np.ceil(4 * np.pi / max_bin_area / n_cos_dec))
        # Calculate the actual pixel area in sq. degrees.
        bin_area = 360 ** 2 / np.pi / (n_cos_dec * n_ra)
        if verbose:
            print(
                'Using {0} x {1} grid in cos(DEC) x RA'.format(n_cos_dec, n_ra),
                'with pixel area {:.3f} sq.deg.'.format(bin_area))

        # Calculate the bin edges in degrees.
        ra_edges = np.linspace(-180., +180., n_ra + 1)
        dec_edges = np.degrees(np.arcsin(np.linspace(-1., +1., n_cos_dec + 1)))

        # Put RA values in the range [-180, 180).
        ra = np.fmod(ra, 360.)
        ra[ra >= 180.] -= 360.

        # Histogram the input coordinates.
        counts, _, _ = np.histogram2d(
            dec, ra, [dec_edges, ra_edges], weights=weights)

        if data is None:
            grid_data = counts / bin_area
        else:
            sums, _, _ = np.histogram2d(
                dec, ra, [dec_edges, ra_edges], weights=weights * data)
            # This ratio might result in some nan (0/0) or inf (1/0) values,
            # but these will be masked by prepare_data().
            settings = np.seterr(all='ignore')
            grid_data = sums / counts
            np.seterr(**settings)

        grid_data = prepare_data(
            grid_data, clip_lo=clip_lo, clip_hi=clip_hi)

        basemap = plot_grid_map(
            grid_data, ra_edges, dec_edges, cmap, colorbar, label, basemap)

    elif plot_type == 'healpix':

        import healpy as hp

        for n in range(1, 25):
            nside = 2 ** n
            bin_area = hp.nside2pixarea(nside, degrees=True)
            if bin_area <= max_bin_area:
                break
        npix = hp.nside2npix(nside)
        nest = False
        if verbose:
            print(
                'Using healpix map with NSIDE={0}'.format(nside),
                'and pixel area {:.3f} sq.deg.'.format(bin_area))

        pixels = hp.ang2pix(nside, np.radians(90 - dec), np.radians(ra), nest)
        counts = np.bincount(pixels, weights=weights, minlength=npix)
        if data is None:
            grid_data = counts / bin_area
        else:
            sums = np.bincount(pixels, weights=weights * data, minlength=npix)
            grid_data = np.zeros_like(sums, dtype=float)
            nonzero = counts > 0
            grid_data[nonzero] = sums[nonzero] / counts[nonzero]

        grid_data = prepare_data(grid_data, clip_lo=clip_lo, clip_hi=clip_hi)

        basemap = plot_healpix_map(
            grid_data, nest, cmap, colorbar, label, basemap)

    return basemap
Example #36
    # PRESTO de-disperses at the high frequency channel so determine a
    # correction to the middle of the band
    if not events:
        subpersumsub = fold_pfd.nsub / numsubbands
        # Calculate the center of the summed subband freqs and delays
        sumsubfreqs = (Num.arange(numsubbands)+0.5)*subpersumsub*fold_pfd.subdeltafreq + \
                      (fold_pfd.lofreq-0.5*fold_pfd.chan_wid)
        # Note:  In the following, we cannot use fold_pfd.hifreqdelay since that
        #        is based on the _barycentric_ high frequency (if the barycentric
        #        conversion was available).  For TOAs, we need a topocentric
        #        delay, which is based on the topocentric frequency fold_pfd.hifreq
        sumsubdelays = (
            psr_utils.delay_from_DM(fold_pfd.bestdm, sumsubfreqs) -
            psr_utils.delay_from_DM(fold_pfd.bestdm, fold_pfd.hifreq))
        sumsubdelays_phs = Num.fmod(sumsubdelays / p_dedisp, 1.0)
        # Save the "higest channel within a subband" freqs/delays for use in
        # later DM/timing correction. PBD 2011/11/03
        sumsubfreqs_hi = sumsubfreqs + \
                fold_pfd.subdeltafreq/2.0 - fold_pfd.chan_wid/2.0
        subdelays2 = psr_utils.delay_from_DM(fold_pfd.bestdm, sumsubfreqs) - \
                psr_utils.delay_from_DM(fold_pfd.bestdm, sumsubfreqs_hi)

    else:
        fold_pfd.subfreqs = Num.asarray([0.0])
        sumsubfreqs = Num.asarray([0.0])
        sumsubdelays = Num.asarray([0.0])

    # Read the template profile
    if templatefilenm is not None:
        template = psr_utils.read_profile(templatefilenm, normalize=1)
Example #37
def write_r2_in(in_fname='R2.in',
                survey_name=None,
                r2_options=None,
                num_regions_flag=1,
                electrode_array=None,
                startingRfile=None,
                fwd_resis=None,
                inv_dict=inv_defaults,
                node_dict=None,
                ncols=10,
                reg_elems=None):
    '''Write R2.in.'''
    with open(in_fname, 'w') as f:

        # Header information
        header = "Project: {}, created using pyres on {}\n".format(\
                    survey_name,time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()))

        f.write(header)
        f.write("    {}    {}   {}    {}    {}  << job_type, mesh_type, flux_type, singularity_type, res_matrix \n".format(\
                                                    r2_options['job_type'],
                                                    r2_options['mesh_type'],
                                                    r2_options['flux_type'],
                                                    r2_options['singular_type'],
                                                    r2_options['res_matrix']))
        f.write("\n")  # blank space

        # Mesh-dependent inputs
        if int(r2_options['mesh_type']) == 4:
            f.write("{0:10.0f}  {1:10.0f}  << numnp_x, numnp_y \n".format(
                node_dict['xx'].shape[0], node_dict['yy'].shape[0]))
            f.write("\n")
            # Write x coordinates
            for ival, xval in enumerate(node_dict['xx']):
                f.write("{0:10.4f}  ".format(xval))
                if ival == len(node_dict['xx']) - 1:
                    f.write(" << xx \n")
                elif np.fmod(ival + 1, ncols) == 0:
                    f.write("\n")

            f.write("\n")  # blank space

            # Write topography

            if node_dict['topog'] is None:
                node_dict['topog'] = np.zeros_like(node_dict['xx'])

            for ival, tval in enumerate(node_dict['topog']):
                f.write("{0:10.4f}  ".format(tval))
                if ival == len(node_dict['topog']) - 1:
                    f.write(" << topog \n")
                elif np.fmod(ival + 1, ncols) == 0:
                    f.write("\n")

            f.write("\n")

            # Write y (i.e., depth below topog) coordinates - positive down
            for ival, yval in enumerate(node_dict['yy']):
                f.write("{0:10.4f}  ".format(yval))
                if ival == len(node_dict['yy']) - 1:
                    f.write(" << yy \n")
                elif np.fmod(ival + 1, ncols) == 0:
                    f.write("\n")

        elif int(r2_options['mesh_type']) == 5:
            f.write("{0:10.0f}  {1:10.0f}  << numnp_x, numnp_y \n".format(
                node_dict['xx'].shape[0], node_dict['yy'].shape[0]))
            f.write("\n")
            # Write x coordinates
            for ival, xval in enumerate(node_dict['xx']):
                f.write("{0:10.4f}  ".format(xval))
                if ival == len(node_dict['xx']) - 1:
                    f.write(" << xx \n")
                elif np.fmod(ival + 1, ncols) == 0:
                    f.write("\n")

            f.write("\n")  # blank space

            # Write y (i.e., depth below topog) coordinates - positive down
            for icol in range(node_dict['yy'].shape[1]):
                for ival, irow in enumerate(range(node_dict['yy'].shape[0])):
                    f.write("{0:10.4f}  ".format(node_dict['yy'][irow, icol]))
                    if ival == node_dict['yy'].shape[0] - 1:
                        f.write(" << yy for xx column = {}\n".format(icol))
                    elif np.fmod(ival + 1, ncols) == 0:
                        f.write("\n")

        elif int(r2_options['mesh_type']) == 3:
            f.write("    {}  << mesh scale \n".format(inv_dict['mesh_scale']))

        f.write("\n")  # blank space
        # Line 11
        f.write("    {}  << num_regions\n".format(num_regions_flag))

        if num_regions_flag == 0 and startingRfile is not None:
            if len(startingRfile) > 15:
                print("Warning: filename must be 15 characters or less!")

            f.write("{}\n".format(startingRfile))
            f.write("\n")  # blank space
        else:
            f.write("\n")  # blank space
            if num_regions_flag == 1 and reg_elems is None:
                reg_elems = [[
                    electrode_array[0, 1], electrode_array[-1, 1], fwd_resis
                ]]

            for iregion, reg_row in enumerate(reg_elems):
                f.write(
                    "{0:10.0f}  {1:10.0f}  {2:10.2f}  << elem_1, elem_2, res_value\n"
                    .format(*reg_row))

        # Method-specific components
        if r2_options['job_type'] == 1 or r2_options['job_type'] in [
                'inverse', 'Inverse', 'inv', 'I', 'i'
        ]:  # Inverse model options
            f.write("\n")  # blank space

            if int(r2_options['mesh_type']) in [4, 5]:
                f.write(
                    "{0:7.0f}  {1:5.0f}  << no. patches in x, no. patches in y\n"
                    .format(*inv_dict['patch_size_xy']))
                if inv_dict['patch_size_xy'][0] == 0 and inv_dict[
                        'patch_size_xy'][1] == 0:
                    f.write(
                        "{0:7.0f}  {1:5.0f}  << num_param_x, num_param_y\n".
                        format(len(inv_dict['npxy'][0]),
                               len(inv_dict['npxy'][1])))

                    # Write mesh parameters in x
                    f.write("{0:7.0f}  ".format(inv_dict['npxystart'][0]))
                    for ival, npxval in enumerate(inv_dict['npxy'][0]):
                        f.write("{}  ".format(npxval))
                        if ival == len(inv_dict['npxy'][0]) - 1:
                            f.write(" << npxval \n")
                        elif np.fmod(ival + 1, ncols) == 0 and ival > 0:
                            f.write("\n")

                    f.write("\n")  # blank space

                    # Write mesh parameters in y
                    f.write("{0:7.0f}  ".format(inv_dict['npxystart'][1]))
                    for ival, npyval in enumerate(inv_dict['npxy'][1]):
                        f.write("{}  ".format(npyval))
                        if ival == len(inv_dict['npxy'][1]) - 1:
                            f.write(" << npyval \n")
                        elif np.fmod(ival + 1, ncols) == 0 and ival > 0:
                            f.write("\n")

            f.write("\n")  # blank space

            # Line 18
            f.write(
                "    {0:3.0f}  {1:5.2f}  << inverse_type, target_decrease\n".
                format(inv_dict['inverse_type'], inv_dict['target_decrease']))
            f.write("\n")  # blank space
            if inv_dict['inverse_type'] == 3:
                f.write("{} << qual_ratio\n".format(inv_dict['qual_ratio']))
                f.write("\n")  # blank space

                f.write("{} {} << rho_min, rho_max\n".format(
                    inv_dict['rho_min'], inv_dict['rho_max']))
                f.write("\n")  # blank space

            else:
                # Inverse model options
                # Regularization, Line 21
                f.write("{0:6.0f}{1:5.0f}  << data_type, reg_mode \n".format(
                    inv_dict['data_type'], inv_dict['reg_mode']))
                f.write("\n")  # blank space

                if inv_dict['reg_mode'] in [0, 2]:  # normal regularization
                    inv_type_txt = "     ".join([
                        str(tempval) for tempval in [
                            inv_dict['tolerance'], inv_dict['max_iterations'],
                            inv_dict['error_mod'], inv_dict['alpha_aniso']
                        ]
                    ])

                else:  # 1 = background regularisation
                    inv_type_txt = "     ".join([
                        str(tempval) for tempval in [
                            inv_dict['tolerance'], inv_dict['max_iterations'],
                            inv_dict['error_mod'], inv_dict['alpha_aniso'],
                            inv_dict['alpha_s']
                        ]
                    ])
                # Write line 8, inverse type
                f.write(
                    "{}  << tolerance, max_iterations, error_mod, alpha_aniso, (alpha_s)\n"
                    .format(inv_type_txt))
                f.write("\n")  # blank space

                # Error variance model parameters, Line 23 (offset error, relative error)
                model_var_params = [
                    inv_dict['a_wgt'], inv_dict['b_wgt'], inv_dict['rho_min'],
                    inv_dict['rho_max']
                ]
                f.write("{}  {}  {}  {}  << a_wgt, b_wgt, rho_min, rho_max\n".
                        format(*model_var_params))
                f.write("\n")  # blank space

                if 'param_symbol' in inv_dict.keys():
                    # Iterate over the 2D parameter-symbol table row by row
                    for paramy_row in inv_dict['param_symbol']:
                        for paramx_val in paramy_row:
                            f.write("{}  ".format(paramx_val))
                        f.write(" \n")
                    f.write(" \n")

        # Bounding polyline for output region, eventually default to foreground outline, Line 25
        f.write("{0:d}  << number of points in polyline\n".format(
            int(inv_dict['num_xy_poly'])))

        if inv_dict['num_xy_poly'] > 0:
            # Write poly coordinates if at least one output polyline
            for ipolyxy in inv_dict['xy_poly']:
                f.write("{}  {}  << x_poly, y_poly\n".format(
                    ipolyxy[0], ipolyxy[1]))
        f.write("\n")  # blank space
        f.write("\n")  # blank space

        # Write electrode information
        f.write("{}  << num_electrodes\n".format(electrode_array.shape[0]))
        if r2_options['mesh_type'] == 3:
            for elec_row in electrode_array:
                f.write("{0:8.0f}  {1:8.0f}  << electrode number, mesh node\n".
                        format(*elec_row))
        else:
            for elec_row in electrode_array:
                f.write(
                    "{0:8.0f}  {1:8.0f}  {2:8.0f}  << electrode number, column, row\n"
                    .format(*elec_row))

        f.write("\n")  # blank space
Example #38
def periodic(domain, idx, iaxis=[0, 1, 2]):
    Nx = domain['Nx']
    for i in iaxis:
        idx[i] = np.fmod(idx[i], Nx[i])
        idx[i][idx[i] < 0] += Nx[i]
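A short demonstration (our example, assuming a domain of 8 cells per axis): fmod wraps indices past the top edge, and the follow-up correction lifts the negative remainders fmod leaves behind.

import numpy as np

domain = {'Nx': [8, 8, 8]}
idx = [np.array([-1, 3, 9]), np.array([0, 7, 8]), np.array([4, -2, 16])]
periodic(domain, idx)
print(idx[0], idx[1], idx[2])  # -> [7 3 1] [0 7 0] [4 6 0]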
Example #39
 def testTruncateModFloat(self):
   nums, divs = self.floatTestData()
   tf_result = math_ops.truncatemod(nums, divs)
   np_result = np.fmod(nums, divs)
   self.assertAllEqual(tf_result, np_result)
Example #40
 def testTruncateModFloat(self):
     nums, divs = self.floatTestData()
     with self.test_session():
         tf_result = math_ops.truncatemod(nums, divs).eval()
         np_result = np.fmod(nums, divs)
         self.assertAllEqual(tf_result, np_result)
Example #41
def column_incremental_stable_principal_component_pursuit(
    c,
    U,
    sv,
    l=None,
    s=None,
    rtol=1e-12,
    maxiter=1000,
    delta=1e-12,
    ls=1.,
    rho=1.,
    update_basis=False,
    adjust_basis_every=np.nan,
    forget=1.,
    max_rank=np.inf,
    min_sv=0.,
    orth_eps=1e-12,
    orthogonalize_basis=False,
    nesterovs_momentum=False,
    restart_every=np.nan,
    prox_ls=lambda q, r, c: prox.ind_l2ball(q, r, c),
    prox_l=lambda q, th, U: prox.squ_l2_from_subspace(q, U, th),
    prox_s=lambda q, ls: prox.l1(q, ls)):
    """
    Column incremental online stable principal component pursuit (OnlSPCP)
    performs the low-rank and sparse matrix approximation by solving
    ([l;s], [zls; zl; zs]) = arg min_(x,z)  g_ls(z_ls) + g_l(z_l) + g_s(z_s)
                                s.t. [zls; zl; zs] = [I I; I O; O I] * [l; s].
    Here, by default,
    g_ls(z_ls) = indicator function, i.e., zero if ||c - z_ls||_2 <= delta, infinity otherwise,
    g_l(z_l) = 0.5 * ||(I-U*U')*z_l||_2^2,
    g_s(z_s) = ||ls.*z_s||_1

    Parameters
    ----------
    c : ndarray, shape (`m`,)
        `m`-dimensional vector to be decomposed into `l` and `s` such that ||d-(l+s)||_2<=delta.
    U : ndarray, shape (`m`,`r`)
        `m` x `r` matrix of left singular vectors approximately spanning the subspace of low-rank components
        (overwritten with the update if update_basis is True).
    sv : array_like, shape ('r',)
        `r`-dimensional vector of singular values
        (overwritten with the update if update_basis is True).
    l : array_like, shape (`m`,), optional, default None
        Initial guess of `l`. If None, `c`-`s` is used.
    s : array_like, shape (`m`,), optional, default None
        Initial guess of `s`. If None, `s` is numpy.zeros_like(c) is used.
    rtol : scalar, optional, default 1e-12
        Relative convergence tolerance of `y` and `z` in ADMM, i.e., the primal and dual residuals.
    maxiter : int, optional, default 1000
        Maximum iterations.
    delta : scalar, optional, default 1e-12
        l2-ball radius used in the indicator function for the approximation error.
    ls : scalar or 1d array, optional, default 1.
        Weight of sparse regularizer.  `ls` can be a 1d array of weights for the entries of `s`.
    rho : scalar, optional, default 1.
        Augmented Lagrangian parameter.
    update_basis : bool, optional, default False
        Update `U` and `sv` with `l` after convergence.
    adjust_basis_every : int, optional, default `np.nan`
        Temporarily update `U` with `l` every `adjust_basis_every` iterations in the ADMM loop. If `np.nan`, this is disabled.
    forget : scalar, optional, default 1.
        Forgetting parameter in updating `U`.
    max_rank : int, optional, default np.inf
        Maximum rank. `U.shape[1]` and `sv.shape[0]` won't be greater than `max_rank`.
    min_sv : scalar, optional, default 0.
        Singular values smaller than `min_sv` are neglected. `sv` >= max(`sv`)*abs(`min_sv`) if `min_sv` is negative.
    orth_eps : scalar, optional, default 1e-12
        Rank increases if the magnitude of `c` in the orthogonal subspace is larger than `orth_eps`.
    orthogonalize_basis : bool, optional, default False
        If True, perform QR decomposition to orthogonalize `U`.
    nesterovs_momentum : bool, optional, default False
        Nesterov acceleration.
    restart_every : int, optional, default `np.nan`
        Restart the Nesterov acceleration every `restart_every` iterations. If `np.nan`, this is disabled.
    prox_ls : function, optional, default `spmlib.proxop.ind_l2ball`
        Proximity operator as a Python function for the regularizer g_ls of `z_ls` = `l`+`s`. By default, `prox_ls` is `lambda q,r,c: spmlib.proxop.ind_l2ball(q,r,c)`, i.e., the prox. of the indicator function of the l2-ball with radius `r` and center `c`.
    prox_l : function, optional, default `spmlib.proxop.squ_l2_from_subspace`
        Proximity operator as a Python function for the regularizer g_l of `z_l` = `l`. By default, `prox_l` is `lambda q,th,U: spmlib.proxop.squ_l2_from_subspace(q,U,th)`, i.e., the prox. of the distance function defined as 0.5*(squared l2 distance between `l` and span`U`).
    prox_s : function, optional, default `spmlib.proxop.l1`
        Proximity operator as a Python function for the regularizer g_s of `z_s` = `s`. By default, `prox_s` is `lambda q,ls:spmlib.proxop.l1(q,ls)`, i.e., the soft thresholding operator as the prox. of l1 norm ||ls.*z_s||_1.

    Returns
    -------
    l : ndarray, shape (`m`,)
        Low-rank component.
    s : ndarray, shape (`m`,)
        Sparse component
    U : ndarray
        Matrix of left singular vectors.
    sv : ndarray
        Vector of singular values.
    count : int
        Iteration count.

    References
    ----------
    Tomoya Sakai, Shun Ogawa, and Hiroki Kuhara
    "Sequential decomposition of 3D apparent motion fields basedon low-rank and sparse approximation"
    APSIPA2017 (to appear).

    Example
    -------
    >>> from spmlib import solver as sps
    >>> U, sv = np.empty([0,0]), np.empty(0)  # initialize
    >>> L, S = np.zeros(C.shape), np.zeros(C.shape)
    >>> for j in range(n):
    >>>     L[:,j], S[:,j], U, sv = sps.column_incremental_stable_principal_component_pursuit(C[:,j], U, sv, ls=0.5, update_basis=True, max_rank=50, orth_eps=linalg.norm(C[:,j])*1e-12)[:4]

    """
    m = c.shape[0]

    # initialize l and s
    if s is None:
        s = np.zeros_like(c)  # np.zeros(m, dtype=c.dtype)
    if l is None:
        l = c.ravel() - s

    if sv.size == 0:
        U, sv, V = linalg.svd(np.atleast_2d(c.T).T, full_matrices=False)
        return l, s, U, sv, 0

    # G = lambda x: np.concatenate((x[:m]+x[m:], x[:m], x[m:]))
    # x = np.concatenate((l,s))
    x = np.zeros(2 * m, dtype=c.dtype)
    x[:m] = l
    x[m:] = s

    # z = G(x)
    z = np.zeros(3 * m, dtype=c.dtype)
    z[:m] = x[:m] + x[m:]
    z[m:2 * m] = x[:m]
    z[2 * m:] = x[m:]

    y = np.zeros_like(z)  # np.zeros(3*m, dtype=c.dtype)

    t = 1.
    count = 0
    Ut = U
    while count < maxiter:
        count += 1

        if np.fmod(count, restart_every) == 0:
            t = 1.
        if nesterovs_momentum:
            told = t
            t = 0.5 * (1. + sqrt(1. + 4. * t * t))

        # update x
        #dx = x.copy()
        q = z - y
        x[:m] = (1. / 3.) * (q[:m] + 2. * q[m:2 * m] - q[2 * m:])
        x[m:] = (1. / 3.) * (q[:m] - q[m:2 * m] + 2. * q[2 * m:])
        #dx = x - dx

        # q = G(x) + y
        q[:m] = x[:m] + x[m:] + y[:m]
        q[m:2 * m] = x[:m] + y[m:2 * m]
        q[2 * m:] = x[m:] + y[2 * m:]

        # update z
        if np.fmod(count, adjust_basis_every) == 0:
            Ut = column_incremental_SVD(x[:m],
                                        U,
                                        sv,
                                        forget=forget,
                                        max_rank=max_rank,
                                        min_sv=min_sv,
                                        orth_eps=orth_eps,
                                        orthogonalize_basis=False)[0]
        dz = z.copy()
        z[:m] = prox_ls(q[:m], delta, c.ravel())
        z[m:2 * m] = prox_l(q[m:2 * m], 1. / rho, Ut)
        z[2 * m:] = prox_s(q[2 * m:], ls / rho)
        dz = z - dz

        # update y
        #y = y + G(x) - z
        dy = y.copy()
        y[:m] += x[:m] + x[m:] - z[:m]
        y[m:2 * m] += x[:m] - z[m:2 * m]
        y[2 * m:] += x[m:] - z[2 * m:]
        dy = y - dy

        # Nesterov acceleration
        if nesterovs_momentum:
            z = z + ((told - 1.) / t) * dz
            y = y + ((told - 1.) / t) * dy

        # check convergence of primal and dual residuals
        if linalg.norm(dy) < rtol * linalg.norm(y) and linalg.norm(
                dz) < rtol * linalg.norm(z):
            break

    l = x[:m]
    s = x[m:]
    if update_basis:
        U, sv = column_incremental_SVD(l,
                                       U,
                                       sv,
                                       forget=forget,
                                       max_rank=max_rank,
                                       min_sv=min_sv,
                                       orth_eps=orth_eps,
                                       orthogonalize_basis=orthogonalize_basis)

    return l, s, U, sv, count
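A minimal standalone check (not part of spmlib) that the closed-form x-update used in the ADMM loop above is the least-squares solution x = (G'G)^{-1} G'(z - y) for the consensus operator G = [I I; I O; O I]:

import numpy as np

m = 4
q = np.random.default_rng(0).standard_normal(3 * m)  # plays the role of z - y
G = np.block([[np.eye(m), np.eye(m)],
              [np.eye(m), np.zeros((m, m))],
              [np.zeros((m, m)), np.eye(m)]])
x_ls = np.linalg.lstsq(G, q, rcond=None)[0]
x_cf = np.empty(2 * m)
x_cf[:m] = (q[:m] + 2.0 * q[m:2 * m] - q[2 * m:]) / 3.0
x_cf[m:] = (q[:m] - q[m:2 * m] + 2.0 * q[2 * m:]) / 3.0
assert np.allclose(x_ls, x_cf)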
Exemple #42
0
 "bitwise_or":
 lambda c1, c2: c1.bitwiseOR(c2),
 "bitwise_xor":
 lambda c1, c2: c1.bitwiseXOR(c2),
 "copysign":
 F.pandas_udf(lambda s1, s2: np.copysign(s1, s2), DoubleType()),
 "float_power":
 F.pandas_udf(lambda s1, s2: np.float_power(s1, s2), DoubleType()),
 "floor_divide":
 F.pandas_udf(lambda s1, s2: np.floor_divide(s1, s2), DoubleType()),
 "fmax":
 F.pandas_udf(lambda s1, s2: np.fmax(s1, s2), DoubleType()),
 "fmin":
 F.pandas_udf(lambda s1, s2: np.fmin(s1, s2), DoubleType()),
 "fmod":
 F.pandas_udf(lambda s1, s2: np.fmod(s1, s2), DoubleType()),
 "gcd":
 F.pandas_udf(lambda s1, s2: np.gcd(s1, s2), DoubleType()),
 "heaviside":
 F.pandas_udf(lambda s1, s2: np.heaviside(s1, s2), DoubleType()),
 "hypot":
 F.hypot,
 "lcm":
 F.pandas_udf(lambda s1, s2: np.lcm(s1, s2), DoubleType()),
 "ldexp":
 F.pandas_udf(lambda s1, s2: np.ldexp(s1, s2), DoubleType()),
 "left_shift":
 F.pandas_udf(lambda s1, s2: np.left_shift(s1, s2), LongType()),
 "logaddexp":
 F.pandas_udf(lambda s1, s2: np.logaddexp(s1, s2), DoubleType()),
 "logaddexp2":
    def get_direction_matrix(self, julianTime, seasonInt, drot1, rot2, rot3):
        drot1 = np.array(drot1)
        rot2 = np.array(rot2)
        rot3 = np.array(rot3)

        # Determine the season and roll offset based on the julianTime.
        rollTimeData = self.rollTimeModel.get(julianTime,
                                              ["rollOffsets", "seasons"])
        rollOffset = rollTimeData[:, 0]
        seasonInt = rollTimeData[:, 1]

        deg2rad = np.pi / 180.0

        # Calculate the Direction Cosine Matrix to transform from RA and Dec to FPA coordinates
        rot1 = rm.get_parameters(
            'nominalClockingAngle') + rollOffset + seasonInt * 90.0
        rot1 = rot1 + drot1
        # add optional offset in X'-axis rotation
        rot1 = rot1 + 180
        # Need to account for 180 deg rotation of field due to imaging of mirror
        rot1 = np.fmod(rot1, 360)
        # make small if rot1 < -360 or rot1 > 360

        if (rot1.size != rot2.size) | (rot1.size != rot3.size):
            rot2 = np.tile(rot2, rot1.shape)
            rot3 = np.tile(rot3, rot1.shape)

        rot1 = ru.make_col(rot1)
        rot2 = ru.make_col(rot2)
        rot3 = ru.make_col(rot3)

        srac = np.sin(rot3 * deg2rad)
        # sin phi 3 rotation
        crac = np.cos(rot3 * deg2rad)
        # cos phi
        sdec = np.sin(rot2 * deg2rad)
        # sin theta 2 rotation Note 2 rotation is negative of dec in right hand sense
        cdec = np.cos(rot2 * deg2rad)
        # cos theta
        srotc = np.sin(rot1 * deg2rad)
        # sin psi 1 rotation
        crotc = np.cos(rot1 * deg2rad)
        # cos psi

        # Extract the focal plane geometry constants
        geometryData = self.geomModel.get(["chipTrans", "chipOffset"])
        chipTrans = np.transpose(
            np.reshape(geometryData[:, 0], (3, 42), order='F').copy())
        chipOffset = np.transpose(
            np.reshape(geometryData[:, 1], (3, 42), order='F').copy())

        #     DCM for a 3-2-1 rotation, Wertz p764
        DCM11 = cdec * crac
        DCM12 = cdec * srac
        DCM13 = -sdec
        DCM21 = -crotc * srac + srotc * sdec * crac
        DCM22 = crotc * crac + srotc * sdec * srac
        DCM23 = srotc * cdec
        DCM31 = srotc * srac + crotc * sdec * crac
        DCM32 = -srotc * crac + crotc * sdec * srac
        DCM33 = crotc * cdec

        #     Calculate DCM for each chip relative to center of FOV
        nModules2 = rm.get_parameters('nModules') * 2
        DCM11c = np.zeros((nModules2, 1))
        DCM12c = np.zeros((nModules2, 1))
        DCM13c = np.zeros((nModules2, 1))
        DCM21c = np.zeros((nModules2, 1))
        DCM22c = np.zeros((nModules2, 1))
        DCM23c = np.zeros((nModules2, 1))
        DCM31c = np.zeros((nModules2, 1))
        DCM32c = np.zeros((nModules2, 1))
        DCM33c = np.zeros((nModules2, 1))

        #    print(chipTrans)
        for i in range(nModules2):  # step through each chip
            srac = np.sin(deg2rad * chipTrans[i, 0])
            # sin phi 3 rotation
            crac = np.cos(deg2rad * chipTrans[i, 0])
            # cos phi
            sdec = np.sin(deg2rad * chipTrans[i, 1])
            # sin theta 2 rotation
            cdec = np.cos(deg2rad * chipTrans[i, 1])
            # cos theta
            srotc = np.sin(deg2rad * (chipTrans[i, 2] + chipOffset[i, 0]))
            # sin psi 1 rotation includes rotation offset
            crotc = np.cos(deg2rad * (chipTrans[i, 2] + chipOffset[i, 0]))
            # cos psi

            # DCM for a 3-2-1 rotation, Wertz p762
            DCM11c[i, 0] = cdec * crac
            DCM12c[i, 0] = cdec * srac
            DCM13c[i, 0] = -sdec
            DCM21c[i, 0] = -crotc * srac + srotc * sdec * crac
            DCM22c[i, 0] = crotc * crac + srotc * sdec * srac
            DCM23c[i, 0] = srotc * cdec
            DCM31c[i, 0] = srotc * srac + crotc * sdec * crac
            DCM32c[i, 0] = -srotc * crac + crotc * sdec * srac
            DCM33c[i, 0] = crotc * cdec

        return DCM11, DCM12, DCM13, DCM21, DCM22, DCM23, DCM31, DCM32, DCM33, DCM11c, DCM12c, DCM13c, DCM21c, DCM22c, DCM23c, DCM31c, DCM32c, DCM33c, chipOffset
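A standalone sketch (hypothetical values) of the rot1 wrapping step above: np.fmod keeps the result in (-360, 360) but does not force it to be positive, which is all the DCM construction needs.

import numpy as np

rot1 = np.array([370.0, -450.0, 180.0, 725.0])
print(np.fmod(rot1, 360))  # [ 10. -90. 180.   5.]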
Exemple #44
0
def spline(path_to_files, num_pl=4, korder=5, prefix='', is_day=True):
    '''
    Integrate the motion equations for the fast variables
    using B-spline interpolation.
    '''
    from numpy import fmod, log10, sqrt, pi
    from scipy.interpolate import splint, splev, splrep
    # set of step size in days or years
    SIZE_STEP = 365.25 if is_day else 1.
    # length of the motion equations for the fast variables
    NTERMS = 25000
    # names of files
    fnames = ['q' + str(i + 1) + '.txt' for i in range(num_pl)]
    # reading of planetary masses
    f = open(path_to_files + 'gm.txt', 'r')
    s = f.read().split()
    f.close()
    mu = float(s[0])
    gmass = [float(s[i]) for i in range(1, num_pl + 2)]
    # mass parameters
    mm, am, zo = [], [], []
    zm = [gmass[i] / gmass[0] for i in range(len(gmass))][1:]
    for i in range(num_pl):
        mm.append(1. + sum(zm[:i + 1]))
    zz = [zm[0] / (mm[0] * mu)]  # reduced mass
    zk = [gmass[0] * mm[0]]  # gravitational parameter
    for i in range(1, num_pl):
        zz.append(zm[i] * mm[i - 1] / (mm[i] * mu))
    for i in range(1, num_pl):
        zk.append(gmass[0] * mm[i] / mm[i - 1])
    # reading of initial values of averaged elements
    f = open(path_to_files + 'el.txt', 'r')
    s = f.read().split()
    f.close()
    lm, xm, ym, um, vm, qm = [], [], [], [], [], []
    for i in range(num_pl):
        lm.append(float(s[6 * i + 0]))
        xm.append(float(s[6 * i + 1]))
        ym.append(float(s[6 * i + 2]))
        um.append(float(s[6 * i + 3]))
        vm.append(float(s[6 * i + 4]))
        qm.append(float(s[6 * i + 5]))
    for i in range(num_pl):
        am.append(lm[i]**2 * (zz[i]**-2) / zk[i])
        zo.append(sqrt(zk[i] * am[i]**-3))
    # print of initial values
    print '*** initial longitudes and mean motions of planets ***'
    temp2 = 'l = '
    for i in range(num_pl):
        temp2 += str('%.16f' % qm[i]).rjust(20, ' ')
    print temp2
    temp2 = 'n = '
    for i in range(num_pl):
        temp2 += str('%.16f' % zo[i]).rjust(20, ' ')
    print temp2
    # input data for the integration
    step = int(raw_input('> length of one step: '))
    step_ch = str(step)
    number = int(raw_input('> number of steps:    '))
    number_ch = str(int(log10(float(number * step))))
    # reading series for right hands of the motion equations
    num = [0 for i in range(num_pl)]
    rdot = [[0 for i in range(NTERMS)] for j in range(num_pl)]
    idot = [[[0 for k in range(4 * num_pl)] for i in range(NTERMS)]
            for j in range(num_pl)]
    for j in range(num_pl):
        f = open(path_to_files + fnames[j], 'r')
        s = f.read().split('\n')
        f.close()
        for k in range(len(s)):
            if k == 0:
                num[j] = int(s[0])
                print fnames[j][:-4], num[j]
            else:
                r = s[k].split()
                for i in range(len(r)):
                    if i == 0:
                        rdot[j][k] = float(r[0])
                    else:
                        idot[j][k][i - 1] = int(r[i])
    # reading of averaged orbital elements (integration results)
    f = open(
        path_to_files + 'result_1e' + number_ch + '_' + step_ch + prefix +
        '_pe.txt', 'r')
    s = f.read().split('\n')
    f.close()
    t = []
    adot = [[] for j in range(num_pl)]
    for n in range(number + 1):
        li, xi, yi, ui, vi = [], [], [], [], []
        r = s[n + 1].split()
        t.append(float(r[0]) * SIZE_STEP)
        for i in range(num_pl):
            li.append(float(r[5 * i + 1]))
            xi.append(float(r[5 * i + 2]))
            yi.append(float(r[5 * i + 3]))
            ui.append(float(r[5 * i + 4]))
            vi.append(float(r[5 * i + 5]))
        for j in range(num_pl):
            temp2 = 0
            for k in range(num[j]):
                temp = 1.
                for i in range(num_pl):
                    temp *=  xi[i]**idot[j][k][0+4*i]*yi[i]**idot[j][k][1+4*i]\
                      *ui[i]**idot[j][k][2+4*i]*vi[i]**idot[j][k][3+4*i]
                temp2 = temp2 + rdot[j][k] * temp
            adot[j].append(temp2)
    n = n + 1
    # start of the integration process
    f = open(
        path_to_files + 'result_1e' + number_ch + '_' + step_ch + prefix +
        '_qe.txt', 'w')
    temp2 = 't'
    for i in range(num_pl):
        temp2 += ' alpha' + str(i + 1)
    f.write(temp2 + '\n')
    temp2 = str(int(t[0] / SIZE_STEP)).rjust(12, ' ')
    for i in range(num_pl):
        temp2 += '\t' + str('%.16f' % (qm[i] * 180. / pi)).rjust(21, ' ')
    temp2 += '\n'
    f.write(temp2)
    # find B-splines for the interpolation
    tck = [splrep(t, adot[j], k=korder) for j in range(num_pl)]
    # the integration from t(1) to t(i)
    for i in range(1, n):
        temp = 2 * i * pi
        qi = [splint(t[0], t[i], tck[j]) for j in range(num_pl)]
        for j in range(num_pl):
            qi[j] += (qm[j] + fmod(zo[j] * (t[i] - t[0]), 2 * pi))
        for j in range(num_pl):
            qi[j] = fmod(qi[j] + temp, 2 * pi)
        temp2 = str(int(t[i] / SIZE_STEP)).rjust(12, ' ')
        for j in range(num_pl):
            temp2 += '\t' + str('%.16f' % (qi[j] * 180. / pi)).rjust(21, ' ')
        temp2 += '\n'
        f.write(temp2)
    f.close()
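A hedged demo of the phase-wrapping idiom in the loop above: because fmod keeps the sign of its first argument, a large positive multiple of 2*pi (temp = 2*i*pi) is added first so the wrapped phase lands in [0, 2*pi).

from numpy import fmod, pi

q = -3.5                 # a phase that drifted negative
i = 10                   # step index, as in the loop above
print(fmod(q + 2 * i * pi, 2 * pi))  # ~2.783, now in [0, 2*pi)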
Exemple #45
0
    im_gt = np.arctan2(im_dy, im_dx)

    # define colors in BGR corresponding to the directions and index
    #                       0               1              2           3          -1
    #                      W/E            NE/SW           N/S        NW/SE       empty
    #                      red            white           blue       green       black
    COLORS = np.array([(0, 0, 255), (255, 255, 255), (255, 0, 0), (0, 255, 0),
                       (0, 0, 0)])

    # flip the negative angles
    im_gt = np.where(im_gt < 0, im_gt + np.pi, im_gt)

    # map the angles to directions in the way that
    # [0, pi/4) [pi/4, pi/2)    [pi/2, 3pi/4)    [3pi/4, pi)
    #   W/E        NE/SW            N/S             NW/SE
    im_gt = np.fmod((im_gt + np.pi / 8), np.pi)

    # map the angles to direction indices 0 to 3
    im_dir = (im_gt // (np.pi / 4)).astype(int)

    # remove the pixels with 0 magnitude
    im_dir = np.where((im_gm == 0), -1, im_dir)

    # remove the pixels on the border
    im_dir[:, 0] = im_dir[:, -1] = im_dir[0, :] = im_dir[-1, :] = -1

    # generate foo_dir image
    out_img_dir = np.where(im_gm < 1.0, -1, im_dir).astype(int)
    out_img_dir = COLORS[out_img_dir]

    # map the max gradient to 255
Exemple #46
0
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from numpy import pi

def pol2cart(phi, rho):
    x = rho * np.cos(phi)
    y = rho * np.sin(phi)
    return(x, y)

s = np.sqrt(2)*0.0196
theta = 0
phi = 0
total = 15000
curx = np.zeros((total,1))
cury = np.zeros((total,1))
xn = 0
yn = 0

for n in range(0,total):
    theta = np.fmod(theta+2*pi*s,2*pi)
    phi = np.fmod(phi,2*pi)+theta
    x,y = pol2cart(phi,1)
    xn = x+xn
    yn = y+yn
    curx[n] = xn
    cury[n] = yn

fig = plt.figure()
ax = fig.add_subplot(111)
line = Line2D(curx, cury, linewidth=0.5)
ax.add_line(line)
ax.set_xlim(np.min(curx), np.max(curx))
ax.set_ylim(np.min(cury), np.max(cury))
plt.axis('equal')
plt.show()
Exemple #47
0
def plot_sky_circles(ra_center, dec_center, field_of_view=3.2, data=None,
                     cmap='viridis', facecolors='skyblue', edgecolor='none',
                     colorbar=True, colorbar_ticks=None, label=None,
                     basemap=None):
    """Plot circles on an all-sky projection.

    Pass the optional data array through :func:`prepare_data` to select a
    subset to plot and clip the color map to specified values or percentiles.

    Requires that matplotlib and basemap are installed.

    Parameters
    ----------
    ra_center : array
        1D array of RA in degrees at the centers of each circle to plot.
    dec_center : array
        1D array of DEC in degrees at the centers of each circle to plot.
    field_of_view : array
        Full sky opening angle in degrees of the circles to plot. The default
        is appropriate for a DESI tile.
    data : array or None
        1D array of data associated with each circle, used to set its facecolor.
    cmap : colormap name or object
        Matplotlib colormap to use for mapping data values to colors. Ignored
        unless data is specified.
    facecolors : matplotlib color or array of colors
        Ignored when data is specified. An array must have one entry per circle
        or a single value is used for all circles.
    edgecolor : matplotlib color
        The edge color used for all circles.  Use 'none' to hide edges.
    colorbar : bool
        Draw a colorbar below the map when True and data is provided.
    colorbar_ticks : list or None
        Use the specified colorbar ticks or determine them automatically
        when None.
    label : str or None
        Label to display under the colorbar.  Ignored unless a colorbar is
        displayed.
    basemap : BasemapWithEllipse or None
        An instance of the BasemapWithEllipse class, normally obtained by
        calling :func:`init_sky`.  Create a default basemap when None.

    Returns
    -------
    basemap
        The basemap used for the plot, which will match the input basemap
        provided, or be a newly created basemap if None was provided.
    """
    import matplotlib.pyplot as plt
    import matplotlib.colors
    import matplotlib.cm

    ra_center = np.asarray(ra_center)
    dec_center = np.asarray(dec_center)
    if len(ra_center.shape) != 1:
        raise ValueError('Invalid ra_center, must be a 1D array.')
    if len(dec_center.shape) != 1:
        raise ValueError('Invalid dec_center, must be a 1D array.')
    if len(ra_center) != len(dec_center):
        raise ValueError('Arrays ra_center, dec_center must have same size.')

    if data is not None:
        data = prepare_data(data)
        # Facecolors are determined by the data, when specified.
        if data.shape != ra_center.shape:
            raise ValueError('Invalid data shape, must match ra_center.')
        # Colors associated with masked values in data will be ignored later.
        try:
            # Normalize the data using its vmin, vmax attributes, if present.
            norm = matplotlib.colors.Normalize(vmin=data.vmin, vmax=data.vmax)
        except AttributeError:
            # Otherwise use the data limits.
            norm = matplotlib.colors.Normalize(vmin=data.min(), vmax=data.max())
        cmapper = matplotlib.cm.ScalarMappable(norm, cmap)
        facecolors = cmapper.to_rgba(data)
    else:
        colorbar = False
        # Try to repeat a single fixed color for all circles.
        try:
            facecolors = np.tile(
                [matplotlib.colors.colorConverter.to_rgba(facecolors)],
                (len(ra_center), 1))
        except ValueError:
            # Assume that facecolor is already an array.
            facecolors = np.asarray(facecolors)

    if len(facecolors) != len(ra_center):
        raise ValueError('Invalid facecolor array.')

    if basemap is None:
        basemap = init_sky()

    if basemap.lonmin + 360 != basemap.lonmax:
        raise RuntimeError('Can only handle all-sky projections for now.')

    if len(ra_center) == 0:
        return

    # Convert field-of-view angle into dDEC, dRA.
    dDEC = 0.5 * field_of_view
    dRA = dDEC / np.cos(np.radians(dec_center))

    # Identify circles that wrap around the map edges in RA.
    edge_dist = np.fmod(ra_center - basemap.lonmin, 360)
    wrapped = np.minimum(edge_dist, 360 - edge_dist) < 1.05 * dRA

    # Set the number of vertices for approximating the ellipse based
    # on the field of view.
    n_pt = max(8, int(np.ceil(field_of_view)))

    # Loop over non-wrapped circles.
    for ra, dec, dra, fc in zip(ra_center[~wrapped], dec_center[~wrapped],
                                dRA[~wrapped], facecolors[~wrapped]):
        basemap.ellipse(ra, dec, dra, dDEC, n_pt, facecolor=fc,
                        edgecolor=edgecolor)

    if colorbar:
        mappable = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
        mappable.set_array(data)
        bar = plt.colorbar(
            mappable, ax=basemap.ax, orientation='horizontal',
            spacing='proportional', pad=0.01, aspect=50,
            ticks=colorbar_ticks)
        if label:
            bar.set_label(label)

    return basemap
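A standalone sketch of the RA wrap test used above, with hypothetical values; it assumes ra_center lies within one revolution of lonmin, so np.fmod folds the edge distance into [0, 360).

import numpy as np

lonmin = -180.0
ra_center = np.array([-179.5, 0.0, 179.8])
dRA = np.array([1.0, 1.0, 1.0])
edge_dist = np.fmod(ra_center - lonmin, 360)
wrapped = np.minimum(edge_dist, 360 - edge_dist) < 1.05 * dRA
print(wrapped)  # [ True False  True]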
Exemple #48
0
def wrap_angle_rad_inplace(angle):
    Modulo = np.fmod(angle, 2 * np.pi)  # C-style remainder; sign follows angle
    neg_wrap, pos_wrap = Modulo < -np.pi, Modulo > np.pi
    angle[neg_wrap] = Modulo[neg_wrap] + 2 * np.pi
    angle[pos_wrap] = Modulo[pos_wrap] - 2 * np.pi
    angle[~(neg_wrap | pos_wrap)] = Modulo[~(neg_wrap | pos_wrap)]
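A hedged usage sketch for the function above: angles are wrapped in place to [-pi, pi].

import numpy as np

a = np.array([3.5, -3.5, 0.1, 7.0])
wrap_angle_rad_inplace(a)
print(a)  # [-2.783  2.783  0.1    0.717]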
Exemple #49
0
def makegammas(nzrad):
    """
    Make "Gamma" matrices which can be used to determine first derivative
    of Zernike matrices (Noll 1976).

    Parameters:
        nzrad: Number of Zernike radial orders to calculate Gamma matrices for

    Return:
        ndarray: Array with x, then y gamma matrices
    """
    n = [0]
    m = [0]
    tt = [1]
    trig = 0

    for p in range(1, nzrad + 1):
        for q in range(p + 1):
            if (numpy.fmod(p - q, 2) == 0):
                if (q > 0):
                    n.append(p)
                    m.append(q)
                    trig = not (trig)
                    tt.append(trig)
                    n.append(p)
                    m.append(q)
                    trig = not (trig)
                    tt.append(trig)
                else:
                    n.append(p)
                    m.append(q)
                    tt.append(1)
                    trig = not (trig)
    nzmax = len(n)

    #for j in range(nzmax):
    #print j+1, n[j], m[j], tt[j]

    gamx = numpy.zeros((nzmax, nzmax), "float32")
    gamy = numpy.zeros((nzmax, nzmax), "float32")

    # Gamma x
    for i in range(nzmax):
        for j in range(i + 1):

            # Rule a:
            if (m[i] == 0 or m[j] == 0):
                gamx[i, j] = numpy.sqrt(2.0) * numpy.sqrt(
                    float(n[i] + 1) * float(n[j] + 1))
            else:
                gamx[i, j] = numpy.sqrt(float(n[i] + 1) * float(n[j] + 1))

            # Rule b:
            if m[i] == 0:
                if ((j + 1) % 2) == 1:
                    gamx[i, j] = 0.0
            elif m[j] == 0:
                if ((i + 1) % 2) == 1:
                    gamx[i, j] = 0.0
            else:
                if (((i + 1) % 2) != ((j + 1) % 2)):
                    gamx[i, j] = 0.0

            # Rule c:
            if abs(m[j] - m[i]) != 1:
                gamx[i, j] = 0.0

            # Rule d - all elements positive therefore already true

    # Gamma y
    for i in range(nzmax):
        for j in range(i + 1):

            # Rule a:
            if (m[i] == 0 or m[j] == 0):
                gamy[i, j] = numpy.sqrt(2.0) * numpy.sqrt(
                    float(n[i] + 1) * float(n[j] + 1))
            else:
                gamy[i, j] = numpy.sqrt(float(n[i] + 1) * float(n[j] + 1))

            # Rule b:
            if m[i] == 0:
                if ((j + 1) % 2) == 0:
                    gamy[i, j] = 0.0
            elif m[j] == 0:
                if ((i + 1) % 2) == 0:
                    gamy[i, j] = 0.0
            else:
                if (((i + 1) % 2) == ((j + 1) % 2)):
                    gamy[i, j] = 0.0

            # Rule c:
            if abs(m[j] - m[i]) != 1:
                gamy[i, j] = 0.0

            # Rule d:
            if m[i] == 0:
                pass  # line 1
            elif m[j] == 0:
                pass  # line 1
            elif m[j] == (m[i] + 1):
                if ((i + 1) % 2) == 1:
                    gamy[i, j] *= -1.  # line 2
            elif m[j] == (m[i] - 1):
                if ((i + 1) % 2) == 0:
                    gamy[i, j] *= -1.  # line 3
            else:
                pass  # line 4

#    FITS.Write(gamx, 'gamma_x.fits')
#    FITS.Write(gamy, 'gamma_y.fits')
    return numpy.array([gamx, gamy])
Exemple #50
0
def ConvertVnedToTrkGsVs(vn, ve, vz):
    angle = 360 + np.arctan2(ve, vn) * 180 / np.pi
    trk = np.fmod(angle, 360)
    gs = np.sqrt(vn**2 + ve**2)
    vs = -vz
    return (trk, gs, vs)
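A hedged usage sketch: a due north-east velocity gives a 45-degree track, and the vertical speed flips sign from the down-positive vz convention.

trk, gs, vs = ConvertVnedToTrkGsVs(1.0, 1.0, -2.0)
print(trk, gs, vs)  # 45.0  1.414...  2.0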
Exemple #51
0
    def __init__(self, filename):
        self.pfd_filename = filename
        infile = open(filename, "rb")
        # See if the .bestprof file is around
        try:
            self.bestprof = bestprof(filename + ".bestprof")
        except IOError:
            self.bestprof = 0
        swapchar = '<'  # this is little-endian
        data = infile.read(5 * 4)
        testswap = struct.unpack(swapchar + "i" * 5, data)
        # This is a hack to try and test the endianness of the data.
        # None of the 5 values should be a large positive number.
        if (Num.fabs(Num.asarray(testswap))).max() > 100000:
            swapchar = '>'  # this is big-endian
        (self.numdms, self.numperiods, self.numpdots, self.nsub, self.npart) = \
                      struct.unpack(swapchar+"i"*5, data)
        (self.proflen, self.numchan, self.pstep, self.pdstep, self.dmstep, \
         self.ndmfact, self.npfact) = struct.unpack(swapchar+"i"*7, infile.read(7*4))

        self.filenm = infile.read(
            struct.unpack(swapchar + "i", infile.read(4))[0])
        self.candnm = infile.read(
            struct.unpack(swapchar + "i", infile.read(4))[0]).decode("utf-8")
        self.telescope = infile.read(
            struct.unpack(swapchar + "i", infile.read(4))[0]).decode("utf-8")
        self.pgdev = infile.read(
            struct.unpack(swapchar + "i", infile.read(4))[0])
        test = infile.read(16)
        if not test[:8] == b"Unknown" and b':' in test:
            self.rastr = test[:test.find(b'\0')]
            test = infile.read(16)
            self.decstr = test[:test.find(b'\0')]
        else:
            self.rastr = "Unknown"
            self.decstr = "Unknown"
            if b':' not in test:
                infile.seek(-16, 1)  # rewind the file before the bad read
        (self.dt, self.startT) = struct.unpack(swapchar + "dd",
                                               infile.read(2 * 8))
        (self.endT, self.tepoch, self.bepoch, self.avgvoverc, self.lofreq, \
         self.chan_wid, self.bestdm) = struct.unpack(swapchar+"d"*7, infile.read(7*8))
        # The following "fixes" (we think) the observing frequency of the Spigot
        # based on tests done by Ingrid on 0737 (comparing it to GASP)
        # The same sorts of corrections should be made to WAPP data as well...
        # The tepoch corrections are empirically determined timing corrections
        # Note that epoch is only double precision and so the floating
        # point accuracy is ~1 us!
        if self.telescope == 'GBT':
            if (Num.fabs(Num.fmod(self.dt, 8.192e-05)) < 1e-12 and \
                ("spigot" in filename.lower() or "guppi" not in filename.lower()) and \
                (self.tepoch < 54832.0)):
                sys.stderr.write("Assuming SPIGOT data...\n")
                if self.chan_wid == 800.0 / 1024:  # Spigot 800 MHz mode 2
                    self.lofreq -= 0.5 * self.chan_wid
                    # original values
                    #if self.tepoch > 0.0: self.tepoch += 0.039334/86400.0
                    #if self.bestprof: self.bestprof.epochf += 0.039334/86400.0
                    # values measured with 1713+0747 wrt BCPM2 on 13 Sept 2007
                    if self.tepoch > 0.0: self.tepoch += 0.039365 / 86400.0
                    if self.bestprof:
                        self.bestprof.epochf += 0.039365 / 86400.0
                elif self.chan_wid == 800.0 / 2048:
                    self.lofreq -= 0.5 * self.chan_wid
                    if self.tepoch < 53700.0:  # Spigot 800 MHz mode 16 (downsampled)
                        if self.tepoch > 0.0: self.tepoch += 0.039352 / 86400.0
                        if self.bestprof:
                            self.bestprof.epochf += 0.039352 / 86400.0
                    else:  # Spigot 800 MHz mode 14
                        # values measured with 1713+0747 wrt BCPM2 on 13 Sept 2007
                        if self.tepoch > 0.0: self.tepoch += 0.039365 / 86400.0
                        if self.bestprof:
                            self.bestprof.epochf += 0.039365 / 86400.0
                elif self.chan_wid == 50.0 / 1024 or self.chan_wid == 50.0 / 2048:  # Spigot 50 MHz modes
                    self.lofreq += 0.5 * self.chan_wid
                    # Note: the offset has _not_ been measured for the 2048-lag mode
                    if self.tepoch > 0.0: self.tepoch += 0.039450 / 86400.0
                    if self.bestprof:
                        self.bestprof.epochf += 0.039450 / 86400.0
        (self.topo_pow, tmp) = struct.unpack(swapchar + "f" * 2,
                                             infile.read(2 * 4))
        (self.topo_p1, self.topo_p2, self.topo_p3) = struct.unpack(swapchar+"d"*3, \
                                                                   infile.read(3*8))
        (self.bary_pow, tmp) = struct.unpack(swapchar + "f" * 2,
                                             infile.read(2 * 4))
        (self.bary_p1, self.bary_p2, self.bary_p3) = struct.unpack(swapchar+"d"*3, \
                                                                   infile.read(3*8))
        (self.fold_pow, tmp) = struct.unpack(swapchar + "f" * 2,
                                             infile.read(2 * 4))
        (self.fold_p1, self.fold_p2, self.fold_p3) = struct.unpack(swapchar+"d"*3, \
                                                                   infile.read(3*8))
        # Save current p, pd, pdd
        # NOTE: Fold values are actually frequencies!
        self.curr_p1, self.curr_p2, self.curr_p3 = \
                psr_utils.p_to_f(self.fold_p1, self.fold_p2, self.fold_p3)
        self.pdelays_bins = Num.zeros(self.npart, dtype='d')
        (self.orb_p, self.orb_e, self.orb_x, self.orb_w, self.orb_t, self.orb_pd, \
         self.orb_wd) = struct.unpack(swapchar+"d"*7, infile.read(7*8))
        self.dms = Num.asarray(struct.unpack(swapchar+"d"*self.numdms, \
                                             infile.read(self.numdms*8)))
        if self.numdms == 1:
            self.dms = self.dms[0]
        self.periods = Num.asarray(struct.unpack(swapchar+"d"*self.numperiods, \
                                                 infile.read(self.numperiods*8)))
        self.pdots = Num.asarray(struct.unpack(swapchar+"d"*self.numpdots, \
                                               infile.read(self.numpdots*8)))
        self.numprofs = self.nsub * self.npart
        if (swapchar == '<'):  # little endian
            self.profs = Num.zeros((self.npart, self.nsub, self.proflen),
                                   dtype='d')
            for ii in range(self.npart):
                for jj in range(self.nsub):
                    self.profs[ii,
                               jj, :] = Num.fromfile(infile, Num.float64,
                                                     self.proflen)
        else:
            self.profs = Num.asarray(struct.unpack(swapchar+"d"*self.numprofs*self.proflen, \
                                                   infile.read(self.numprofs*self.proflen*8)))
            self.profs = Num.reshape(self.profs,
                                     (self.npart, self.nsub, self.proflen))
        if (self.numchan == 1):
            try:
                idata = infodata.infodata(
                    self.filenm[:self.filenm.rfind(b'.')] + b".inf")
                try:
                    if idata.waveband == "Radio":
                        self.bestdm = idata.DM
                        self.numchan = idata.numchan
                except:
                    self.bestdm = 0.0
                    self.numchan = 1
            except IOError:
                print("Warning!  Can't open the .inf file for " + filename +
                      "!")
        self.binspersec = self.fold_p1 * self.proflen
        self.chanpersub = self.numchan // self.nsub
        self.subdeltafreq = self.chan_wid * self.chanpersub
        self.hifreq = self.lofreq + (self.numchan - 1) * self.chan_wid
        self.losubfreq = self.lofreq + self.subdeltafreq - self.chan_wid
        self.subfreqs = Num.arange(self.nsub, dtype='d')*self.subdeltafreq + \
                        self.losubfreq
        self.subdelays_bins = Num.zeros(self.nsub, dtype='d')
        # Save current DM
        self.currdm = 0
        self.killed_subbands = []
        self.killed_intervals = []
        self.pts_per_fold = []
        # Note: a foldstats struct is read in as a group of 7 doubles
        #    they correspond to, in order:
        #    numdata, data_avg, data_var, numprof, prof_avg, prof_var, redchi
        self.stats = Num.zeros((self.npart, self.nsub, 7), dtype='d')
        for ii in range(self.npart):
            currentstats = self.stats[ii]
            for jj in range(self.nsub):
                if (swapchar == '<'):  # little endian
                    currentstats[jj] = Num.fromfile(infile, Num.float64, 7)
                else:
                    currentstats[jj] = Num.asarray(struct.unpack(swapchar+"d"*7, \
                                                                 infile.read(7*8)))
            self.pts_per_fold.append(
                self.stats[ii][0][0])  # numdata from foldstats
        self.start_secs = Num.add.accumulate([0] +
                                             self.pts_per_fold[:-1]) * self.dt
        self.pts_per_fold = Num.asarray(self.pts_per_fold)
        self.mid_secs = self.start_secs + 0.5 * self.dt * self.pts_per_fold
        if (not self.tepoch == 0.0):
            self.start_topo_MJDs = self.start_secs / 86400.0 + self.tepoch
            self.mid_topo_MJDs = self.mid_secs / 86400.0 + self.tepoch
        if (not self.bepoch == 0.0):
            self.start_bary_MJDs = self.start_secs / 86400.0 + self.bepoch
            self.mid_bary_MJDs = self.mid_secs / 86400.0 + self.bepoch
        self.Nfolded = Num.add.reduce(self.pts_per_fold)
        self.T = self.Nfolded * self.dt
        self.avgprof = (self.profs / self.proflen).sum()
        self.varprof = self.calc_varprof()
        # nominal number of degrees of freedom for reduced chi^2 calculation
        self.DOFnom = float(self.proflen) - 1.0
        # corrected number of degrees of freedom due to inter-bin correlations
        self.dt_per_bin = self.curr_p1 / self.proflen / self.dt
        self.DOFcor = self.DOFnom * self.DOF_corr()
        infile.close()
        self.barysubfreqs = None
        if self.avgvoverc == 0:
            if self.candnm.startswith("PSR_"):
                # If this doesn't work, we should try to use the barycentering calcs
                # in the presto module.
                try:
                    psrname = self.candnm[4:]
                    self.polycos = polycos.polycos(psrname,
                                                   filenm=self.pfd_filename +
                                                   ".polycos")
                    midMJD = self.tepoch + 0.5 * self.T / 86400.0
                    self.avgvoverc = self.polycos.get_voverc(
                        int(midMJD), midMJD - int(midMJD))
                    #sys.stderr.write("Approximate Doppler velocity (in c) is:  %.4g\n"%self.avgvoverc)
                    # Make the Doppler correction
                    self.barysubfreqs = self.subfreqs * (1.0 + self.avgvoverc)
                except IOError:
                    self.polycos = 0
        if self.barysubfreqs is None:
            self.barysubfreqs = self.subfreqs
Exemple #52
0
    def adjust_period(self, p=None, pd=None, pdd=None, interp=0):
        """
        adjust_period(p=*bestp*, pd=*bestpd*, pdd=*bestpdd*):
            Rotate (internally) the profiles so that they are adjusted to
                the given period and period derivatives.  By default,
                use the 'best' values as determined by prepfold's search.
                This should orient all of the profiles so that they are
                almost identical to what you see in a prepfold plot which
                used searching.  Use FFT-based interpolation if 'interp'
                is non-zero.  (NOTE: It is off by default, as in prepfold!)
        """
        if self.fold_pow == 1.0:
            bestp = self.bary_p1
            bestpd = self.bary_p2
            bestpdd = self.bary_p3
        else:
            bestp = self.topo_p1
            bestpd = self.topo_p2
            bestpdd = self.topo_p3
        if p is None:
            p = bestp
        if pd is None:
            pd = bestpd
        if pdd is None:
            pdd = bestpdd

        # Cast to single precision and back to double precision to
        # emulate prepfold_plot.c, where parttimes is of type "float"
        # but values are upcast to "double" during computations.
        # (surprisingly, it affects the resulting profile occasionally.)
        parttimes = self.start_secs.astype('float32').astype('float64')

        # Get delays
        f_diff, fd_diff, fdd_diff = self.freq_offsets(p, pd, pdd)
        delays = psr_utils.delay_from_foffsets(f_diff, fd_diff, fdd_diff,
                                               parttimes)

        # Convert from delays in phase to delays in bins
        bin_delays = Num.fmod(delays * self.proflen,
                              self.proflen) - self.pdelays_bins
        if interp:
            new_pdelays_bins = bin_delays
        else:
            new_pdelays_bins = Num.floor(bin_delays + 0.5)

        # Rotate subintegrations
        for ii in range(self.nsub):
            for jj in range(self.npart):
                tmp_prof = self.profs[jj, ii, :]
                # Negative sign in num bins to shift because we calculated delays
                # Assuming +ve is shift-to-right, psr_utils.rotate assumes +ve
                # is shift-to-left
                if interp:
                    self.profs[jj, ii] = psr_utils.fft_rotate(
                        tmp_prof, -new_pdelays_bins[jj])
                else:
                    self.profs[jj,ii] = psr_utils.rotate(tmp_prof, \
                                            -new_pdelays_bins[jj])
        self.pdelays_bins += new_pdelays_bins
        if interp:
            # Note: Since the rotation process slightly changes the values of the
            # profs, we need to re-calculate the average profile value
            self.avgprof = (self.profs / self.proflen).sum()

        self.sumprof = self.profs.sum(0).sum(0)
        if Num.fabs((self.sumprof / self.proflen).sum() - self.avgprof) > 1.0:
            print("self.avgprof is not the correct value!")

        # Save current p, pd, pdd
        self.curr_p1, self.curr_p2, self.curr_p3 = p, pd, pdd
Exemple #53
0
def normalize_angle(angle):
    """Normalize angle to stay between -PI and PI"""
    result = np.fmod(angle + np.pi, 2.0 * np.pi)
    if result <= 0:
        return result + np.pi
    return result - np.pi
Exemple #54
0
 def step(self):
     self.p = self.p + self.konst * np.sin(self.q)
     self.q = np.fmod(self.q + self.p, dospi)
     if (self.q < 0):
         self.q += dospi
Exemple #55
0
def constrain_theta(theta):
    theta = np.fmod(theta, 2 * no_of_points)
    if (theta < 0):
        theta = theta + 2 * no_of_points
    return theta
Exemple #56
0
def em(params_init,
       seqs_train,
       max_iters=500,
       tol=1.0E-8,
       min_var_frac=0.01,
       freq_ll=5,
       verbose=False):
    """
    Fits GPFA model parameters using expectation-maximization (EM) algorithm.

    Parameters
    ----------
    params_init : dict
        GPFA model parameters at which EM algorithm is initialized
        covType : {'rbf', 'tri', 'logexp'}
            type of GP covariance
        gamma : np.ndarray of shape (1, #latent_vars)
            related to GP timescales by
            'bin_width / sqrt(gamma)'
        eps : np.ndarray of shape (1, #latent_vars)
            GP noise variances
        d : np.ndarray of shape (#units, 1)
            observation mean
        C : np.ndarray of shape (#units, #latent_vars)
            mapping between the neuronal data space and the
            latent variable space
        R : np.ndarray of shape (#units, #units)
            observation noise covariance
    seqs_train : np.recarray
        training data structure, whose n-th entry (corresponding to the n-th
        experimental trial) has fields
        T : int
            number of bins
        y : np.ndarray (yDim x T)
            neural data
    max_iters : int, optional
        number of EM iterations to run
        Default: 500
    tol : float, optional
        stopping criterion for EM
        Default: 1e-8
    min_var_frac : float, optional
        fraction of overall data variance for each observed dimension to set as
        the private variance floor.  This is used to combat Heywood cases,
        where ML parameter learning returns one or more zero private variances.
        Default: 0.01
        (See Martin & McDonald, Psychometrika, Dec 1975.)
    freq_ll : int, optional
        data likelihood is computed at every freq_ll EM iterations.
        freq_ll = 1 means that data likelihood is computed at every
        iteration.
        Default: 5
    verbose : bool, optional
        specifies whether to display status messages
        Default: False

    Returns
    -------
    params_est : dict
        GPFA model parameter estimates, returned by EM algorithm (same
        format as params_init)
    seqs_latent : np.recarray
        a copy of the training data structure, augmented with the new
        fields:
        xsm : np.ndarray of shape (#latent_vars x #bins)
            posterior mean of latent variables at each time bin
        Vsm : np.ndarray of shape (#latent_vars, #latent_vars, #bins)
            posterior covariance between latent variables at each
            timepoint
        VsmGP : np.ndarray of shape (#bins, #bins, #latent_vars)
            posterior covariance over time for each latent
            variable
    ll : list
        list of log likelihoods after each EM iteration
    iter_time : list
        list of computation times (in seconds) for each EM iteration
    """
    params = params_init
    t = seqs_train['T']
    y_dim, x_dim = params['C'].shape
    lls = []
    ll_old = ll_base = ll = 0.0
    iter_time = []
    var_floor = min_var_frac * np.diag(np.cov(np.hstack(seqs_train['y'])))
    seqs_latent = None

    # Loop once for each iteration of EM algorithm
    for iter_id in trange(1,
                          max_iters + 1,
                          desc='EM iteration',
                          disable=not verbose):
        if verbose:
            print()
        tic = time.time()
        get_ll = (np.fmod(iter_id, freq_ll) == 0) or (iter_id <= 2)

        # ==== E STEP =====
        if not np.isnan(ll):
            ll_old = ll
        seqs_latent, ll = exact_inference_with_ll(seqs_train,
                                                  params,
                                                  get_ll=get_ll)
        lls.append(ll)

        # ==== M STEP ====
        sum_p_auto = np.zeros((x_dim, x_dim))
        for seq_latent in seqs_latent:
            sum_p_auto += seq_latent['Vsm'].sum(axis=2) \
                + seq_latent['xsm'].dot(seq_latent['xsm'].T)
        y = np.hstack(seqs_train['y'])
        xsm = np.hstack(seqs_latent['xsm'])
        sum_yxtrans = y.dot(xsm.T)
        sum_xall = xsm.sum(axis=1)[:, np.newaxis]
        sum_yall = y.sum(axis=1)[:, np.newaxis]

        # term is (xDim+1) x (xDim+1)
        term = np.vstack([
            np.hstack([sum_p_auto, sum_xall]),
            np.hstack([sum_xall.T, t.sum().reshape((1, 1))])
        ])
        # yDim x (xDim+1)
        cd = gpfa_util.rdiv(np.hstack([sum_yxtrans, sum_yall]), term)

        params['C'] = cd[:, :x_dim]
        params['d'] = cd[:, -1]

        # yCent must be based on the new d
        # yCent = bsxfun(@minus, [seq.y], currentParams.d);
        # R = (yCent * yCent' - (yCent * [seq.xsm]') * currentParams.C')
        #     / sum(T);
        c = params['C']
        d = params['d'][:, np.newaxis]
        if params['notes']['RforceDiagonal']:
            sum_yytrans = (y * y).sum(axis=1)[:, np.newaxis]
            yd = sum_yall * d
            term = ((sum_yxtrans - d.dot(sum_xall.T)) * c).sum(axis=1)
            term = term[:, np.newaxis]
            r = d**2 + (sum_yytrans - 2 * yd - term) / t.sum()

            # Set minimum private variance
            r = np.maximum(var_floor, r)
            params['R'] = np.diag(r[:, 0])
        else:
            sum_yytrans = y.dot(y.T)
            yd = sum_yall.dot(d.T)
            term = (sum_yxtrans - d.dot(sum_xall.T)).dot(c.T)
            r = d.dot(d.T) + (sum_yytrans - yd - yd.T - term) / t.sum()

            params['R'] = (r + r.T) / 2  # ensure symmetry

        if params['notes']['learnKernelParams']:
            res = learn_gp_params(seqs_latent, params, verbose=verbose)
            params['gamma'] = res['gamma']

        t_end = time.time() - tic
        iter_time.append(t_end)

        # Verify that likelihood is growing monotonically
        if iter_id <= 2:
            ll_base = ll
        elif verbose and ll < ll_old:
            print('\nError: Data likelihood has decreased ',
                  'from {0} to {1}'.format(ll_old, ll))
        elif (ll - ll_base) < (1 + tol) * (ll_old - ll_base):
            break

    if len(lls) < max_iters:
        print('Fitting has converged after {0} EM iterations.'.format(
            len(lls)))

    if np.any(np.diag(params['R']) == var_floor):
        warnings.warn('Private variance floor used for one or more observed '
                      'dimensions in GPFA.')

    return params, seqs_latent, lls, iter_time
Exemple #57
0
 def fmod_pos(x, p):
     return np.fmod(np.fmod(x, p) + p, p)
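A quick check of the double-fmod idiom (treating fmod_pos above as a free function): it yields a remainder in [0, p) for either sign of x, unlike a single np.fmod.

import numpy as np

print(np.fmod(-1.0, 3.0))   # -1.0  (sign follows x)
print(fmod_pos(-1.0, 3.0))  #  2.0  (always non-negative)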
Exemple #58
0
def euler(ai, bi, select, FK4=0):
    """
   NAME:
       euler
   PURPOSE:
       Transform between Galactic, celestial, and ecliptic coordinates.
   EXPLANATION:
       Use the procedure ASTRO to use this routine interactively
   
   CALLING SEQUENCE:
        EULER, AI, BI, AO, BO, [ SELECT, /FK4, SELECT = ] 
   
   INPUTS:
         AI - Input Longitude in DEGREES, scalar or vector.  If only two 
                 parameters are supplied, then  AI and BI will be modified
                 to contain the output longitude and latitude.
         BI - Input Latitude in DEGREES
   
   OPTIONAL INPUT:
         SELECT - Integer (1-6) specifying type of coordinate
                  transformation.
   
        SELECT   From          To        |   SELECT      From         To
         1     RA-Dec (2000)  Galactic   |     4       Ecliptic     RA-Dec
         2     Galactic       RA-DEC     |     5       Ecliptic    Galactic
         3     RA-Dec         Ecliptic   |     6       Galactic    Ecliptic
   
        If not supplied as a parameter or keyword, then EULER will prompt
        for the value of SELECT
        Celestial coordinates (RA, Dec) should be given in equinox J2000 
        unless the /FK4 keyword is set.
   OUTPUTS:
         AO - Output Longitude in DEGREES
         BO - Output Latitude in DEGREES
   
   INPUT KEYWORD:
         /FK4 - If this keyword is set and non-zero, then input and output 
               celestial and ecliptic coordinates should be given in
               equinox B1950.
         /SELECT  - The coordinate conversion integer (1-6) may
                    alternatively be specified as a keyword
   NOTES:
         EULER was changed in December 1998 to use J2000 coordinates as the
         default, ** and may be incompatible with earlier versions***.
   REVISION HISTORY:
         Written W. Landsman,  February 1987
         Adapted from Fortran by Daryl Yentis NRL
         Converted to IDL V5.0   W. Landsman   September 1997
         Made J2000 the default, added /FK4 keyword
          W. Landsman December 1998
         Add option to specify SELECT as a keyword W. Landsman March 2003
         Converted to python by K. Ganga December 2007
   """

    # npar = N_params()
    #  if npar LT 2 then begin
    #     print,'Syntax - EULER, AI, BI, A0, B0, [ SELECT, /FK4, SELECT= ]'
    #     print,'    AI,BI - Input longitude,latitude in degrees'
    #     print,'    AO,BO - Output longitude, latitude in degrees'
    #     print,'    SELECT - Scalar (1-6) specifying transformation type'
    #     return
    #  endif

    PI = np.pi
    twopi = 2.0 * PI
    fourpi = 4.0 * PI
    deg_to_rad = 180.0 / PI  # degrees per radian: divide to convert deg->rad, multiply for rad->deg
    #
    # ;   J2000 coordinate conversions are based on the following constants
    # ;   (see the Hipparcos explanatory supplement).
    # ;  eps = 23.4392911111 # Obliquity of the ecliptic
    # ;  alphaG = 192.85948d           Right Ascension of Galactic North Pole
    # ;  deltaG = 27.12825d            Declination of Galactic North Pole
    # ;  lomega = 32.93192d            Galactic longitude of celestial equator
    # ;  alphaE = 180.02322d           Ecliptic longitude of Galactic North Pole
    # ;  deltaE = 29.811438523d        Ecliptic latitude of Galactic North Pole
    # ;  Eomega  = 6.3839743d          Galactic longitude of ecliptic equator
    #
    if FK4 == 1:

        equinox = '(B1950)'
        psi = [
            0.57595865315, 4.9261918136, 0.00000000000, 0.0000000000,
            0.11129056012, 4.7005372834
        ]
        stheta = [
            0.88781538514, -0.88781538514, 0.39788119938, -0.39788119938,
            0.86766174755, -0.86766174755
        ]
        ctheta = [
            0.46019978478, 0.46019978478, 0.91743694670, 0.91743694670,
            0.49715499774, 0.49715499774
        ]
        phi = [
            4.9261918136, 0.57595865315, 0.0000000000, 0.00000000000,
            4.7005372834, 0.11129056012
        ]
    else:

        equinox = '(J2000)'
        psi = [
            0.57477043300, 4.9368292465, 0.00000000000, 0.0000000000,
            0.11142137093, 4.71279419371
        ]
        stheta = [
            0.88998808748, -0.88998808748, 0.39777715593, -0.39777715593,
            0.86766622025, -0.86766622025
        ]
        ctheta = [
            0.45598377618, 0.45598377618, 0.91748206207, 0.91748206207,
            0.49714719172, 0.49714719172
        ]
        phi = [
            4.9368292465, 0.57477043300, 0.0000000000, 0.00000000000,
            4.71279419371, 0.11142137093
        ]
    #
    i = select - 1  # IDL offset
    a = ai / deg_to_rad - phi[i]
    b = bi / deg_to_rad
    sb = np.sin(b)
    cb = np.cos(b)
    cbsa = cb * np.sin(a)
    b = -stheta[i] * cbsa + ctheta[i] * sb
    #bo    = math.asin(where(b<1.0, b, 1.0)*deg_to_rad)
    bo = np.arcsin(b) * deg_to_rad
    #
    a = np.arctan2(ctheta[i] * cbsa + stheta[i] * sb, cb * np.cos(a))
    ao = np.fmod((a + psi[i] + fourpi), twopi) * deg_to_rad
    return ao, bo
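A hedged usage sketch: converting J2000 (RA, Dec) = (0, 0) to Galactic coordinates with select=1 should give approximately (l, b) = (96.3, -60.2) degrees.

ao, bo = euler(0.0, 0.0, 1)
print(ao, bo)  # ~96.3 ~-60.2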
Exemple #59
0
    def optimize_param(self,y,ninit=4*24*7,rtol=0.01,\
                        eta=0.01,lr=0.05,nmax=100):
        """optimize_param
        Use gradient descent to find optimum parameters for learning 
        rates alpha,beta,gamma.  Wait till all of their values are 
        settled to a relative tolerance.
        Cost is root Mean Square Error over whole time series.
        Currently tries to predict day ahead.  
        Input:
        y - series to fit
        ninit - number of values to use in initial parameter fitting
        rtol - relative tolerance on parameters
        eta - fraction for finite-difference step
        lr  - learning rate
        nmax - maximum number of iterations.
        """
        self.fit_init_params(y)
        #Super clunky way of specifying names.
        #Why did I think this was superior?
        names = ['_alpha', '_beta', '_g00', '_g01', '_g10', '_g11']
        pred0 = self.predict_all_days(y, ninit=ninit)
        J = self.rmse(y[ninit:], pred0[ninit:])
        Ni = 0
        oldJ = J
        J_opt = J
        #loop over iterations
        for i in range(nmax):
            dJ_max = 0
            #for each name, tweak the model's variables.
            eta = eta * 0.99
            for n in names:
                #do finite-difference estimate of update.
                p0 = self.__getattribute__(n)
                self.__setattr__(n, p0 * (1 + eta))
                pred = self.predict_all_days(y, ninit=ninit)
                J2 = self.rmse(pred[ninit:], y[ninit:])
                dJ = np.abs((J2 - J) / J)
                #crop gradient to within +/- 1
                p = p0 + lr * np.fmod(dJ / (eta * p0), 1)
                if (p > 1):
                    print('param {} >1(!): {}'.format(n, p))
                    print('J,J2,p', J, J2, p0)
                    p = min(p0 + 0.5 * (1 - p0), 0.99)
                elif (p < 0):
                    print('param {} <0(!): {}'.format(n, p))
                    print('J,J2,p', J, J2, p0)
                    p = 0.5 * p0
                self.__setattr__(n, p)
                J = J2
                dJ_max = max(dJ, dJ_max)
            Ni += 1
            if (J < J_opt):
                J_opt = J
                self.save_opt_param()
            if (dJ_max < rtol):
                clear_output(wait=True)
                print("Hit tolerance {} at iter {}".format(dJ, Ni))
                self.plot_pred([pred, y], ['Predicted', 'Actual'])
                return pred
            if (Ni % 10 == 0):
                clear_output(wait=True)
                print("Cost, Old Cost = {},{}".format(J, oldJ))
                oldJ = J.copy()
                self.plot_pred([pred, y, pred - y],
                               ['Predicted', 'Actual', 'Error'])
                for n in names:
                    p0 = self.__getattribute__(n)
                    print(n, p0)

        print("Failed to hit tolerance after {} iter\n".format(Ni))
        print("Cost:", J, J2)
        return pred
Exemple #60
0
def draw_colored_pedigree(pedobj,
                          shading,
                          gfilename='pedigree',
                          gtitle='My_Pedigree',
                          gformat='jpg',
                          gsize='f',
                          gdot='1',
                          gorient='l',
                          gdirec='',
                          gname=0,
                          gfontsize=10,
                          garrow=1,
                          gtitloc='b',
                          gtitjust='c',
                          ghatch='hatch',
                          gprog='dot'):
    """
    draw_colored_pedigree() uses the pydot bindings to the graphviz library to produce a
    directed graph of your pedigree with paths of inheritance as edges and animals as
    nodes.  If there is more than one generation in the pedigree, as determined by the "gen"
    attributes of the animals in the pedigree, draw_colored_pedigree() will use subgraphs to
    try and group animals in the same generation together in the drawing.  Nodes will be
    colored based on the number of outgoing connections (number of offspring).
    from pyp_utils import string_to_table_name
    _gtitle = string_to_table_name(gtitle)

    if gtitloc not in ['t', 'b']:
        gtitloc = 'b'
    if gtitjust not in ['c', 'l', 'r']:
        gtitjust = 'c'

    print '[DEBUG]: Entered draw_colored_pedigree()'

    import pydot

    # Build a list of generations -- if we have more than one, we can use the
    # "rank=same" option in dot to get nicer output.
    gens = pedobj.metadata.unique_gen_list
    # Set some properties for the graph.
    g = pydot.Dot(label=gtitle,
                  labelloc=gtitloc,
                  labeljust=gtitjust,
                  graph_name=_gtitle,
                  type='graph',
                  strict=False,
                  suppress_disconnected=True,
                  simplify=True)

    # Make sure that gfontsize has a valid value.
    try:
        gfontsize = int(gfontsize)
    except:
        gfontsize = 10
    if gfontsize < 10:
        gfontsize = 10
    gfontsize = str(gfontsize)
    #     print 'gfontsize = %s' % (gfontsize)
    g.set_page("8.5,11")
    g.set_size("7.5,10")
    if gorient == 'l':
        g.set_orientation("landscape")
    else:
        g.set_orientation("portrait")
    if gsize != 'l':
        g.set_ratio("auto")
    if gdirec == 'RL':
        g.set_rankdir('RL')
    elif gdirec == 'LR':
        g.set_rankdir('LR')
    else:
        pass
    g.set_center('true')
    g.set_concentrate('true')
    g.set_ordering('out')
    if gformat not in g.formats:
        gformat = 'jpg'
    colormin = min(shading.values())
    colormax = max(shading.values())
    color_map = {}
    # If we do not have any generations, we have to draw a less-nice graph.
    if len(gens) <= 1:
        animalCounter = 0
        print '\t[DEBUG]: Only one generation'
        for _m in pedobj.pedigree:
            animalCounter = animalCounter + 1
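            # numpy.fmod() doubles as an integer modulus here: print a
            # progress line every pedobj.kw['counter'] records.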
            if numpy.fmod(animalCounter, pedobj.kw['counter']) == 0:
                print '\t[DEBUG]: Records read: %s ' % (animalCounter)
            # Add a node for the current animal and set some properties.
            if gname:
                _node_name = _m.name
            else:
                _node_name = _m.animalID
            _an_node = pydot.Node(_node_name)
            _an_node.set_fontname('Helvetica')
            _an_node.set_fontsize(gfontsize)
            _an_node.set_height('0.35')
            if _m.sex == 'M' or _m.sex == 'm':
                _an_node.set_shape('box')
            elif _m.sex == 'F' or _m.sex == 'f':
                _an_node.set_shape('ellipse')
            else:
                pass
            #print _m.userField, ghatch, ( _m.userField == ghatch )
            if _m.userField == ghatch:
                _an_node.set_style('filled,peripheries=2')
            else:
                _an_node.set_style('filled')
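            # Cache colors in color_map so get_color_32() only runs once per
            # distinct shading value.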
            try:
                _color = color_map[shading[_m.animalID]]
            except KeyError:
                _color = get_color_32(shading[_m.animalID], colormin, colormax)
                color_map[shading[_m.animalID]] = _color
                print '\t[DEBUG]: %s added to cache' % (_color)
            _an_node.set_fillcolor(_color)
            g.add_node(_an_node)
            # Add the edges to the parent nodes, if any.
            if _m.sireID != pedobj.kw['missing_parent']:
                if gname:
                    if garrow:
                        g.add_edge(
                            pydot.Edge(
                                pedobj.pedigree[int(_m.sireID) - 1].name,
                                _m.name))
                    else:
                        g.add_edge(
                            pydot.Edge(pedobj.pedigree[int(_m.sireID) -
                                                       1].name,
                                       _m.name,
                                       dir='none'))
                else:
                    if garrow:
                        g.add_edge(
                            pydot.Edge(
                                pedobj.pedigree[int(_m.sireID) - 1].originalID,
                                _m.originalID))
                    else:
                        g.add_edge(
                            pydot.Edge(pedobj.pedigree[int(_m.sireID) -
                                                       1].originalID,
                                       _m.originalID,
                                       dir='none'))
            if _m.damID != pedobj.kw['missing_parent']:
                if gname:
                    if garrow:
                        g.add_edge(
                            pydot.Edge(pedobj.pedigree[int(_m.damID) - 1].name,
                                       _m.name))
                    else:
                        g.add_edge(
                            pydot.Edge(pedobj.pedigree[int(_m.damID) - 1].name,
                                       _m.name,
                                       dir='none'))
                else:
                    if garrow:
                        g.add_edge(
                            pydot.Edge(
                                pedobj.pedigree[int(_m.damID) - 1].originalID,
                                _m.originalID))
                    else:
                        g.add_edge(
                            pydot.Edge(pedobj.pedigree[int(_m.damID) -
                                                       1].originalID,
                                       _m.originalID,
                                       dir='none'))
    # Otherwise we can draw a nice graph.
    else:
        for _g in gens:
            print '\t[DEBUG]: Looping over generations'
            _sg_anims = []
            _sg_name = 'sg%s' % (_g)
            sg = pydot.Subgraph(graph_name=_sg_name,
                                suppress_disconnected=True,
                                simplify=True)
            sg.set_simplify(True)
            animalCounter = 0
            for _m in pedobj.pedigree:
                animalCounter = animalCounter + 1
                if numpy.fmod(animalCounter, pedobj.kw['counter']) == 0:
                    print '\t[DEBUG]: Records read: %s ' % (animalCounter)
                if int(_m.gen) == int(_g):
                    _sg_anims.append(_m.animalID)
                # Add a node for the current animal and set some properties.
                if gname:
                    _node_name = _m.name
                else:
                    _node_name = _m.animalID
                _an_node = pydot.Node(_node_name)
                _an_node.set_fontname('Helvetica')
                _an_node.set_fontsize(gfontsize)
                _an_node.set_height('0.35')
                if _m.sex == 'M' or _m.sex == 'm':
                    _an_node.set_shape('box')
                elif _m.sex == 'F' or _m.sex == 'f':
                    _an_node.set_shape('ellipse')
                else:
                    pass
                if _m.userField == ghatch:
                    _an_node.set_style('filled,peripheries=2')
                else:
                    _an_node.set_style('filled')
                _color = get_color_32(shading[_m.animalID], colormin, colormax)
                _an_node.set_fillcolor(_color)
                sg.add_node(_an_node)
                # Add the edges to the parent nodes, if any.
                if _m.sireID != pedobj.kw['missing_parent']:
                    if gname:
                        if garrow:
                            sg.add_edge(
                                pydot.Edge(
                                    pedobj.pedigree[int(_m.sireID) - 1].name,
                                    _m.name))
                        else:
                            sg.add_edge(
                                pydot.Edge(pedobj.pedigree[int(_m.sireID) -
                                                           1].name,
                                           _m.name,
                                           dir='none'))
                    else:
                        if garrow:
                            sg.add_edge(
                                pydot.Edge(
                                    pedobj.pedigree[int(_m.sireID) -
                                                    1].originalID,
                                    _m.originalID))
                        else:
                            sg.add_edge(
                                pydot.Edge(pedobj.pedigree[int(_m.sireID) -
                                                           1].originalID,
                                           _m.originalID,
                                           dir='none'))
                if _m.damID != pedobj.kw['missing_parent']:
                    if gname:
                        if garrow:
                            sg.add_edge(
                                pydot.Edge(
                                    pedobj.pedigree[int(_m.damID) - 1].name,
                                    _m.name))
                        else:
                            sg.add_edge(
                                pydot.Edge(pedobj.pedigree[int(_m.damID) -
                                                           1].name,
                                           _m.name,
                                           dir='none'))
                    else:
                        if garrow:
                            sg.add_edge(
                                pydot.Edge(
                                    pedobj.pedigree[int(_m.damID) -
                                                    1].originalID,
                                    _m.originalID))
                        else:
                            sg.add_edge(
                                pydot.Edge(pedobj.pedigree[int(_m.damID) -
                                                           1].originalID,
                                           _m.originalID,
                                           dir='none'))
            if len(_sg_anims) > 0:
                _sg_list = ''
                for _a in _sg_anims:
                    if len(_sg_list) == 0:
                        _sg_list = 'same,%s' % (_a)
                    else:
                        _sg_list = '%s,%s' % (_sg_list, _a)
                # Only set the rank when this generation has animals;
                # otherwise _sg_list would be undefined here.
                sg.set_rank(_sg_list)
            g.add_subgraph(sg)
    # For large graphs it is nice to write out the .dot file so that it does not have to be
    # recreated whenever draw_colored_pedigree() is called.  Especially when I am debugging.  :-)
    if gdot:
        dfn = '%s.dot' % (gfilename)
        g.write(dfn)
    # Write the graph to an output file.
    outfile = '%s.%s' % (gfilename, gformat)
    if gprog not in ['dot', 'neato', 'none']:
        gprog = 'dot'
    if gprog != 'none':
        g.write(outfile, prog=gprog, format=gformat)
    return 1
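
For readers on current Python, a compact sketch of the same technique against a modern pydot: one rank='same' subgraph per generation and filled nodes shaded per animal. The pedigree data is made up, and shade() is a hypothetical stand-in for get_color_32():

import pydot

def shade(value, lo, hi):
    # linearly map value in [lo, hi] onto a blue-to-red hex color
    t = 0.0 if hi == lo else float(value - lo) / (hi - lo)
    return '#%02x00%02x' % (int(255 * t), int(255 * (1 - t)))

# toy pedigree: (id, generation, number of offspring)
animals = [('s1', 0, 2), ('d1', 0, 1), ('kid', 1, 0)]
edges = [('s1', 'kid'), ('d1', 'kid')]
counts = [nkids for _, _, nkids in animals]

g = pydot.Dot(graph_type='digraph', label='toy_pedigree', labelloc='b')
for gen in sorted(set(agen for _, agen, _ in animals)):
    sg = pydot.Subgraph(rank='same')   # keep one generation on one row
    for name, agen, nkids in animals:
        if agen == gen:
            sg.add_node(pydot.Node(name, shape='box', style='filled',
                                   fontname='Helvetica',
                                   fillcolor=shade(nkids, min(counts),
                                                   max(counts))))
    g.add_subgraph(sg)
for parent, child in edges:
    g.add_edge(pydot.Edge(parent, child))
print(g.to_string())  # or g.write_png('toy_pedigree.png') with graphviz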