Example #1
def test_fk4_no_e_fk4():

    t = Table.read(os.path.join(ROOT, 'fk4_no_e_fk4.csv'), format='ascii')

    # FK4 to FK4NoETerms
    c1 = FK4(t['ra_in'], t['dec_in'],
             unit=(u.degree, u.degree),
             obstime=Time(t['obstime'], scale='utc'))
    c2 = c1.transform_to(FK4NoETerms)

    # Find difference
    diff = angular_separation(c2.ra.radian, c2.dec.radian,
                              np.radians(t['ra_fk4ne']), np.radians(t['dec_fk4ne']))

    assert np.all(np.degrees(diff) * 3600. < TOLERANCE)

    # FK4NoETerms to FK4
    c1 = FK4NoETerms(t['ra_in'], t['dec_in'],
                     unit=(u.degree, u.degree),
                     obstime=Time(t['obstime'], scale='utc'))
    c2 = c1.transform_to(FK4)

    # Find difference
    diff = angular_separation(c2.ra.radian, c2.dec.radian,
                              np.radians(t['ra_fk4']), np.radians(t['dec_fk4']))

    assert np.all(np.degrees(diff) * 3600. < TOLERANCE)
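The same round trip can be reproduced interactively; a minimal sketch using astropy's high-level interface (frame classes as in the test above, with an arbitrary test coordinate):

from astropy.coordinates import SkyCoord, FK4, FK4NoETerms
import astropy.units as u

c = SkyCoord(10.0 * u.deg, 20.0 * u.deg, frame='fk4', obstime='B1950')
c_noe = c.transform_to(FK4NoETerms)
print(c_noe.separation(c).arcsec)  # small but nonzero: the E-terms of aberration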
Example #2
 def deformation_theta(theta = -math.pi/2., plot = False):
     """Return lists of deformation os SMA actuator per theta"""
     
     theta_list = np.linspace(-theta, theta)
     eps_s_list = []
     eps_l_list = []
     
     for theta in theta_list:
         s.theta = theta
         l.theta = theta
         
         s.update()
         l.update()
         
         l.calculate_force()
         
         eps_s_list.append(s.eps)
         eps_l_list.append(l.eps)
     if plot:
         import matplotlib.pyplot as plt
         plt.figure()
         plt.plot(np.degrees(theta_list), eps_s_list, 'r', np.degrees(theta_list), eps_l_list, 'b')  
         plt.xlabel('$\\theta (degrees)$')
         plt.ylabel('$\\epsilon$')
 
     return eps_s_list, theta_list
Example #3
def j2000tob1950(ra, dec):
    """
    Convert J2000 to B1950 coordinates.

    This routine was derived by taking the inverse of the b1950toj2000 routine
    """

    # Convert to radians
    ra = np.radians(ra)
    dec = np.radians(dec)

    # Convert RA, Dec to rectangular coordinates
    x = np.cos(ra) * np.cos(dec)
    y = np.sin(ra) * np.cos(dec)
    z = np.sin(dec)

    # Apply the precession matrix
    x2 = P2[0, 0] * x + P2[1, 0] * y + P2[2, 0] * z
    y2 = P2[0, 1] * x + P2[1, 1] * y + P2[2, 1] * z
    z2 = P2[0, 2] * x + P2[1, 2] * y + P2[2, 2] * z

    # Convert the new rectangular coordinates back to RA, Dec
    ra = np.arctan2(y2, x2)
    dec = np.arcsin(z2)

    # Convert to degrees
    ra = np.degrees(ra)
    dec = np.degrees(dec)

    # Make sure ra is between 0. and 360.
    ra = np.mod(ra, 360.0)
    dec = np.mod(dec + 90.0, 180.0) - 90.0

    return ra, dec
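The three per-component lines above are just a transposed matrix product written out; an equivalent vectorized form (a sketch, assuming P2 is a 3x3 ndarray) is:

import numpy as np

x2, y2, z2 = P2.T @ np.array([x, y, z])  # identical to the explicit sums above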
Example #4
 def get_center(self):
     """
     Return the RA, Dec of the center of the circle bounding
     this trixel (RA, Dec both in degrees)
     """
     ra, dec = sphericalFromCartesian(self.bounding_circle[0])
     return np.degrees(ra), np.degrees(dec)
Example #5
def b1950toj2000(ra, dec):
    """
    Convert B1950 to J2000 coordinates.

    This routine is based on the technique described at
    http://www.stargazing.net/kepler/b1950.html
    """

    # Convert to radians
    ra = np.radians(ra)
    dec = np.radians(dec)

    # Convert RA, Dec to rectangular coordinates
    x = np.cos(ra) * np.cos(dec)
    y = np.sin(ra) * np.cos(dec)
    z = np.sin(dec)

    # Apply the precession matrix
    x2 = P1[0, 0] * x + P1[1, 0] * y + P1[2, 0] * z
    y2 = P1[0, 1] * x + P1[1, 1] * y + P1[2, 1] * z
    z2 = P1[0, 2] * x + P1[1, 2] * y + P1[2, 2] * z

    # Convert the new rectangular coordinates back to RA, Dec
    ra = np.arctan2(y2, x2)
    dec = np.arcsin(z2)

    # Convert to degrees
    ra = np.degrees(ra)
    dec = np.degrees(dec)

    # Make sure ra is between 0. and 360.
    ra = np.mod(ra, 360.0)
    dec = np.mod(dec + 90.0, 180.0) - 90.0

    return ra, dec
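Since j2000tob1950 above was derived as the inverse of this routine, a round trip should reproduce its inputs; a quick consistency check (a sketch, with arbitrary test coordinates and an illustrative tolerance):

ra0, dec0 = 123.4, -45.6
ra1, dec1 = b1950toj2000(ra0, dec0)
ra2, dec2 = j2000tob1950(ra1, dec1)
assert np.allclose([ra2, dec2], [ra0, dec0], atol=1e-9)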
Example #6
 def lambet(self):
     """Ecliptic longitude and latitude."""
     from astropy.coordinates import Angle
     lam = np.arctan2(self._rot.T[1], self._rot.T[0])
     bet = np.arctan2(self._rot.T[2],
                      np.sqrt(self._rot.T[0]**2 + self._rot.T[1]**2))
     return Angle(np.degrees(lam) * u.deg), Angle(np.degrees(bet) * u.deg)
Example #7
def star(a,b,c,alpha,beta,gamma):
    "Calculate unit cell volume, reciprocal cell volume, reciprocal lattice parameters"
    alpha=np.radians(alpha)
    beta=np.radians(beta)
    gamma=np.radians(gamma)
    V=2*a*b*c*\
        np.sqrt(np.sin((alpha+beta+gamma)/2)*\
               np.sin((-alpha+beta+gamma)/2)*\
               np.sin((alpha-beta+gamma)/2)*\
               np.sin((alpha+beta-gamma)/2))
    Vstar=(2*np.pi)**3/V
    astar=2*np.pi*b*c*np.sin(alpha)/V
    bstar=2*np.pi*a*c*np.sin(beta)/V
    cstar=2*np.pi*b*a*np.sin(gamma)/V
    alphastar=np.arccos((np.cos(beta)*np.cos(gamma)-\
                        np.cos(alpha))/ \
                       (np.sin(beta)*np.sin(gamma)))
    betastar= np.arccos((np.cos(alpha)*np.cos(gamma)-\
                        np.cos(beta))/ \
                       (np.sin(alpha)*np.sin(gamma)))
    gammastar=np.arccos((np.cos(alpha)*np.cos(beta)-\
                        np.cos(gamma))/ \
                       (np.sin(alpha)*np.sin(beta)))
    alphastar=np.degrees(alphastar)
    betastar=np.degrees(betastar)
    gammastar=np.degrees(gammastar)
    return astar,bstar,cstar,alphastar,betastar,gammastar
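For a cubic cell the reciprocal cell is also cubic, which gives a quick sanity check (a sketch):

astar, bstar, cstar, alphastar, betastar, gammastar = star(4.0, 4.0, 4.0, 90.0, 90.0, 90.0)
# expect astar == bstar == cstar == 2*pi/4 and all three reciprocal angles == 90.0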
Example #8
def azalt_to_lb(az, alt, lat, lon, unixtime=None):
    """
    Converts az/alt coordiantes to galactic coordinates. All inputs are
    expected to be in degrees.
    """
    # Get the of-date ra/dec
    ra, dec = azalt_to_radec(az, alt, lat, lon, unixtime)

    # Convert of-date ra/dec to J2000
    ra, dec = get_radec_j2000(ra, dec, unixtime)

    # Convert degrees to radians
    ra  = _np.radians(ra)
    dec = _np.radians(dec)

    # Location on unit sphere
    theta = _np.pi / 2 - dec
    phi   = ra
    x = _np.sin(theta) * _np.cos(phi)
    y = _np.sin(theta) * _np.sin(phi)
    z = _np.cos(theta)
    cartesian = _np.array([x, y, z])

    # Perform the final matrix multiplication to get l/b
    lb_cart = _np.dot(matrix_radec_lb(), cartesian)
    x, y, z = lb_cart
    r = _np.sqrt(x**2 + y**2 + z**2) # should be 1
    theta = _np.arccos(z / r)
    phi = _np.arctan2(y, x)
    gal_l = _np.degrees(phi)
    gal_b = _np.degrees(_np.pi / 2 - theta)
    if gal_l < 0:
        gal_l += 360.0

    return (gal_l, gal_b)
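Note that the final 'if gal_l < 0' branch only handles scalar inputs; a vector-safe equivalent (a sketch) is:

gal_l = _np.mod(gal_l, 360.0)  # wraps negative longitudes for scalars and arrays alike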
Example #9
 def _lambet(self):
     """Lower overhead ecliptic longitude and latitude. [deg]"""
     from astropy.coordinates import Angle
     lam = np.arctan2(self._rot.T[1], self._rot.T[0])
     bet = np.arctan2(self._rot.T[2],
                      np.sqrt(self._rot.T[0]**2 + self._rot.T[1]**2))
     return np.degrees(lam), np.degrees(bet)
Example #10
    def __init__(self, raCol='fieldRA', decCol='fieldDec', mjdCol='expMJD', latRad=None,
                 lonRad=None, height=None, tempCentigrade=None, lapseRate=None,
                 humidity=None, pressure=None):

        self.raCol = raCol
        self.decCol = decCol
        self.mjdCol = mjdCol

        if latRad is None:
            latDeg = None
        else:
            latDeg = np.degrees(latRad)

        if lonRad is None:
            lonDeg = None
        else:
            lonDeg = np.degrees(lonRad)

        self.site = Site(longitude=lonDeg, latitude=latDeg,
                         temperature=tempCentigrade,
                         height=height, humidity=humidity,
                         pressure=pressure, lapseRate=lapseRate,
                         name='LSST')

        self.units = ['radians']
        self.colsAdded = ['PA']
        self.colsReq = [self.raCol, self.decCol, self.mjdCol]
Example #11
def lb_to_azalt(gal_l, gal_b, lat, lon, unixtime=None):
    """
    Converts galactic coordiantes to az/alt coordinates. The input
    angles are expected to be in degrees.
    """
    # Convert degrees to radians
    gal_l = _np.radians(gal_l)
    gal_b = _np.radians(gal_b)

    # Location on unit sphere
    theta = _np.pi / 2 - gal_b
    phi   = gal_l
    x = _np.sin(theta) * _np.cos(phi)
    y = _np.sin(theta) * _np.sin(phi)
    z = _np.cos(theta)
    cartesian = _np.array([x, y, z])

    # Get the of-date ra/dec to convert to az/alt
    radec_cart = _np.dot(matrix_lb_radec(), cartesian)
    x, y, z = radec_cart
    r = _np.sqrt(x**2 + y**2 + z**2) # should be 1
    theta = _np.arccos(z / r)
    phi = _np.arctan2(y, x)
    ra = _np.degrees(phi)
    dec = _np.degrees(_np.pi / 2 - theta)
    if ra < 0:
        ra += 360.0
    ra, dec = get_radec_ofdate(ra, dec, unixtime)

    # Convert ra/dec to az/alt (in degrees)
    return radec_to_azalt(ra, dec, lat, lon, unixtime)
Example #12
def great_circle(**kwargs):
    """
        Named arguments:
        distance  = distance to travel, or numpy array of distances
        azimuth   = angle, in DEGREES of HEADING from NORTH, or numpy array of azimuths
        latitude  = latitude, in DECIMAL DEGREES, or numpy array of latitudes
        longitude = longitude, in DECIMAL DEGREES, or numpy array of longitudes
        rmajor    = radius of earth's major axis. default=6378137.0 (WGS84)
        rminor    = radius of earth's minor axis. default=6356752.3142 (WGS84)

        Returns a dictionary with:
        'latitude' in decimal degrees
        'longitude' in decimal degrees
        'reverse_azimuth' in decimal degrees

    """

    distance  = kwargs.pop('distance')
    azimuth   = np.radians(kwargs.pop('azimuth'))
    latitude  = np.radians(kwargs.pop('latitude'))
    longitude = np.radians(kwargs.pop('longitude'))
    rmajor    = kwargs.pop('rmajor', 6378137.0)
    rminor    = kwargs.pop('rminor', 6356752.3142)
    f         = (rmajor - rminor) / rmajor

    vector_pt = np.vectorize(vinc_pt)
    lat_result, lon_result, angle_result = vector_pt(f, rmajor,
                                                     latitude,
                                                     longitude,
                                                     azimuth,
                                                     distance)
    return {'latitude': np.degrees(lat_result),
            'longitude': np.degrees(lon_result),
            'reverse_azimuth': np.degrees(angle_result)}
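A usage sketch (assuming vinc_pt, the underlying geodesic solver, is in scope): travelling ~111 km due north from the origin should land near latitude 1 degree, since one degree of meridional arc is roughly 111 km:

result = great_circle(distance=111000.0, azimuth=0.0, latitude=0.0, longitude=0.0)
print(result['latitude'])  # roughly 1.0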
Example #13
def waypoints_to_commands(coords):
	# cmd = [[vx, az, time], ...]
	# Convert waypoints into stage velocity commands
	lin_vel = 0.2
	ang_vel = math.radians(45)    # 45 deg/s in rad/s
	move_ang = [0]
	move_dist = [0]
	for i in range(len(coords)-1):
		p1 = coords[i]
		p2 = coords[i+1]
		move_ang.append(math.atan2(p2[1]-p1[1], p2[0]-p1[0]))
		move_dist.append(math.sqrt((p2[1]-p1[1])**2 + (p2[0]-p1[0])**2))

	print(np.degrees(move_ang))
	print(len(move_dist))
	move_cmd = []

	for i in range(len(move_ang)-1):
		ang_cmd = move_ang[i+1] - move_ang[i]  # heading changes are relative
		ang_time = ang_cmd/ang_vel
		dist_cmd = move_dist[i+1]              # each entry is already a segment length
		dist_time = dist_cmd/lin_vel
		move_cmd.append([0, np.sign(ang_cmd), math.fabs(ang_time)])
		move_cmd.append([np.sign(dist_cmd), 0, math.fabs(dist_time)])

	print(move_cmd)
	print(len(move_cmd))
	return move_cmd
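A usage sketch with a hypothetical L-shaped path:

coords = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)]
cmds = waypoints_to_commands(coords)
# expect one [0, +/-1, turn_time] and one [+/-1, 0, drive_time] pair per segment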
Example #14
 def proj(lons, lats, reverse=False):
     if not reverse:
         lambdas, phis = numpy.radians(lons), numpy.radians(lats)
         cos_phis = numpy.cos(phis)
         lambdas -= lambda0
         # calculate the sine of the distance between projection center
         # and each of the points to project
         sin_dist = numpy.sqrt(
             numpy.sin((phi0 - phis) / 2.0) ** 2.0
             + cos_phi0 * cos_phis * numpy.sin(lambdas / 2.0) ** 2.0
         )
         if (sin_dist > sin_pi_over_4).any():
             raise ValueError('some points are too far from the projection '
                              'center lon=%s lat=%s' %
                              (numpy.degrees(lambda0), numpy.degrees(phi0)))
         xx = numpy.cos(phis) * numpy.sin(lambdas)
         yy = cos_phi0 * numpy.sin(phis) \
              - sin_phi0 * cos_phis * numpy.cos(lambdas)
         return xx * EARTH_RADIUS, yy * EARTH_RADIUS
     else:
         # "reverse" mode, arguments are actually abscissae
         # and ordinates in 2d space
         xx, yy = lons / EARTH_RADIUS, lats / EARTH_RADIUS
         cos_c = numpy.sqrt(1 - (xx ** 2 + yy ** 2))
         phis = numpy.arcsin(cos_c * sin_phi0 + yy * cos_phi0)
         lambdas = numpy.arctan2(xx, cos_phi0 * cos_c - yy * sin_phi0)
         return numpy.degrees(lambda0 + lambdas), numpy.degrees(phis)
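Because the reverse branch inverts the forward orthographic projection, a round trip should be near-exact inside the valid region; a sketch (the closure variables lambda0, phi0, sin_phi0, cos_phi0 and sin_pi_over_4 must already be bound, and the test points must lie near the projection centre):

xx, yy = proj(numpy.array([10.0, 12.0]), numpy.array([45.0, 46.0]))
lons, lats = proj(xx, yy, reverse=True)
# expect lons ~ [10, 12] and lats ~ [45, 46]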
Example #15
    def test_degrees(self):

        # the following doesn't make much sense in terms of the name of the
        # routine, but we check it gives the correct result.
        q1 = np.rad2deg(60. * u.degree)
        assert_allclose(q1.value, 60.)
        assert q1.unit == u.degree

        q2 = np.degrees(60. * u.degree)
        assert_allclose(q2.value, 60.)
        assert q2.unit == u.degree

        q3 = np.rad2deg(np.pi * u.radian)
        assert_allclose(q3.value, 180.)
        assert q3.unit == u.degree

        q4 = np.degrees(np.pi * u.radian)
        assert_allclose(q4.value, 180.)
        assert q4.unit == u.degree

        with pytest.raises(TypeError):
            np.rad2deg(3. * u.m)

        with pytest.raises(TypeError):
            np.degrees(3. * u.m)
Example #16
    def testObservedFromICRS(self):
        obs = ObservationMetaData(pointingRA=35.0, pointingDec=-45.0, mjd=43572.0)
        for pmRaList in [self.pm_raList, None]:
            for pmDecList in [self.pm_decList, None]:
                for pxList in [self.pxList, None]:
                    for vRadList in [self.v_radList, None]:
                        for includeRefraction in [True, False]:

                            raRad, decRad = utils._observedFromICRS(self.raList, self.decList,
                                                                    pm_ra=pmRaList, pm_dec=pmDecList,
                                                                    parallax=pxList, v_rad=vRadList,
                                                                    obs_metadata=obs, epoch=2000.0,
                                                                    includeRefraction=includeRefraction)

                            raDeg, decDeg = utils.observedFromICRS(np.degrees(self.raList),
                                                                   np.degrees(self.decList),
                                                                   pm_ra=utils.arcsecFromRadians(pmRaList),
                                                                   pm_dec=utils.arcsecFromRadians(pmDecList),
                                                                   parallax=utils.arcsecFromRadians(pxList),
                                                                   v_rad=vRadList,
                                                                   obs_metadata=obs, epoch=2000.0,
                                                                   includeRefraction=includeRefraction)

                            dRa = utils.arcsecFromRadians(raRad - np.radians(raDeg))
                            np.testing.assert_array_almost_equal(dRa, np.zeros(self.nStars), 9)

                            dDec = utils.arcsecFromRadians(decRad - np.radians(decDeg))
                            np.testing.assert_array_almost_equal(dDec, np.zeros(self.nStars), 9)
Example #17
  def pitch_roll(self, px, pz):
    """works out the pitch (rx) and roll (rz) to apply to an object
    on the surface of the map at this point

    * returns a tuple (pitch, roll) in degrees

    Arguments:
      *px*
        x location
      *pz*
        z location
    """
    px -= self.unif[0]
    pz -= self.unif[2]
    halfw = self.width/2.0
    halfd = self.depth/2.0
    dx = self.width/self.ix
    dz = self.depth/self.iy
    x0 = int(math.floor((halfw + px)/dx + 0.5))
    if x0 < 0: x0 = 0
    if x0 > self.ix-1: x0 = self.ix-1
    z0 = int(math.floor((halfd + pz)/dz + 0.5))
    if z0 < 0: z0 = 0
    if z0 > self.iy-1: z0 = self.iy-1
    normp = array(self.buf[0].normals[z0*self.ix + x0])
    # slight simplification when working out the cross products, as the direction is always (0, 0, 1)
    #sidev = cross(normp, dirctn)
    sidev = array([normp[1], -normp[0], 0.0])
    sidev = sidev / sqrt(sidev.dot(sidev))
    #forwd = cross(sidev, normp)
    forwd = array([-normp[2]*normp[0], -normp[2]*normp[1],
                  normp[0]*normp[0] + normp[1]*normp[1]])
    forwd = forwd / sqrt(forwd.dot(forwd))
    return (degrees(arcsin(-forwd[1])), degrees(arctan2(sidev[1], normp[1])))
Example #18
def place_label(x,y,label,indice=None,cotan=False,color='k'):
    """ Place a semi-transparent label on the curve given by its x and y
    coordinates. If the label is expected to be nearly vertical and may go
    past 90 degrees, cotan=True can be used to compensate (a purely cosmetic
    consideration). 'indice' is the position in the x and y arrays where the
    requested label should be drawn. """
    print(x[0],y[0],label) # a little feedback on what is being computed
    N = len(x)//2          # Default position
    if indice is not None: N=indice  # unless the user imposes a value
    xi,xf = plt.xlim()     # The x limits of the plot
    yi,yf = plt.ylim()     # Same for y
    Xsize = xf - xi        # The width
    # The height and the slope depend on whether the y axis uses a
    # logarithmic scale or not.
    if Plogscale:
        Ysize = np.log10(yf) - np.log10(yi)
        a = (np.log10(y[N+1])-np.log10(y[N-1]))/(x[N+1]-x[N-1]) * Xsize/Ysize
    else:
        Ysize = yf - yi
        a = (y[N+1]-y[N-1])/(x[N+1]-x[N-1]) * Xsize/Ysize
    bbox = plt.gca().get_window_extent() # Retrieve the figure size
    a *= bbox.height / bbox.width        # Correct the slope for the figure size
    rot = np.degrees(np.arctan(a))       # Compute the rotation angle
    if cotan:                            # If we go past the vertical
        rot = 90 - np.degrees(np.arctan(1/a))
    t = plt.text(x[N],y[N],label,        # Put the text at the right place
    ha='center',va='center',color=color,rotation = str(rot)) # with the right rotation
    # Make the text box semi-transparent
    #t.set_bbox(dict(facecolor='w',edgecolor='None',alpha=0.8))
    t.set_bbox(dict(boxstyle="round",facecolor='w',edgecolor='None',alpha=0.85))
Example #19
    def testAppGeoFromICRS(self):
        mjd = 42350.0
        for pmRaList in [self.pm_raList, None]:
            for pmDecList in [self.pm_decList, None]:
                for pxList in [self.pxList, None]:
                    for vRadList in [self.v_radList, None]:
                        raRad, decRad = utils._appGeoFromICRS(self.raList, self.decList,
                                                              pmRaList, pmDecList,
                                                              pxList, vRadList,
                                                              mjd=ModifiedJulianDate(TAI=mjd))

                        raDeg, decDeg = utils.appGeoFromICRS(np.degrees(self.raList),
                                                             np.degrees(self.decList),
                                                             utils.arcsecFromRadians(pmRaList),
                                                             utils.arcsecFromRadians(pmDecList),
                                                             utils.arcsecFromRadians(pxList),
                                                             vRadList,
                                                             mjd=ModifiedJulianDate(TAI=mjd))

                        dRa = utils.arcsecFromRadians(
                            raRad - np.radians(raDeg))
                        np.testing.assert_array_almost_equal(
                            dRa, np.zeros(self.nStars), 9)

                        dDec = utils.arcsecFromRadians(
                            decRad - np.radians(decDeg))
                        np.testing.assert_array_almost_equal(
                            dDec, np.zeros(self.nStars), 9)
Example #20
def raDecFromVec(v):
    """
    Taken from
    http://www.math.montana.edu/frankw/ccp/multiworld/multipleIVP/spherical/learn.htm
    Search for "convert from Cartestion to spherical coordinates"

    Adapted because I'm dealing with declination which is defined
    with 90degrees at zenith
    """

    # Ensure v is a normal vector
    v /= np.linalg.norm(v)

    ra_deg = 0  # otherwise not in namespace
    dec_rad = np.arcsin(v[2])
    s = np.hypot(v[0], v[1])
    if s == 0:
        ra_rad = 0
    else:
        ra_rad = np.arcsin(v[1] / s)
        ra_deg = np.degrees(ra_rad)
        if v[0] >= 0:
            if v[1] < 0:
                ra_deg = 360 + ra_deg
        else:
            # both signs of v[1] reduce to the same correction
            ra_deg = 180 - ra_deg

    raDec = ra_deg, np.degrees(dec_rad)
    return np.array(raDec)
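Two quick spot checks (a sketch; note the function also normalizes its argument in place):

print(raDecFromVec(np.array([1.0, 0.0, 0.0])))  # -> [ 0.  0.]
print(raDecFromVec(np.array([0.0, 0.0, 1.0])))  # -> [ 0. 90.], +z maps to the pole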
Example #21
def makeAlignment(lX,lX_orient,lX_centerx,lX_centery,maxPixel,crop=2):
    imageSize = maxPixel * maxPixel

    #cropped_size = (maxPixel-2*crop)*(maxPixel-2*crop)
    names = lX.columns
    lX_new = np.zeros((lX.shape[0],lX.shape[1]),dtype=np.float32)
    t0 = time()
    for i,img in enumerate(lX.values):
        img = np.reshape(img, (maxPixel, maxPixel)).astype('float32')
        # crop the border
        img[0:crop,:] = 1.0
        img[-crop:,:] = 1.0
        img[:,0:crop] = 1.0
        img[:,-crop:] = 1.0
        img = 1.0 - img

        #print("Orientation:",lX_orient[i], " Degree:",np.degrees(lX_orient[i]))
        #showImage(img,maxPixel,fac=10,matrix=True)
        angle = np.degrees(lX_orient[i])  # orientation is stored in radians
        # getRotationMatrix2D expects degrees, so pass angle directly
        # (the original converted to degrees twice)
        M1 = cv2.getRotationMatrix2D((maxPixel/2,maxPixel/2),angle,1)
        img_new = cv2.warpAffine(img,M1,(maxPixel,maxPixel))
        img_new = 1.0 - img_new
        #img_new = rotate(img, angle, reshape=False)
        #img_new = 255 - img_new
        #showImage(img_new,maxPixel,fac=10,matrix=True)
        lX_new[i] = img_new.ravel().astype('float32')

    print("Alignment done in %0.3fs" % (time() - t0))
    lX_new = pd.DataFrame(lX_new,columns = names,dtype=np.float32)
    print(lX_new.shape)
    print(lX_new.describe())
    return lX_new
Example #22
    def setUp(self):
        self.metadata = {}

        # Below are metadata values that need to be set in order for
        # get_getFocalPlaneCoordinates to work.  If we had been querying the database,
        # these would be set to meaningful values.  Because we are generating
        # an artificial set of inputs that must conform to the baseline SLALIB
        # inputs, these are set arbitrarily by hand.
        self.metadata['pointingRA'] = (numpy.radians(200.0), float)
        self.metadata['pointingDec'] = (numpy.radians(-30.0), float)
        self.metadata['Opsim_rotskypos'] = (1.0, float)

        # these were the LSST site parameters as coded when this unit test was written
        self.test_site=Site(longitude=numpy.degrees(-1.2320792),
                            latitude=numpy.degrees(-0.517781017),
                            height=2650.0,
                            temperature=11.505,
                            pressure=749.3,
                            lapseRate=0.0065,
                            humidity=0.4)

        self.obs_metadata=ObservationMetaData(mjd=50984.371741,
                                     boundType='circle',
                                     boundLength=0.05,
                                     phoSimMetaData=self.metadata,
                                     site=self.test_site)

        self.tol=1.0e-5
Example #23
def sendSearchRequest(matches):
    ra=list(np.degrees(matches[matches['isinobs']==False]['can_ra']))
    dec=list(np.degrees(matches[matches['isinobs']==False]['can_dec']))
    bands='[g,r,i,z]'
    req='http://desdev3.cosmology.illinois.edu:8000/api?username=lzullo&password=lzu70chips&ra=%s&dec=%s&bands=%s' % (ra,dec,bands)
    submit = requests.get(req)
    return submit.json()['job']
Example #24
def GetHealPixRectangles(nside, dbrange, nest):
    hpindex = np.arange(hp.nside2npix(nside))

    vec_corners = hp.boundaries(nside, hpindex, nest=nest)
    vec_corners = np.transpose(vec_corners, (0,2,1))
    vec_corners = np.reshape(vec_corners, (vec_corners.shape[0]*vec_corners.shape[1], vec_corners.shape[2]))
   
    theta_corners, phi_corners = hp.vec2ang(vec_corners)
    # integer division: each HEALPix pixel contributes four corners
    theta_corners = np.reshape(theta_corners, (theta_corners.shape[0]//4, 4))
    phi_corners = np.reshape(phi_corners, (phi_corners.shape[0]//4, 4))

    ra_corners = np.degrees(phi_corners)
    dec_corners = 90.0 - np.degrees(theta_corners)

    rainside = ( (ra_corners > dbrange[0]) & (ra_corners < dbrange[1]) )
    rakeep = np.sum(rainside, axis=-1)
    decinside = ( (dec_corners > dbrange[2]) & (dec_corners < dbrange[3]) )
    deckeep = np.sum(decinside, axis=-1)
    keep = ( (rakeep > 0) & (deckeep > 0) )
    ra_corners, dec_corners, hpindex = Cut(ra_corners, dec_corners, hpindex, cut=keep)

    ramin = np.amin(ra_corners, axis=-1)
    ramax = np.amax(ra_corners, axis=-1)
    decmin = np.amin(dec_corners, axis=-1)
    decmax = np.amax(dec_corners, axis=-1)

    return ramin, ramax, decmin, decmax, hpindex
Example #25
def Jacobsen(h1, Xm_1, h2, Xm_2):
    alp = np.degrees(np.arccos(np.dot(h1, h2) /
                               (np.linalg.norm(h1) * np.linalg.norm(h2))))
    bet = np.degrees(np.arccos(np.dot(Xm_1, Xm_2) /
                               (np.linalg.norm(Xm_1) * np.linalg.norm(Xm_2))))
    if ((alp - bet)**2) > 1:
        print('check your indexing!')

    a = 3.567  # diamond lattice parameter
    # reciprocal lattice parameter (note: the 2*pi factor is included here)
    ast = (2 * np.pi) / a
    B = np.array([[ast, 0, 0], [0, ast, 0], [0, 0, ast]])
    Xm_g = np.cross(Xm_1, Xm_2)
    Xm = np.column_stack([Xm_1, Xm_2, Xm_g])

    # Vector Q1 is described in reciprocal space by its coordinate matrix h1
    Xa_1 = B.dot(h1)
    Xa_2 = B.dot(h2)
    Xa_g = np.cross(Xa_1, Xa_2)
    Xa = np.column_stack((Xa_1, Xa_2, Xa_g))

    R = Xa.dot(np.linalg.inv(Xm))
    U = np.linalg.inv(R)

    UB = U.dot(B)

    return UB
Example #26
    def run(self, stars, visits, **kwargs):
        # XXX-Double check extinction is close to the Opsim transparency
        extinc_mags = visits['transparency']
        if extinc_mags != 0.:
            # need to decide on how to get extinc_mags from Opsim
            # Maybe push some of these params up to be settable?
            SFtheta, SFsf = self.SF.CloudSf(500., 300., 5., extinc_mags, .55)
            # Call the Clouds
            self.cloud.makeCloudImage(SFtheta,SFsf,extinc_mags, fov=self.fov)
            # Interpolate clouds to correct position.  Nearest neighbor for speed?
            nim = self.cloud.cloudimage[0,:].size
            # calc position in cloud image of each star
            starx_interp = (np.degrees(stars['x']) + self.fov/2.)*3600./ self.cloud.pixscale
            stary_interp = (np.degrees(stars['y']) + self.fov/2.)*3600./ self.cloud.pixscale

            # Round off position and make it an int
            starx_interp = np.round(starx_interp).astype(int)
            stary_interp = np.round(stary_interp).astype(int)

            # Handle any stars that are out of the field for some reason
            starx_interp[np.where(starx_interp < 0)] = 0
            starx_interp[np.where(starx_interp > nim-1)] = nim-1
            stary_interp[np.where(stary_interp < 0)] = 0
            stary_interp[np.where(stary_interp > nim-1)] = nim-1

            dmag = self.cloud.cloudimage[starx_interp,stary_interp]
        else:
            dmag = np.zeros(stars.size)
        return dmag
Example #27
def cone(plunge, bearing, angle, segments=100):
    """
    Calculates the longitude and latitude of the small circle (i.e. a cone)
    centered at the given *plunge* and *bearing* with an apical angle of
    *angle*, all in degrees.

    Parameters
    ----------
    plunge : number or sequence of numbers
        The plunge of the center of the cone(s) in degrees. The plunge is
        measured in degrees downward from the end of the feature specified by
        the bearing.
    bearing : number or sequence of numbers
        The bearing (azimuth) of the center of the cone(s) in degrees.
    angle : number or sequence of numbers
        The apical angle (i.e. radius) of the cone(s) in degrees.
    segments : int, optional
        The number of vertices in the small circle.

    Returns
    -------
    lon, lat : arrays
        `num_measurements` x `num_segments` arrays of longitude and latitude in
        radians.
    """
    plunges, bearings, angles = np.atleast_1d(plunge, bearing, angle)
    lons, lats = [], []
    for plunge, bearing, angle in zip(plunges, bearings, angles):
        lat = (90 - angle) * np.ones(segments, dtype=float)
        lon = np.linspace(-180, 180, segments)
        lon, lat = _rotate(lon, lat, -plunge, axis='y')
        lon, lat = _rotate(np.degrees(lon), np.degrees(lat), bearing, axis='x')
        lons.append(lon)
        lats.append(lat)
    return np.vstack(lons), np.vstack(lats)
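A usage sketch (the _rotate helper is internal to the host library):

lon, lat = cone(plunge=25, bearing=110, angle=8, segments=180)
# lon.shape == lat.shape == (1, 180); both are in radians, per the docstring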
Example #28
 def draw(self):
     gL.glPushMatrix()
     if self.b:
         self.draw_rod(self.b)
         gL.glTranslatef(0, 0, self.b)
     gL.glRotatef(degrees(self.gamma), 0, 0, 1)
     if self.d:
         gL.glPushMatrix()
         gL.glRotatef(90, 0, 1, 0)
         self.draw_rod(self.d)
         gL.glPopMatrix()
         gL.glTranslatef(self.d, 0, 0)
     gL.glRotatef(degrees(self.alpha), 1, 0, 0)
     if self.r:
         self.draw_rod(self.r)
         gL.glTranslatef(0, 0, self.r)
     gL.glRotatef(degrees(self.theta), 0, 0, 1)
     if self.shift:
         gL.glPushMatrix()
         shift = self.shift * self.length
         self.draw_rod(shift)
         gL.glTranslatef(0, 0, shift)
         self.draw_joint()
         gL.glPopMatrix()
     else:
         self.draw_joint()
     for child in self.children:
         child.draw()
     gL.glPopMatrix()
Example #29
def plot_lm(d, snrs, l1s, m1s, outroot):
    """ Plot the lm coordinates (relative to phase center) for all candidates.
    """

    outname = os.path.join(d["workdir"], "plot_" + outroot + "_impeak.png")

    snrmin = 0.8 * min(d["sigma_image1"], d["sigma_image2"])
    fig4 = plt.Figure(figsize=(10, 10))
    ax4 = fig4.add_subplot(111)

    # plot positive
    good = n.where(snrs > 0)
    sizes = (snrs[good] - snrmin) ** 5  # set scaling to give nice visual sense of SNR
    xarr = 60 * n.degrees(l1s[good])
    yarr = 60 * n.degrees(m1s[good])
    ax4.scatter(xarr, yarr, s=sizes, facecolor="none", alpha=0.5, clip_on=False)
    # plot negative
    good = n.where(snrs < 0)
    sizes = (n.abs(snrs[good]) - snrmin) ** 5  # set scaling to give nice visual sense of SNR
    xarr = 60 * n.degrees(l1s[good])
    yarr = 60 * n.degrees(m1s[good])
    ax4.scatter(xarr, yarr, s=sizes, marker="x", edgecolors="k", alpha=0.5, clip_on=False)

    ax4.set_xlabel("Dec Offset (amin)")
    ax4.set_ylabel("RA Offset (amin)")
    fov = n.degrees(1.0 / d["uvres"]) * 60.0
    ax4.set_xlim(fov / 2, -fov / 2)
    ax4.set_ylim(-fov / 2, fov / 2)
    canvas4 = FigureCanvasAgg(fig4)
    canvas4.print_figure(outname)
Example #30
def makeObservationMetaData():
    # create a cartoon ObservationMetaData object
    mjd = 52000.0
    alt = numpy.pi/2.0
    az = 0.0
    band = 'r'
    testSite = Site(latitude=numpy.degrees(0.5), longitude=numpy.degrees(1.1), height=3000,
                    temperature=260.0, pressure=725.0, lapseRate=0.005, humidity=0.4)
    obsTemp = ObservationMetaData(site=testSite, mjd=mjd)
    centerRA, centerDec = _raDecFromAltAz(alt, az, obsTemp)
    rotTel = _getRotTelPos(centerRA, centerDec, obsTemp, 0.0)

    obsDict = calcObsDefaults(centerRA, centerDec, alt, az, rotTel, mjd, band,
                 testSite.longitude_rad, testSite.latitude_rad)

    obsDict['Opsim_expmjd'] = mjd
    radius = 0.1
    phoSimMetaData = OrderedDict([
                      (k, (obsDict[k],numpy.dtype(type(obsDict[k])))) for k in obsDict])

    obs_metadata = ObservationMetaData(boundType='circle', boundLength=2.0*radius,
                                       phoSimMetaData=phoSimMetaData, site=testSite)

    return obs_metadata
Example #31
	print "Pixel scale = {0} [arcseconds]".format(options.scale)
	print "Pointing centre RA = {0} [deg]".format(np.round(PC_RA),3)
	print "Pointing centre DEC = {0} [deg]".format(np.round(PC_DEC),3)

	# Creating the Alt, Az, Zenith and radial orthographic vectors.
	Alt, Az, Zen = mwa_alt_az_za(options.obsid, RA, DEC, degrees=True)
	r = np.cos(np.radians(Alt))

	#################################################################################
	# Specifying circle parameters:
	#################################################################################
	
	theta_PA = np.radians(metadata['azimuth'])
	d = np.cos(np.radians(metadata['altitude']))
	
	print "Pointing centre azmiuth = {0} [deg]".format(np.degrees(theta_PA))
	print "Pointing centre altitude = {0} [deg]".format(np.round(metadata['altitude']),5)
	print "Max apparent int flux = {0} [Jy]".format(np.round(np.max(App_int_flux)),3)
	print "Total apparent flux = {0} [Jy]".format(np.round(np.sum(App_int_flux)),3)
		
	#################################################################################
	# Subsetting the pb.
	#################################################################################

	print "########################################################################"
	print "# Finding and subtracting the number of sources in the primary beam pb.#"
	print "########################################################################"

	centre,circ_ind,dRA,dDEC = lobe_subset(RA,DEC,App_int_flux,options.obsid,d,theta_PA,Az=Az,r=r,verb_cond=verbcond,plot_cond=plotcond)

	# Determining the pixel dimensions:
Example #32
def azel2radec(az_deg: float, el_deg: float,
               lat_deg: float, lon_deg: float,
               time: datetime) -> Tuple[float, float]:
    """
    convert azimuth, elevation to right ascension, declination

    Inputs

    az_deg
        Numpy ndarray of azimuth to point [degrees]

    el_deg
        Numpy ndarray of elevation to point [degrees]

    lat_deg
        scalar observer WGS84 latitude [degrees]

    lon_deg
        scalar observer WGS84 longitude [degrees]

    time
        time of observation

    Outputs

    ra_deg
        Numpy ndarray of right ascension values [degrees]

    dec_deg
        Numpy ndarray of declination values [degrees]

    from D.Vallado Fundamentals of Astrodynamics and Applications
    p.258-259
    """
    az = atleast_1d(az_deg)
    el = atleast_1d(el_deg)
    lat = atleast_1d(lat_deg)
    lon = atleast_1d(lon_deg)

    if az.shape != el.shape:
        raise ValueError('az and el must be same shape ndarray')
    if not(lat.size == 1 and lon.size == 1):
        raise ValueError('need one observer and one or more (az, el) pairs.')
    if ((lat < -90) | (lat > 90)).any():
        raise ValueError('-90 <= lat <= 90')
    if ((lon < -180) | (lon > 360)).any():
        raise ValueError('-180 <= lon <= 360')

    az = radians(az)
    el = radians(el)
    lat = radians(lat)
    lon = radians(lon)
# %% Vallado "algorithm 28" p 268
    dec = arcsin(sin(el) * sin(lat) + cos(el) * cos(lat) * cos(az))

    lha = arctan2(-(sin(az) * cos(el)) / cos(dec),
                  (sin(el) - sin(lat) * sin(dec)) / (cos(dec) * cos(lat)))

    lst = datetime2sidereal(time, lon)  # lon, ra in RADIANS

    """ by definition right ascension [0, 360) degrees """
    return degrees(lst - lha) % 360, degrees(dec)
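One property check (a sketch; datetime2sidereal must be importable): a target at the zenith has declination equal to the observer's latitude, whatever the azimuth:

from datetime import datetime

ra, dec = azel2radec(az_deg=0.0, el_deg=90.0, lat_deg=42.0, lon_deg=-71.0,
                     time=datetime(2020, 1, 1))
print(dec)  # ~42.0, since sin(el) = 1 collapses the formula to dec = lat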
Example #33
    def listener_callback(self, msg):
        # create numpy array
        occdata = np.array(msg.data)
        # compute a histogram to identify bins with -1, values between 0 and 50,
        # and values between 50 and 100. The binned_statistic function will also
        # return the bin numbers so we can use that easily to create the image
        occ_counts, edges, binnum = stats.binned_statistic(occdata,
                                                           np.nan,
                                                           statistic='count',
                                                           bins=occ_bins)
        # get width and height of map
        iwidth = msg.info.width
        iheight = msg.info.height
        # calculate total number of bins
        total_bins = iwidth * iheight
        # log the info
        # self.get_logger().info('Unmapped: %i Unoccupied: %i Occupied: %i Total: %i' % (occ_counts[0], occ_counts[1], occ_counts[2], total_bins))

        # find transform to obtain base_link coordinates in the map frame
        # lookup_transform(target_frame, source_frame, time)
        try:
            trans = self.tfBuffer.lookup_transform('map', 'base_link',
                                                   rclpy.time.Time())
        except (LookupException, ConnectivityException,
                ExtrapolationException) as e:
            self.get_logger().info('No transformation found')
            return

        print(trans)
        cur_pos = trans.transform.translation
        cur_rot = trans.transform.rotation
        self.get_logger().info('Trans: %f, %f' % (cur_pos.x, cur_pos.y))
        # convert quaternion to Euler angles
        roll, pitch, yaw = euler_from_quaternion(cur_rot.x, cur_rot.y,
                                                 cur_rot.z, cur_rot.w)
        self.get_logger().info('Rot-Yaw: Rad: %f Deg: %f' %
                               (yaw, np.degrees(yaw)))

        # get map resolution
        map_res = msg.info.resolution
        # get map origin; the struct has fields of x, y, and z
        map_origin = msg.info.origin.position
        # get map grid position for x, y
        grid_x = round((cur_pos.x - map_origin.x) / map_res)
        grid_y = round(((cur_pos.y - map_origin.y) / map_res))
        self.get_logger().info('Grid Y: %i Grid X: %i' % (grid_y, grid_x))

        # binnum go from 1 to 3 so we can use uint8
        # convert into 2D array using column order
        odata = np.uint8(binnum.reshape(msg.info.height, msg.info.width))
        # set current robot location to 0
        odata[grid_y][grid_x] = 0
        # create image from 2D array using PIL
        img = Image.fromarray(odata)
        # find center of image
        i_centerx = iwidth / 2
        i_centery = iheight / 2
        # find how much to shift the image to move grid_x and grid_y to center of image
        shift_x = round(grid_x - i_centerx)
        shift_y = round(grid_y - i_centery)
        self.get_logger().info('Shift Y: %i Shift X: %i' % (shift_y, shift_x))

        # pad image to move robot position to the center
        # adapted from https://note.nkmk.me/en/python-pillow-add-margin-expand-canvas/
        left = 0
        right = 0
        top = 0
        bottom = 0
        if shift_x > 0:
            # pad right margin
            right = 2 * shift_x
        else:
            # pad left margin
            left = 2 * (-shift_x)

        if shift_y > 0:
            # pad bottom margin
            bottom = 2 * shift_y
        else:
            # pad top margin
            top = 2 * (-shift_y)

        # create new image
        new_width = iwidth + right + left
        new_height = iheight + top + bottom
        img_transformed = Image.new(img.mode, (new_width, new_height),
                                    map_bg_color)
        img_transformed.paste(img, (left, top))

        # rotate by 90 degrees so that the forward direction is at the top of the image
        rotated = img_transformed.rotate(np.degrees(yaw) - 90,
                                         expand=True,
                                         fillcolor=map_bg_color)

        # show the image using grayscale map
        # plt.imshow(img, cmap='gray', origin='lower')
        # plt.imshow(img_transformed, cmap='gray', origin='lower')
        plt.imshow(rotated, cmap='gray', origin='lower')
        plt.draw_all()
        # pause to make sure the plot gets created
        plt.pause(0.00000000001)
Example #34
        plt.imshow(warpedimage,vmin=warpedimage.min(),vmax=warpedimage.max(),cmap='gray')
        plt.show()


if __name__ == "__main__":
    # Read image
    ref = cv2.imread('rot1.png',0)
    cmp = cv2.imread('rot2.png',0)
    # plt.imshow(ref,cmap="gray")
    ref = cv2.resize(ref,(360,360))
    cmp = cv2.resize(cmp,(360,360))

    # # reference parameter (you can change this)
    match = imregpoc(ref,cmp)
    # print(match.peak,match.param)
    print(np.degrees(match.getParam()[2]))
    print((match.getParam()[0], match.getParam()[1]))
    # match_para = imregpoc(ref,cmp,fitting='Parabola')
    # print(match_para.peak,match_para.param)
    # match_cog = imregpoc(ref,cmp,fitting='COG')
    # print(match_cog.peak,match_cog.param)

    match.showRotatePeak()
    match.showTranslationPeak()
    # match.stitching()
    # match_para.stitching()
    # match_cog.stitching()
    
    # center = np.array(ref.shape)/2
    # persp = match.poc2warp(center,[-5.40E+01,-2.00E+00,9.72E+01/180*math.pi,6.03E-01])
    # match.stitching(persp)
Example #35
def quadrant_check(RA,DEC,weights=None,gcd_cond=False):
	"""
	This function is designed to deal with fringe case sources, when the RA values are both less than
	pi/2 and greater than 3pi/2. This corresponds to sources in quadrants 4 and 1, where the angle
	wraps back around again. To properly determine the angular distances between sources, and the centre
	of mass (com) sources need to be shifted into two other neighbouring quadrants that don't wrap. This
	function flips the RA values then calculates the new com. It then shifts the com RA value back to
	it's appropriate quadrant. This function also can alternatively return the angular distance between
	each source and the com, if gcd_cond=True. This is useful for determining the size of the images
	required map the lobes.

	Args:

	RA : vector_like; Vector of Right Ascention values in radians.
	DEC : vector_like; Vector of Declination values in radians.
	weights : vector_like; (default=None) Vector of weights.
	gcd_cond : boolean; If True, the function determines the angular distance to every given point 
	relative to the centre of mass. It does this in the shifted frame since the distance from the com
	is only relative.
	"""

	if (np.any(RA > 0.0) and np.any(RA <= pi/2.0)) and (np.any(RA <= 2*pi) and np.any(RA >= (3.0/2.0)*pi)):

		# shifting the RA values into neighbouring unwrapped quadrants. 
		RA[RA > (3.0/2.0)*pi] = pi + (2*pi - RA[RA > (3.0/2.0)*pi])
		RA[RA < pi/2.0] = pi - RA[RA < pi/2.0]

		# Calculating the weighted or unweighted com.
		#RA_com,DEC_com = com(RA,DEC,weights)
		RA_com,DEC_com = com(RA,DEC)#,weights)

		if gcd_cond == True:

			# The angular separation between the com and every source.
			sep_vec = gcd(np.degrees(RA_com),np.degrees(DEC_com),np.degrees(RA),np.degrees(DEC))

			return sep_vec
		else:
			pass
			
		# Shifting the com back to the original quadrant.
		if RA_com >= pi:
	
			RA_com = 2*pi - (RA_com - pi)
	
		elif RA_com < pi:
	
			RA_com = pi - RA_com

		return np.degrees(RA_com), np.degrees(DEC_com)
		#return RA_com, DEC_com

	else:

		# Calculating the weighted or unweighted com.
		RA_com,DEC_com = com(RA,DEC)#,weights)
		#RA_com,DEC_com = com(RA,DEC,weights)

		if gcd_cond == True:

			# The angular separation between the com and every source.
			sep_vec = gcd(np.degrees(RA_com),np.degrees(DEC_com),np.degrees(RA),np.degrees(DEC))

			return sep_vec

		else:

			return np.degrees(RA_com), np.degrees(DEC_com)
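A wrap-around sketch (assuming the com helper returns a plain mean of its inputs): two sources straddling RA = 0 should average to about 5 degrees rather than about 185:

RA = np.radians(np.array([350.0, 20.0]))
DEC = np.radians(np.array([-20.0, -20.0]))
ra_com, dec_com = quadrant_check(RA, DEC)
print(ra_com)  # ~5.0; note the function shifts the input RA array in place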
Example #36
def sidelobe_finder(r,Az,weights=None,plot_cond=False,verb_cond=False):
	"""
	This function is run after the pb has been subtracted from the dataset. This function takes an 
	input radius and azimuth vector for an orthographic polar projection. It then separates the data
	into azimuthal slices that are 2pi/8 in width. It counts the number of sources in each slice,
	selecting the slice with the maximum number of sources. This will likely correspond to a grating
	lobe. It then finds the weighted average radius and position angle of the slice, which should be close
	to the centre of the grating lobe. This function then outputs the weighted average radius and 
	position angle.

	Args:

	r : vector_like; Vector of radius values for the orthographic polar projection.
	Az : vector_like; Vector of azimuthal values for the orthographic polar projection.
	weights : vector_like; (default = None) Vector of weights with the same dimensions as the radius and
	azimuth vectors. This vector is used to find the weighted average radius and position angle for 
	the azimuthal slice with the most number of sources.
	plot_cond : boolean; (default = False) Option to plot the orthographic projection in polar coordinates, 
	with the defined subsetting regions.
	verb_cond : boolean; (default = False) Option to give verbose output, this prints additional information 
	to the command line about the operations of the function. This is useful for diagnostics tests.
	"""
	phi_slice = np.linspace(0,2*pi,9)

	N_sources_per_slice = []

	for i in range(len(phi_slice)):

		if i==0:

			pass

		else:
			
			# Creating a temporary phi slice radius vector.
			r_tpm = r[np.logical_and(Az >= np.degrees(phi_slice[i-1]),Az <= np.degrees(phi_slice[i]))]
			
			# Determining the number of sources per slice.
			N_sources_per_slice.append(len(r_tpm))

			if verb_cond == True:
				print "Azimuthal slice: [{0},{1}] [deg]".format(np.round(np.degrees(phi_slice[i-1]),3),np.round(np.degrees(phi_slice[i]),3))
				print "Mean radius = {0} [sin(theta)]".format(np.round(np.mean(r_tpm),3))
				print "Number of sources in slice = {0}\n".format(len(r_tpm))

			if plot_cond == True and weights is not None:
			
				# Creating a phi slice apparent flux vector.
				App_flux_slice = weights[np.logical_and(Az >= np.degrees(phi_slice[i-1]),Az <= np.degrees(phi_slice[i]))]

				# Plotting the histogram of every slice.
				plt.hist(np.degrees(np.arcsin(r_tpm)),bins=25,edgecolor='k',label='radius',weights=App_flux_slice)
				plt.title(r"$\theta \in  [{0},{1}]  $".format(np.round(np.degrees(phi_slice[i-1]),3),np.round(np.degrees(phi_slice[i]),3)))
				plt.xlabel(r'$\rm{radius [\sin(\theta)]}$',fontsize=14)
				plt.show()
				plt.close()

			else:

				pass

	if np.max(N_sources_per_slice) <= 10:
		# Case when there is no clear sidelobe, return None.
		return None

	else:
		pass

	# Index for the slice with the max number of sources.
	Max_slice_ind = np.argmax(N_sources_per_slice)

	sub_set_ind = np.logical_and(Az >= np.degrees(phi_slice[Max_slice_ind]),Az <= np.degrees(phi_slice[Max_slice_ind+1]))

	# Subset of the orthographic radius values for sources in slice.
	r_max_slice = r[sub_set_ind]
	
	# Subset of azimuth values for sources in slice.
	Az_max_slice = Az[sub_set_ind]

	if weights is None:

		# Determining the position angle of the maximum slice.
		PA_max_slice = np.radians(np.mean(Az_max_slice))
		# Determining the mean radius for the maximum slice.
		r_mean_max_slice = np.mean(r_max_slice)

	else:
		# weights will need to be subsetted too.
		weights = weights[sub_set_ind]

		# This option is for determining the weighted average.
		PA_max_slice = np.radians(np.sum(Az_max_slice*weights)/np.sum(weights))

		r_mean_max_slice = np.sum(r_max_slice*weights)/np.sum(weights)

	return r_mean_max_slice, PA_max_slice
Example #37
def lobe_subset(RA,DEC,Flux,OBSID,d,PA,R=0.1,Az=None,r=None,plot_cond=False,output_cond=False,verb_cond=False):
	"""
	This function takes an input RA and DEC vector, transforms them into their corresponding Alt and Az vectors for a given
	OBSID. It then uses the position angle of the pointing centre in azimuthal cooridinates, as well as the distance to the
	poitning centre from zenith to define the centre of the primary beam (pb). It then subsets the sources in the pb by 
	drawing a circle in polar coordinates using the functions 'draw_circle()' and 'circle_subset()'. It then iterates this
	process by increasing the input radius 'R' by i*0.01. It repeats this process until the same number of sources are found 
	in subsequent iterations. The function the determines the weighted centre of mass, using the integrated flux densities
	of sources, and writes the string of the weighted RA and DEC in 'hms', 'dms' format. This process can be repeated for
	the grating lobes, or sidelobes of the tile beam pattern, so long as the position angle 'PA', and distance to the 
	supposed centre 'd' is defined.

	Args:
	RA : vector_like [deg]; Vector of right ascension positions in degrees.
	DEC : vector_like [deg]; Vector of declination positions in degrees.
	Flux : Vector_like; Vector of the integrated apparent flux densities for the given sources, corresponds to the RA and 
	OBSID : scalar; GPS time of the obsid.
	DEC positions.
	d : scalar; Radial distance in the orthographic projection from zenith to the supposed centre of the lobe.
	PA : scalar; Position angle of the supposed centre of the lobe.
	R : scalar; Initial radius of arbitrary circle, (default = 0.1).
	Az : Vector_like; (default = None) Optional to include the azimuth vector for each source, computed from the OBSID
	r : Vector_like; (default = None) Optional to include the rorthographic radius vector for each source, computed from the OBSID
	plot_cond : boolean; (default = False) Option to plot the orthographic projection in polar coordinates, with the defined
	subsetting regions.
	output_cond : boolean; (default = False) Option to give addition output, this provides the subsetted RA, DEC, Az, r vectors.
	The non-verbose output gives only the circle indices.
	verb_cond : boolean; (default = False) Option to give verbose output, this prints additional information to the command
	line about the operations of the function. This is useful for diagnostics tests.
	"""
	
	if plot_cond == True:

		fig = plt.figure(figsize = (12.5,9.5), dpi=90)
	
	if verb_cond == True:
		
		print "Radius of search circle = {0}".format(R)
		print "Distance to circle centre = {0}".format(np.round(d,3))

	if Az is None or r is None:

		# Case when the altitude and Azimuth are not provided.
		Alt, Az, Zen = mwa_alt_az_za(OBSID, RA, DEC, degrees=True)
		r = np.cos(np.radians(Alt))
	else:
		pass

	# Circle growing condition.
	lobe_cond = True
	dR = (2.0/100.0)

	for i in range(100):

		if i == 0:

			# Getting the index of sources in the circle.
			circ_ind = circle_subset(R,d,PA,r,np.radians(Az))
			
			# The number of sources in the circle
			N_circ_sources = len(circ_ind)

		else:

			# Getting the index of sources in the circle.
			circ_ind = circle_subset(R + i*dR,d,PA,r,np.radians(Az))

			if len(circ_ind) != N_circ_sources:

				# Update the number of sources in the circle.
				N_circ_sources = len(circ_ind)

			elif len(circ_ind) == N_circ_sources:

				# Else if the number of sources doesn't change then subset out pb sources.
				r_nu = np.delete(r,circ_ind)
				Az_nu = np.delete(Az,circ_ind)

				# Specifying the primary beam RA and DEC subsets.
				# Can use these and the com function to determine the centre of the image, as well as the size.
				RA_sub = RA[circ_ind]
				DEC_sub = DEC[circ_ind]
				Flux_sub = Flux[circ_ind]

				# Defining the centre of mass RA and DEC.
				#RA_sub_wcent, DEC_sub_wcent = com(RA_sub,DEC_sub,weights=Flux_sub)
				RA_sub_wcent, DEC_sub_wcent = quadrant_check(np.radians(RA_sub),np.radians(DEC_sub),weights=Flux_sub,gcd_cond=False)
				print "RA min, RA max",np.min(RA_sub),np.max(RA_sub)
				print "RA com",RA_sub_wcent

				# Getting the hmsdms format of the pointing centre.
				Cent_hmsdms_string = SkyCoord(ra=RA_sub_wcent*u.degree, dec=DEC_sub_wcent*u.degree).to_string('hmsdms')

				# Condition to break the for loop.
				lobe_cond = False

		if plot_cond == True:
				
			ax1 = fig.add_subplot(111,projection="polar")
			pcm1 = ax1.scatter(np.radians(np.delete(Az,circ_ind)), np.delete(r,circ_ind), \
				c=np.log10(np.delete(Flux,circ_ind)), cmap='viridis')
			
			radius, phi_vec = draw_circle(R + i*dR,d,PA)
			
			ax1.plot(phi_vec,radius)
			ax1.set_rmin(0.0)
			ax1.set_rmax(1.2)
			ax1.set_theta_zero_location('N')
			fig.colorbar(pcm1, ax=ax1, label='Apparent flux')
			
			plt.show(block=False)
			plt.pause(0.5)
			plt.clf()
			#plt.close()

		if lobe_cond == False:

			# Exit the for loop.
			break

	# Returning the angular distance from the com to each point.
	dtheta = quadrant_check(np.radians(RA_sub),np.radians(DEC_sub),weights=Flux_sub,gcd_cond=True)

	# This is used to determine the scale of the images.
	dDEC = np.abs(np.max(DEC[circ_ind]) - np.min(DEC[circ_ind]))
	#dRA = np.abs(np.max(RA[circ_ind]) - np.min(RA[circ_ind]))
	dRA = np.degrees(np.arccos((np.cos(2*np.max(np.radians(dtheta)))/np.cos(np.radians(dDEC)))))

	if verb_cond == True:
		print "Final circle radius = {0}".format(R + i*(1.0/100.0))
		print "Number of sources in lobe = {0}".format(len(circ_ind))
		print "Max(RA-pb) = {0} [deg], Min(RA-pb) = {1} [deg]".format(np.round(np.float(np.max(RA[circ_ind])),3),np.round(np.float(np.min(RA[circ_ind])),3))
		print "Max(DEC-pb) = {0} [deg], Min(DEC-pb) = {1} [deg]".format(np.round(np.float(np.max(DEC[circ_ind])),3),np.round(np.float(np.min(DEC[circ_ind])),3))
		print "Weighted centre of mass {0}".format(Cent_hmsdms_string)

	if output_cond == True:

		# Verbose output condition (RA_sub and DEC_sub are the pb subsets defined above).
		return Cent_hmsdms_string, RA_sub, DEC_sub, r_nu, Az_nu, dRA, dDEC

	else:
		
		# Non-verbose output.
		return Cent_hmsdms_string, circ_ind, dRA, dDEC
Example #38
    def _extract_angle(self, dataset, cfg, src_meta):
        """Extract data for vector direction from dataset.
        Angles are expected to be counter-clockwise from east in degrees.
        Angles must be recomputed to match the output projection."""

        # Get metadata
        u_channel = src_meta['u_channel']
        v_channel = src_meta['v_channel']
        uv_offset = src_meta['uv_offset']
        uv_scale = src_meta['uv_scale']
        uv_nodatavalue = src_meta['uv_nodatavalue']
        angle_channel = src_meta['angle_channel']
        angle_offset = src_meta['angle_offset']
        angle_scale = src_meta['angle_scale']

        # Get projection settings
        x_origin, pixel_col_width, pixel_row_width, y_origin, \
            pixel_col_height, pixel_row_height = dataset.GetGeoTransform()

        # Get u/v and projected x/y
        u_band = dataset.GetRasterBand(u_channel)
        v_band = dataset.GetRasterBand(v_channel)
        u_array = u_band.ReadAsArray()
        v_array = v_band.ReadAsArray()
        output_shape = u_array.shape
        x_size, y_size = output_shape
        if uv_nodatavalue is not None:
            valid = numpy.where(u_array != uv_nodatavalue)
            u = u_array[valid]
            v = v_array[valid]
            x1 = valid[1] * pixel_col_width + valid[0] * pixel_row_width
            y1 = valid[1] * pixel_col_height + valid[0] * pixel_row_height
        else:
            u = u_array
            v = v_array
            col = numpy.arange(y_size)
            row = numpy.arange(x_size)
            x1 = numpy.tile(col * pixel_col_width, x_size) + \
                numpy.repeat(row * pixel_row_width, y_size)
            y1 = numpy.tile(col * pixel_col_height, x_size) + \
                numpy.repeat(row * pixel_row_height, y_size)

        u = u * uv_scale + uv_offset
        v = v * uv_scale + uv_offset
        x1 = x_origin + x1
        y1 = y_origin + y1

        # Compute angle from u/v
        real_angle = numpy.degrees(numpy.arctan2(v, u))
        del u, v, u_array, v_array

        # Reverse projection to get (lon,lat) for each (x,y)
        output_proj = get_proj4(cfg['output_proj'], src_meta)
        proj = pyproj.Proj(output_proj)

        # Get lon/lat for each x/y
        lons1, lats1 = proj(x1, y1, inverse=True)

        # Use an arbitrary distance (1km)
        dists = numpy.ndarray(shape=lons1.shape)
        dists.fill(1000.0)

        # pyproj.Geod.fwd expects bearings to be clockwise angles from north
        # (in degrees).
        fixed_angles = 90.0 - real_angle

        # Interpolate a destination from grid point and data direction
        geod = pyproj.Geod(ellps='WGS84')
        lons2, lats2, bearings2 = geod.fwd(lons1, lats1, fixed_angles, dists)
        del bearings2, lons1, lats1, real_angle, fixed_angles, dists

        # Warp destination to output projection
        x2, y2 = proj(lons2, lats2)
        del lons2, lats2

        # Fix issue when an interpolated point is not reprojected in the same
        # longitude range as its origin (applies to cylindrical projections only)
        ## NEW metadata.py
        ## Before
        #if int(cfg['output_proj']) in (4326, 3857, 900913):
        ## Now
        if cfg['output_proj_type'] == 'cylindric':
        ## \NEW metadata.py
            extent = [float(x) for x in cfg['extent'].split(' ')]
            vport_bottom, vport_left, vport_top, vport_right = extent
            vport_x_extent = vport_right - vport_left
            x2 = numpy.mod(x2 - (x1 + vport_left), vport_x_extent) \
                + (x1 + vport_left)

        # Compute angle in output projection between [0, 360] degrees
        projected_angles = numpy.arctan2(y2 - y1, x2 - x1)
        ranged_angles = numpy.mod(360 + numpy.degrees(projected_angles), 360)
        del x1, y1, x2, y2

        # Rescale angle
        scaled_angles = (ranged_angles - angle_offset) / angle_scale
        scaled_angles = numpy.round(scaled_angles).astype('uint8')

        # Rebuild matrix from flattened data
        if uv_nodatavalue is not None:
            nodatavalue = src_meta['nodatavalues'][angle_channel - 1]
            angle = numpy.empty(output_shape, dtype='uint8')
            angle.fill(nodatavalue)
            angle[valid] = scaled_angles
            return angle
        else:
            scaled_angles.shape = output_shape
            return scaled_angles
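
# A minimal, self-contained sketch of the angle-reprojection trick used by
# _extract_angle above, assuming pyproj is available: a direction given as
# (u, v) components is turned into a geodesic bearing, stepped ~1 km with
# pyproj.Geod.fwd, and the step is measured again in the target projection.
# The projection string and the sample coordinates are illustrative only.
import numpy
import pyproj

def reproject_direction(lon, lat, u, v, proj_str='+proj=merc +ellps=WGS84'):
    """Angle (degrees, CCW from the projected x-axis) of vector (u, v)."""
    proj = pyproj.Proj(proj_str)
    geod = pyproj.Geod(ellps='WGS84')
    # counter-clockwise from east -> clockwise from north (geodesic bearing)
    bearing = 90.0 - numpy.degrees(numpy.arctan2(v, u))
    # step an arbitrary 1 km along the bearing, as the method above does
    lon2, lat2, _ = geod.fwd(lon, lat, bearing, 1000.0)
    x1, y1 = proj(lon, lat)
    x2, y2 = proj(lon2, lat2)
    return numpy.mod(numpy.degrees(numpy.arctan2(y2 - y1, x2 - x1)), 360.0)

# e.g. reproject_direction(5.0, 45.0, 1.0, 1.0) is close to 45 degrees,
# since Mercator is conformal and preserves local angles
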
Example #39
0
def crosscorrelate(results, low, high, rotor, debug=False):
    '''Obtain the phase difference between the accelerometer and ir signals.

    The function has been tested and produces reproducible results.
    The result is not very accurate.
    With a sample frequency of 952 Hz and a rotor frequency of
    100 Hz, roughly 9.5 measurements are available per period, so the outcome
    in degrees is not accurate.
    Therefore the function get_details was developed
    and is used as an alternative.
    The code also has the option to use a fake signal.
    Code was copied from:
    https://stackoverflow.com/questions/6157791/find-phase-difference-between-two-inharmonic-

    Keyword arguments:
    results -- dictionary resulting from the main.main function
    low -- low frequency cut in Hz, used to filter noise,
           should be lower than the rotor frequency
    high -- high frequency cut in Hz, used to filter noise,
            should be higher than the rotor frequency
    rotor -- frequency at which the rotor is thought to rotate
    debug -- makes fake ac_meas and ir_meas signals, used to test code
    '''
    ac_meas = results['ac_meas']
    ir_meas = results['ir_meas']
    freq = rotor  # hertz, used to calculate phase shift
    N = len(ac_meas)
    T = 1/results['sample_freq']
    if debug:
        print("debug activated")
        phasediff = np.pi*debug
        # timeshift = round(debug/T)
        x = np.linspace(0.0, N*T, N)
        ir_meas = np.sin(freq*2*np.pi*x+phasediff)
        # ir_meas = np.roll(ir_meas, timeshift)
        ac_meas = np.sin(freq*2*np.pi*x)
    # band pass filter
    ac_meas = butter_bandpass_filter(ac_meas,
                                     low,
                                     high,
                                     results['sample_freq'],
                                     order=6)
    ir_meas = butter_bandpass_filter(ir_meas,
                                     low,
                                     high,
                                     results['sample_freq'],
                                     order=6)
    # mean centering, SDV scaling
    ac_meas = ac_meas-np.mean(ac_meas)
    ac_meas = ac_meas/np.std(ac_meas)
    ir_meas = ir_meas-np.mean(ir_meas)
    ir_meas = ir_meas/np.std(ir_meas)
    # calculate cross correlation of the two signals
    xcorr = correlate(ir_meas, ac_meas)
    # delta time array to match xcorr
    t = np.linspace(0.0, N*T, N, endpoint=False)
    dt = np.linspace(-t[-1], t[-1], 2*N-1)
    # dt = np.arange(1-N, N)
    recovered_time_shift = dt[xcorr.argmax()]
    # force the phase shift to be in [-pi:pi]
    recovered_phase_shift = (
        2*np.pi*(((0.5 + recovered_time_shift*freq) % 1.0) - 0.5))
    print("Recovered time shift {}".format(recovered_time_shift))
    print("Recovered phase shift {} radian".format(recovered_phase_shift))
    print("Recovered phase shift {} degrees"
          .format(np.degrees(recovered_phase_shift)))
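
# A small self-contained check of the cross-correlation idea above, assuming
# scipy is available: recover a known phase shift between two clean sine
# waves (no band-pass filtering is needed for noise-free signals). The sample
# rate and rotor frequency mirror the values quoted in the docstring; the
# rest is made up for the demo.
import numpy as np
from scipy.signal import correlate

fs, freq, true_phase = 952.0, 100.0, np.pi / 3
N = 4096
t = np.linspace(0.0, N / fs, N, endpoint=False)
ac = np.sin(2 * np.pi * freq * t)
ir = np.sin(2 * np.pi * freq * t + true_phase)
xcorr = correlate(ir, ac)
dt = np.linspace(-t[-1], t[-1], 2 * N - 1)
shift = dt[xcorr.argmax()]
phase = 2 * np.pi * (((0.5 + shift * freq) % 1.0) - 0.5)  # wrap to [-pi, pi]
# magnitude ~60 degrees, up to the sampling resolution; the sign follows the
# correlation convention used above
print(np.degrees(phase))
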
Example #40
0
def add_geo_to_arrivals(arrivals,
                        source_latitude_in_deg,
                        source_longitude_in_deg,
                        receiver_latitude_in_deg,
                        receiver_longitude_in_deg,
                        radius_of_planet_in_km,
                        flattening_of_planet,
                        resample=False,
                        sampleds=5.0):
    """
    Add geographical information to arrivals.

    :param arrivals: Set of taup arrivals
    :type arrivals: :class:`Arrivals`
    :param source_latitude_in_deg: Source location latitude in degrees
    :type source_latitude_in_deg: float
    :param source_longitude_in_deg: Source location longitude in degrees
    :type source_longitude_in_deg: float
    :param receiver_latitude_in_deg: Receiver location latitude in degrees
    :type receiver_latitude_in_deg: float
    :param receiver_longitude_in_deg: Receiver location longitude in degrees
    :type receiver_longitude_in_deg: float
    :param radius_of_planet_in_km: Radius of the planet in km
    :type radius_of_planet_in_km: float
    :param flattening_of_planet: Flattening of planet (0 for a sphere)
    :type flattening_of_planet: float
    :param resample: adds sample points to allow for easy cartesian
                     interpolation. This is especially useful for phases
                     like Pdiff.
    :type resample: boolean
    :param sampleds: approximate spacing in km between the extra sample
                     points added when resampling
    :type sampleds: float


    :return: List of ``Arrival`` objects, each of which has the time,
        corresponding phase name, ray parameter, takeoff angle, etc. as
        attributes.
    :rtype: :class:`Arrivals`
    """
    if geodetics.HAS_GEOGRAPHICLIB:
        if not geodetics.GEOGRAPHICLIB_VERSION_AT_LEAST_1_34:
            # geographiclib is installed, but the version is too old,
            # and obspy/geodetics does not help much
            msg = ("This functionality needs the Python module "
                   "'geographiclib' in version 1.34 or higher.")
            raise ImportError(msg)
        ellipsoid = Geodesic(a=radius_of_planet_in_km * 1000.0,
                             f=flattening_of_planet)
        g = ellipsoid.Inverse(source_latitude_in_deg, source_longitude_in_deg,
                              receiver_latitude_in_deg,
                              receiver_longitude_in_deg)
        azimuth = g['azi1']
        line = ellipsoid.Line(source_latitude_in_deg, source_longitude_in_deg,
                              azimuth)

        # We may need to update many arrival objects
        # and each could have pierce points and a
        # path
        for arrival in arrivals:
            # check if we go in minor or major arc direction
            distance = arrival.purist_distance % 360.
            if distance > 180.:
                sign = -1
                az_arr = (azimuth + 180.) % 360.
            else:
                sign = 1
                az_arr = azimuth
            arrival.azimuth = az_arr

            if arrival.pierce is not None:
                geo_pierce = np.empty(arrival.pierce.shape, dtype=TimeDistGeo)

                for i, pierce_point in enumerate(arrival.pierce):
                    signed_dist = np.degrees(sign * pierce_point['dist'])
                    pos = line.ArcPosition(signed_dist)
                    geo_pierce[i] = (pierce_point['p'], pierce_point['time'],
                                     pierce_point['dist'],
                                     pierce_point['depth'], pos['lat2'],
                                     pos['lon2'])
                arrival.pierce = geo_pierce

            # choose whether we need to resample the trace
            if arrival.path is not None:
                if resample:
                    rplanet = radius_of_planet_in_km
                    # compute approximate distance between sampling points
                    # mindist = 200  # km
                    mindist = sampleds
                    radii = rplanet - arrival.path['depth']
                    rmean = np.sqrt(radii[1:] * radii[:-1])
                    diff_dists = rmean * np.diff(arrival.path['dist'])
                    npts_extra = np.floor(diff_dists / mindist).astype(int)

                    # count number of extra points and initialize array
                    npts_old = len(arrival.path)
                    npts_new = int(npts_old + np.sum(npts_extra))
                    geo_path = np.empty(npts_new, dtype=TimeDistGeo)

                    # now loop through path, adding extra points
                    i_new = 0
                    for i_old, path_point in enumerate(arrival.path):
                        # first add the original point at the new index
                        dist = np.degrees(sign * path_point['dist'])
                        pos = line.ArcPosition(dist)
                        geo_path[i_new] = (path_point['p'], path_point['time'],
                                           path_point['dist'],
                                           path_point['depth'], pos['lat2'],
                                           pos['lon2'])
                        i_new += 1

                        if i_old > npts_old - 2:
                            continue

                        # now check if we need to add new points
                        npts_new = npts_extra[i_old]
                        if npts_new > 0:
                            # if yes, distribute them linearly between the old
                            # and the next point
                            next_point = arrival.path[i_old + 1]
                            dist_next = np.degrees(sign * next_point['dist'])
                            dists_new = np.linspace(dist, dist_next,
                                                    npts_new + 2)[1:-1]

                            # now get all interpolated parameters
                            xs = [dist, dist_next]
                            ys = [path_point['p'], next_point['p']]
                            p_interp = np.interp(dists_new, xs, ys)
                            ys = [path_point['time'], next_point['time']]
                            time_interp = np.interp(dists_new, xs, ys)
                            ys = [path_point['depth'], next_point['depth']]
                            depth_interp = np.interp(dists_new, xs, ys)
                            pos_interp = [
                                line.ArcPosition(dist_new)
                                for dist_new in dists_new
                            ]
                            lat_interp = [
                                point['lat2'] for point in pos_interp
                            ]
                            lon_interp = [
                                point['lon2'] for point in pos_interp
                            ]

                            # add them to geo_path
                            # dists_new --> np.radians(dists_new), modified by Hongjian Fang
                            for i_extra in range(npts_new):
                                geo_path[i_new] = (p_interp[i_extra],
                                                   time_interp[i_extra],
                                                   np.radians(
                                                       dists_new[i_extra]),
                                                   depth_interp[i_extra],
                                                   lat_interp[i_extra],
                                                   lon_interp[i_extra])
                                i_new += 1

                    arrival.path = geo_path
                else:
                    geo_path = np.empty(arrival.path.shape, dtype=TimeDistGeo)
                    for i, path_point in enumerate(arrival.path):
                        signed_dist = np.degrees(sign * path_point['dist'])
                        pos = line.ArcPosition(signed_dist)
                        geo_path[i] = (path_point['p'], path_point['time'],
                                       path_point['dist'], path_point['depth'],
                                       pos['lat2'], pos['lon2'])
                    arrival.path = geo_path
    else:
        # geographiclib is not installed ...
        # and  obspy/geodetics does not help much
        msg = "You need to install the Python module 'geographiclib' in " + \
              "order to add geographical information to arrivals."
        raise ImportError(msg)

    return arrivals
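
# A minimal sketch of the geographiclib calls used above: solve the inverse
# problem between source and receiver, build a geodesic line along the
# resulting azimuth, then evaluate points on it by arc distance in degrees.
# The coordinates and planet radius below are illustrative only.
from geographiclib.geodesic import Geodesic

ellipsoid = Geodesic(a=6371000.0, f=0.0)  # spherical planet, radius in metres
g = ellipsoid.Inverse(10.0, 20.0, -30.0, 140.0)  # src lat, lon, rec lat, lon
line = ellipsoid.Line(10.0, 20.0, g['azi1'])
for arc_deg in (0.0, 30.0, 60.0):
    pos = line.ArcPosition(arc_deg)
    print(arc_deg, pos['lat2'], pos['lon2'])
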
Example #41
0
def mindist(a):
    d = map(a.difference, B.skydir.values)
    n = np.argmin(d)
    return (B.index[n], np.degrees(d[n]))
def get_thresholded_rotated(im_path):

    #read image
    img = cv2.imread(im_path)

    img = cv2.resize(img, (600, 800), interpolation=cv2.INTER_LINEAR)

    sat = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 1]
    val = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 2]
    sat = cv2.medianBlur(sat, 11)
    val = cv2.medianBlur(val, 11)

    #create threshold
    thresh_S = cv2.adaptiveThreshold(sat, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY, 401, 10)
    thresh_V = cv2.adaptiveThreshold(val, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY, 401, 10)

    #mean, std
    mean_S, stdev_S = cv2.meanStdDev(img, mask=255 - thresh_S)
    mean_S = mean_S.ravel().flatten()
    stdev_S = stdev_S.ravel()

    #chromaticity
    chrom_S = chromacity_distortion(img, mean_S, stdev_S)
    chrom255_S = cv2.normalize(chrom_S,
                               chrom_S,
                               alpha=0,
                               beta=255,
                               norm_type=cv2.NORM_MINMAX).astype(
                                   np.uint8)[:, :, None]

    mean_V, stdev_V = cv2.meanStdDev(img, mask=255 - thresh_V)
    mean_V = mean_V.ravel().flatten()
    stdev_V = stdev_V.ravel()
    chrom_V = chromacity_distortion(img, mean_V, stdev_V)
    chrom255_V = cv2.normalize(chrom_V,
                               chrom_V,
                               alpha=0,
                               beta=255,
                               norm_type=cv2.NORM_MINMAX).astype(
                                   np.uint8)[:, :, None]

    #create different thresholds
    thresh2_S = cv2.adaptiveThreshold(chrom255_S, 255,
                                      cv2.ADAPTIVE_THRESH_MEAN_C,
                                      cv2.THRESH_BINARY, 401, 10)
    thresh2_V = cv2.adaptiveThreshold(chrom255_V, 255,
                                      cv2.ADAPTIVE_THRESH_MEAN_C,
                                      cv2.THRESH_BINARY, 401, 10)

    #thresholded image
    thresh = cv2.bitwise_and(thresh2_S, cv2.bitwise_not(thresh2_V))

    #find countours and keep max
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    big_contour = max(contours, key=cv2.contourArea)

    # fit ellipse to leaf contours
    ellipse = cv2.fitEllipse(big_contour)
    (xc, yc), (d1, d2), angle = ellipse

    print('thresh shape: ', thresh.shape)
    #print(xc,yc,d1,d2,angle)

    rmajor = max(d1, d2) / 2

    rminor = min(d1, d2) / 2

    origi_angle = angle

    if angle > 90:
        angle = angle - 90
    else:
        angle = angle + 90

    #calc major axis line
    xtop = xc + math.cos(math.radians(angle)) * rmajor
    ytop = yc + math.sin(math.radians(angle)) * rmajor
    xbot = xc + math.cos(math.radians(angle + 180)) * rmajor
    ybot = yc + math.sin(math.radians(angle + 180)) * rmajor

    #calc minor axis line
    xtop_m = xc + math.cos(math.radians(origi_angle)) * rminor
    ytop_m = yc + math.sin(math.radians(origi_angle)) * rminor
    xbot_m = xc + math.cos(math.radians(origi_angle + 180)) * rminor
    ybot_m = yc + math.sin(math.radians(origi_angle + 180)) * rminor

    #determine which axis endpoint has the larger x (and which the smaller)
    if max(xtop, xbot) == xtop:
        x_tij = xtop
        y_tij = ytop

        x_b_tij = xbot
        y_b_tij = ybot
    else:
        x_tij = xbot
        y_tij = ybot

        x_b_tij = xtop
        y_b_tij = ytop

    if max(xtop_m, xbot_m) == xtop_m:
        x_tij_m = xtop_m
        y_tij_m = ytop_m

        x_b_tij_m = xbot_m
        y_b_tij_m = ybot_m
    else:
        x_tij_m = xbot_m
        y_tij_m = ybot_m

        x_b_tij_m = xtop_m
        y_b_tij_m = ytop_m

    print('-----')
    print(x_tij, y_tij)

    rect_size = 100
    """
    calculate regions at the ends of the major axis of the ellipse;
    this is done by creating a square region of rect_size x rect_size, with the axis endpoint at the center of the square
    """
    x_min_tij = int(0 if x_tij - rect_size < 0 else x_tij - rect_size)
    x_max_tij = int(thresh.shape[1] -
                    1 if x_tij + rect_size > thresh.shape[1] else x_tij +
                    rect_size)

    y_min_tij = int(0 if y_tij - rect_size < 0 else y_tij - rect_size)
    y_max_tij = int(thresh.shape[0] -
                    1 if y_tij + rect_size > thresh.shape[0] else y_tij +
                    rect_size)

    x_b_min_tij = int(0 if x_b_tij - rect_size < 0 else x_b_tij - rect_size)
    x_b_max_tij = int(thresh.shape[1] -
                      1 if x_b_tij + rect_size > thresh.shape[1] else x_b_tij +
                      rect_size)

    y_b_min_tij = int(0 if y_b_tij - rect_size < 0 else y_b_tij - rect_size)
    y_b_max_tij = int(thresh.shape[0] -
                      1 if y_b_tij + rect_size > thresh.shape[0] else y_b_tij +
                      rect_size)

    sum_red_region = np.sum(thresh[y_min_tij:y_max_tij, x_min_tij:x_max_tij])

    sum_yellow_region = np.sum(thresh[y_b_min_tij:y_b_max_tij,
                                      x_b_min_tij:x_b_max_tij])
    """
    calculate regions at the ends of the minor axis of the ellipse;
    this is done by creating a square region of rect_size x rect_size, with the axis endpoint at the center of the square
    """
    x_min_tij_m = int(0 if x_tij_m - rect_size < 0 else x_tij_m - rect_size)
    x_max_tij_m = int(thresh.shape[1] -
                      1 if x_tij_m + rect_size > thresh.shape[1] else x_tij_m +
                      rect_size)

    y_min_tij_m = int(0 if y_tij_m - rect_size < 0 else y_tij_m - rect_size)
    y_max_tij_m = int(thresh.shape[0] -
                      1 if y_tij_m + rect_size > thresh.shape[0] else y_tij_m +
                      rect_size)

    x_b_min_tij_m = int(0 if x_b_tij_m - rect_size < 0 else x_b_tij_m -
                        rect_size)
    x_b_max_tij_m = int(thresh.shape[1] - 1 if x_b_tij_m +
                        rect_size > thresh.shape[1] else x_b_tij_m + rect_size)

    y_b_min_tij_m = int(0 if y_b_tij_m - rect_size < 0 else y_b_tij_m -
                        rect_size)
    y_b_max_tij_m = int(thresh.shape[0] - 1 if y_b_tij_m +
                        rect_size > thresh.shape[0] else y_b_tij_m + rect_size)

    #value of the regions, the names of the variables are related to the color of the rectangles drawn at the end of the function
    sum_red_region_m = np.sum(thresh[y_min_tij_m:y_max_tij_m,
                                     x_min_tij_m:x_max_tij_m])

    sum_yellow_region_m = np.sum(thresh[y_b_min_tij_m:y_b_max_tij_m,
                                        x_b_min_tij_m:x_b_max_tij_m])

    #print(sum_red_region, sum_yellow_region, sum_red_region_m, sum_yellow_region_m)

    min_arg = np.argmin(
        np.array([
            sum_red_region, sum_yellow_region, sum_red_region_m,
            sum_yellow_region_m
        ]))

    print('min: ', min_arg)

    if min_arg == 1:  #sum_yellow_region < sum_red_region :

        left_quartile = x_b_tij < thresh.shape[1] / 2
        upper_quartile = y_b_tij < thresh.shape[0] / 2

        center_x = x_b_min_tij + ((x_b_max_tij - x_b_min_tij) / 2)
        center_y = y_b_min_tij + ((y_b_max_tij - y_b_min_tij) / 2)

        center_x = x_b_min_tij + np.argmax(
            thresh[y_b_min_tij:y_b_max_tij,
                   x_b_min_tij:x_b_max_tij].mean(axis=0))
        center_y = y_b_min_tij + np.argmax(
            thresh[y_b_min_tij:y_b_max_tij,
                   x_b_min_tij:x_b_max_tij].mean(axis=1))

    elif min_arg == 0:

        left_quartile = x_tij < thresh.shape[1] / 2
        upper_quartile = y_tij < thresh.shape[0] / 2

        center_x = x_min_tij + ((x_max_tij - x_min_tij) / 2)
        center_y = y_min_tij + ((y_max_tij - y_min_tij) / 2)

        center_x = x_min_tij + np.argmax(
            thresh[y_min_tij:y_max_tij, x_min_tij:x_max_tij].mean(axis=0))
        center_y = y_min_tij + np.argmax(
            thresh[y_min_tij:y_max_tij, x_min_tij:x_max_tij].mean(axis=1))

    elif min_arg == 3:

        left_quartile = x_b_tij_m < thresh.shape[1] / 2
        upper_quartile = y_b_tij_m < thresh.shape[0] / 2

        center_x = x_b_min_tij_m + ((x_b_max_tij_m - x_b_min_tij_m) / 2)
        center_y = y_b_min_tij_m + ((y_b_max_tij_m - y_b_min_tij_m) / 2)

        center_x = x_b_min_tij_m + np.argmax(
            thresh[y_b_min_tij_m:y_b_max_tij_m,
                   x_b_min_tij_m:x_b_max_tij_m].mean(axis=0))
        center_y = y_b_min_tij_m + np.argmax(
            thresh[y_b_min_tij_m:y_b_max_tij_m,
                   x_b_min_tij_m:x_b_max_tij_m].mean(axis=1))

    else:

        left_quartile = x_tij_m < thresh.shape[1] / 2
        upper_quartile = y_tij_m < thresh.shape[0] / 2

        center_x = x_min_tij_m + ((x_max_tij_m - x_min_tij_m) / 2)
        center_y = y_min_tij_m + ((y_max_tij_m - y_min_tij_m) / 2)

        center_x = x_min_tij_m + np.argmax(
            thresh[y_min_tij_m:y_max_tij_m,
                   x_min_tij_m:x_max_tij_m].mean(axis=0))
        center_y = y_min_tij_m + np.argmax(
            thresh[y_min_tij_m:y_max_tij_m,
                   x_min_tij_m:x_max_tij_m].mean(axis=1))

    # draw ellipse on copy of input
    result = img.copy()
    cv2.ellipse(result, ellipse, (0, 0, 255), 1)

    cv2.line(result, (int(xtop), int(ytop)), (int(xbot), int(ybot)),
             (255, 0, 0), 1)
    cv2.circle(result, (int(xc), int(yc)), 10, (255, 255, 255), -1)

    cv2.circle(result, (int(center_x), int(center_y)), 10, (255, 0, 255), 5)

    cv2.circle(result, (int(thresh.shape[1] / 2), int(thresh.shape[0] - 1)),
               10, (255, 0, 0), 5)

    cv2.rectangle(result, (x_min_tij, y_min_tij), (x_max_tij, y_max_tij),
                  (255, 0, 0), 3)
    cv2.rectangle(result, (x_b_min_tij, y_b_min_tij),
                  (x_b_max_tij, y_b_max_tij), (255, 255, 0), 3)

    cv2.rectangle(result, (x_min_tij_m, y_min_tij_m),
                  (x_max_tij_m, y_max_tij_m), (255, 0, 0), 3)
    cv2.rectangle(result, (x_b_min_tij_m, y_b_min_tij_m),
                  (x_b_max_tij_m, y_b_max_tij_m), (255, 255, 0), 3)

    plt.imshow(result)
    plt.figure()
    #rotate the image
    rot_img = Image.fromarray(thresh)

    #180
    bot_point_x = int(thresh.shape[1] / 2)
    bot_point_y = int(thresh.shape[0] - 1)

    #point of interest (poi)
    poi_x = int(center_x)
    poi_y = int(center_y)

    #image_center
    im_center_x = int(thresh.shape[1] / 2)
    im_center_y = int(thresh.shape[0] - 1) / 2

    #a - top, b - bottom, c - right
    #ba = a - b
    #bc = c - a (actually b)

    ba = np.array([im_center_x, im_center_y]) - np.array(
        [bot_point_x, bot_point_y])
    bc = np.array([poi_x, poi_y]) - np.array([im_center_x, im_center_y])

    #angle from the three points
    cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
    cos_angle = np.arccos(cosine_angle)

    cos_angle = np.degrees(cos_angle)

    print('cos angle: ', cos_angle)

    print('print: ', abs(poi_x - bot_point_x))

    # slope between the image bottom-center and the point of interest
    m = ((int(thresh.shape[1] / 2) - int(center_x)) /
         (int(thresh.shape[0] - 1) - int(center_y)))

    theta = math.atan(m)

    print('theta: ', theta)

    result = Image.fromarray(result)

    result = result.rotate(cos_angle)

    plt.imshow(result)
    plt.figure()

    #rot_img = rot_img.rotate(origi_angle)

    rot_img = rot_img.rotate(cos_angle)

    return rot_img
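
# A small sketch of the axis-endpoint geometry used above: given the
# (center, axes, angle) triple in the form returned by cv2.fitEllipse,
# compute the two ends of the major axis. The numbers are made up; only the
# trigonometry mirrors the function above.
import math

(xc, yc), (d1, d2), angle = (300.0, 400.0), (120.0, 340.0), 25.0
rmajor = max(d1, d2) / 2
# cv2.fitEllipse measures the angle of the first axis; rotate by 90 degrees
# so the line below runs along the major axis, as done above
axis_angle = angle - 90 if angle > 90 else angle + 90
xtop = xc + math.cos(math.radians(axis_angle)) * rmajor
ytop = yc + math.sin(math.radians(axis_angle)) * rmajor
xbot = xc + math.cos(math.radians(axis_angle + 180)) * rmajor
ybot = yc + math.sin(math.radians(axis_angle + 180)) * rmajor
print((xtop, ytop), (xbot, ybot))
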
Example #43
0
def analyse():
    """
    This function goes to the folder of the previously run simulation, and averages over
    all the runs (which, because of thermal noise, are different). It writes the mean
    squared separation and mean separation of each constant simulated in a file. It also
    creates a files with the maximum separation of every constant and at which time it occured
    """

    os.chdir(
        "Shear_Wi:{}-{}_chi:{}-{}_hydro:{}_steps:{}_ts:{}_ra{}_noise{}".format(
            constant[0], constant[-1], chi[0], chi[-1], hydro, steps,
            time_step, ar_ratio, noise))
    max_file = open(
        "AMax_File_con"
        ":{}-{}_numc:{}_h{}_s{}_ts{}_ra{}_n{}".format(constant[0],
                                                      constant[-1],
                                                      len(constant), hydro,
                                                      steps, time_step,
                                                      ar_ratio, noise), "w")
    max_file.write(
        "#Constant, Maxseparation over initial separation, time of max separation\n"
    )

    for v in constant:
        #Rfile = open(os.getcwd() + "/Results.out", "a")

        out = open(
            "MSS_{}_{}_{}_{}_{}_{}.out".format(v, hydro, steps, time_step,
                                               ar_ratio, noise), "w")
        nout = open(
            "MS_{}_{}_{}_{}_{}_{}.out".format(v, hydro, steps, time_step,
                                              ar_ratio, noise), "w")

        #The following way of reading files is because of a limitation
        #in the number of files a computer can have open at the same time
        thousands_of_runs = int(math.ceil(runs / 1000.0))
        ms_list = []
        mss_list = []
        # Reads every thousand runs of a simulation
        for k in range(thousands_of_runs):
            # Opens the first 1000 runs in a dictionary, then opens the next 1000 and so on.
            filedata = {
                i: open("Run{}_Wi{}_chi{}".format(i, v, chi[0]), "r")
                for i in xrange(k * 1000, min(runs, (k + 1) * 1000))
            }
            # Mean separation and Mean square separation lists that contain temporary files
            # with the respective values for every thousand runs. They are deleted afterwards
            ms_list.append(open("ms_{}th_thousand.tmp".format(k), "w"))
            mss_list.append(open("mss_{}th_thousand.tmp".format(k), "w"))

            # Adding squared separation and separation together
            # to average noise
            for lines in xrange(steps + 1):
                s = 0
                ssq = 0
                totangle1 = 0
                for file in filedata.values():
                    token = str.split(file.readline())
                    # This convention will most likely change in the 3rd version of the program
                    t = float(token[0])
                    x = float(token[1])
                    y = float(token[2])
                    z = float(token[3])
                    rseparation = x * x + y * y + z * z
                    angle1 = np.degrees(
                        np.arccos(
                            np.clip(
                                np.dot(np.array([x, y, z]), np.array(
                                    [1, 0, 0])) / math.sqrt(rseparation),
                                -1.0, 1.0)))
                    totangle1 += angle1
                    s += rseparation
                    ssq += math.sqrt(rseparation)
                mss_list[k].write("{} {}\n".format(t, s / runs))
                ms_list[k].write("{} {} {}\n".format(t, (ssq / runs),
                                                     totangle1 / runs))
                update_progress(lines / float(steps))
            for fruns in filedata.values():
                fruns.close()
            ms_list[k].close()
            mss_list[k].close()
            ms_list[k] = open("ms_{}th_thousand.tmp".format(k), "r")
            mss_list[k] = open("mss_{}th_thousand.tmp".format(k), "r")

        # This loop goes through the temporary files in ms_list and mss_list and finds the
        # largest separation. It also finds the mean separation and squared separation if
        # the number of runs was more than 1000. If it's under 1000 runs then this loop will
        # slow down the computation by a bit.
        # ~~~~~~~~~ NOTE: If computation time is an issue then modify this ~~~~~~~~~~~~~~~~~~~~~~~~~~
        print "~~~~~~~Merging and finding Max value~~~~~~~~~"
        maxr = 0
        tmax = 0
        for j in xrange(steps + 1):
            mean_mss = 0
            mean_ms = 0
            mean_angle = 0

            for k in range(thousands_of_runs):
                mstoken = str.split(ms_list[k].readline())
                msstoken = str.split(mss_list[k].readline())
                t = float(mstoken[0])
                angle = float(mstoken[2])
                mean_angle += angle
                mssn = float(msstoken[1])
                msn = float(mstoken[1])
                mean_mss += mssn
                mean_ms += msn

            out.write("{} {}\n".format(t, mean_mss))
            nout.write("{} {} {}\n".format(t, mean_ms, mean_angle))
        #    if maxr <= mean_mss:
        #        maxr = mean_mss
        #        tmax = t
        # Max separation squared over initial separation squared is stored in a max file for
        # every constant
        # The loop deletes the unnecessary temporary files
        #max_file.write("{} {} {}\n".format(v, maxr / (init_separation ** 2), tmax))
        for k in range(thousands_of_runs):
            os.remove(mss_list[k].name)
            os.remove(ms_list[k].name)
        out.close()
        nout.close()
        #meansqsep = float(str.split(linecache.getline(out.name, steps + 1))[1])
        #meansep = float(str.split(linecache.getline(nout.name, steps + 1))[1])
        #print("Mean squared separation over {} runs: {} ".format(runs, meansqsep))
        #print("Root Mean squared separation over {} runs: {} ".format(runs, math.sqrt(meansqsep)))
        #print ("Mean separation over {} runs : {}".format(runs, meansep))
        # Appending the results at the end of the Results.out file
        # Rfile.write("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~R E S U L T S~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n")
        # Rfile.write(
        #     "Time-Step:{} Steps:{} runs:{} constant:{} Initial separation:{} hydro: {} time&date: {} \n".format(time_step,
        #                                                                                                         steps, runs,
        #                                                                                                         constant[j],
        #                                                                                                         init_separation,
        #                                                                                                         hydro,
        #                                                                                                         time.strftime(
        #                                                                                                             "%c")))
        # Rfile.write("Mean Squared separation {}\n".format(meansqsep))
        # Rfile.write("Root Mean squared separation {}\n".format(math.sqrt(meansqsep)))
        # Rfile.write("Mean separation {}\n".format(meansep))
        # Rfile.close()
        # Mean squared displacement. Each row has a colour of the rainbow.
        # if args.walk:
        #     os.chdir("Max_Separation_constants:{}-{}_numc:{}".format(constant[0],constant[-1],len(constant)))
        #     max_file = open("Max_Separation_constants:{}-{}_numc:{}".format(constant[0],constant[-1],len(constant)),"w")
        #     for n,j in enumerate(constant):
    os.chdir("..")
Example #44
0
    def setup_cat(self):
        ft = self.ft
        fieldnames = ft.dtype.fields.keys()
        id_check = map(lambda n: n.startswith('ID_Prob'), fieldnames)
        if sum(id_check) == 0:
            print 'warning: ID_Probability field not found'
            id_prob = [np.nan] * len(ft)
        else:
            id_name = fieldnames[id_check.index(True)]
            print 'Using prob from field {}'.format(id_name)
            id_prob = ft[id_name]

        cat_skydirs = map(lambda x, y: SkyDir(float(x), float(y)), ft.RAJ2000,
                          ft.DEJ2000)

        glat = [s.b() for s in cat_skydirs]
        glon = [s.l() for s in cat_skydirs]
        truncnames = [n.replace(' ', '') for n in self.df.index]
        not_found = []

        def nickfix(n):
            # put blanks into index
            if n in truncnames:
                return self.df.index[truncnames.index(n)]
            not_found.append(n)
            return n

        nicknames = ft.NickName_3FGL if 'NickName_3FGL' in ft.dtype.fields else ft.NickName
        sourcenames = ft.Source_Name if 'Source_Name' in ft.dtype.fields else nicknames
        index = map(nickfix, nicknames)  #Source_Name
        if len(not_found) > 0:
            print '{} entries not found in {}, names starting with {}'.format(
                len(not_found), self.skymodel,
                list(set(map(lambda n: n[:4], not_found))))
        self.not_found = not_found
        self.gtlike_info['missing'] = len(not_found)
        flags = np.asarray(
            ft.Flags_3FGL,
            int) if 'Flags_3FGL' in ft.dtype.fields else [0] * len(ft)
        self.cat = pd.DataFrame(
            dict(
                namec=sourcenames,  #_3FGL_1, 
                #nickname=nicknames, #map(nickfix, nicknames),
                ra=ft.RAJ2000,
                dec=ft.DEJ2000,
                ts=ft.Test_Statistic,
                pindex=ft.PL_Index,
                unc_pindex=ft.Unc_PL_Index,
                beta=ft.LP_beta,
                pivot=ft.Pivot_Energy,
                flux=ft.Flux_Density,
                unc_flux=ft.Unc_Flux_Density,
                #skydir=cat_skydirs,
                glat=glat,
                glon=glon,
                #pivot=ft.Pivot_Energy, flux=ft.Flux_Density,
                #modelname=ft.SpectrumType,
                id_prob=id_prob,
                a95=ft.Conf_95_SemiMajor,
                b95=ft.Conf_95_SemiMinor,
                ang95=ft.Conf_95_PosAng,
                #ROI_num = ft.ROI_num,
                #ROI_dist = ft.ROI_dist,
            ),
            index=index,
        )
        self.cat.index.name = 'name'
        self.cat['eflux'] = self.cat.flux * self.cat['pivot']**2 * 1e6
        # set values for corresponding source model
        self.cat['pt_ts'] = self.df.ts
        self.cat['pt_ra'] = self.df.ra
        self.cat['pt_dec'] = self.df.dec
        self.cat['pt_beta'] = self.df.beta
        self.cat['pt_index'] = self.df.pindex
        self.cat['pt_index_unc'] = self.df.pindex_unc
        self.cat['pt_eflux'] = self.df.eflux
        try:
            self.cat['pt_eflux_unc'] = self.df.eflux_unc
        except:
            print 'No eflux_unc in input.'
            self.cat['pt_eflux_unc'] = np.nan
        self.cat['pt_pivot'] = self.df.pivot_energy
        self.cat['ispsr'] = map(lambda name: name.startswith('PSR'),
                                self.cat.index)
        self.cat['ismissing'] = [name in not_found for name in self.cat.index]

        extended_names = self.df.query('isextended==True').index
        self.cat['isextended'] = [
            name in extended_names for name in self.cat.index
        ]
        self.gtlike_info['pulsars'] = sum(self.cat.ispsr)
        self.gtlike_info['extended'] = sum(self.cat.isextended)

        print 'Found {} extended sources, {} pulsars'.format(
            sum(self.cat.isextended), sum(self.cat.ispsr))

        def find_close(A, B):
            """ helper function: make a DataFrame with A index containg
            columns of the
            name of the closest entry in B, and its distance
            A, B : DataFrame objects each with a skydir column
            """
            def mindist(a):
                d = map(a.difference, B.skydir.values)
                n = np.argmin(d)
                return (B.index[n], np.degrees(d[n]))

            return pd.DataFrame(map(mindist, A.skydir.values),
                                index=A.index,
                                columns=('otherid', 'distance'))

        if self.catname == '2FGL' or self.catname == '3FGL':
            print 'generating closest distance to catalog "%s"' % self.catname
            closedf = find_close(self.df, self.cat)
            self.df['closest'] = closedf['distance']
            self.df['close_name'] = closedf.otherid
            closedf.to_csv(
                os.path.join('plots', self.plotfolder,
                             'comparison_%s.csv' % self.catname))
            closest2 = np.degrees(
                np.array([
                    min(map(sdir.difference, self.df.skydir.values))
                    for sdir in cat_skydirs
                ]))
            self.cat['closest'] = closest2
        elif self.catname == 'psc8':
            self.cat['closest'] = 0
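
# A small pandas sketch of the find_close pattern used above: for each row of
# A, record the nearest row of B and the distance. A plain 1-D separation
# stands in for SkyDir.difference; the frames and values are toy data.
import numpy as np
import pandas as pd

A = pd.DataFrame({'pos': [0.1, 0.5, 0.9]}, index=['a1', 'a2', 'a3'])
B = pd.DataFrame({'pos': [0.0, 0.6]}, index=['b1', 'b2'])

def mindist_1d(p):
    d = np.abs(B['pos'].values - p)
    n = int(np.argmin(d))
    return (B.index[n], d[n])

closedf = pd.DataFrame([mindist_1d(p) for p in A['pos']],
                       index=A.index, columns=('otherid', 'distance'))
print(closedf)
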
def plot_error(year):
    #The following currently devoted to making energy and angular error plots for a year of data.
    # init likelihood class
    if year == 40:
        mc = cache.load(filename_pickle + "sirin_IC40/mc.pickle")
        extra = cache.load(filename_pickle + "sirin_IC40/dpsi.pickle")
    elif year == 59:
        mc = cache.load(filename_pickle + "sirin_IC59/mc.pickle")
        extra = cache.load(filename_pickle + "sirin_IC59/dpsi.pickle")
    elif year == 79:
        mc = cache.load(filename_pickle + "sirin_IC79/mc.pickle")
        extra = cache.load(filename_pickle + "sirin_IC79/dpsi.pickle")
    elif year == 86:
        mc = cache.load(filename_pickle + "sirin_IC86I/mc.pickle")
        extra = cache.load(filename_pickle + "sirin_IC86I/dpsi.pickle")
    dpsi = extra['dpsi']

    # datatest
    #Currently don't need to remake these plots. but DONT DELETE

    colors = ['b', 'g', 'y', 'r']
    gamma = np.linspace(1., 2.7, 4)

    # fig_energy, (ax1, ax2) = plt.subplots(ncols=2)
    # ax1.hist([llh.exp["logE"]] + [mc["logE"] for i in gamma],
    #          weights=[np.ones(len(llh.exp))]
    #                   + [mc["ow"] * mc["trueE"]**(-g) for g in gamma],
    #          label=["Pseudo-Data"] + [r"$\gamma={0:.1f}$".format(g) for g in gamma], color= ['k'] + [colors[g] for g in range(len(gamma))],
    #          histtype="step", bins=100, log=True, normed=True, cumulative=-1)
    # ax1.legend(loc="best")
    # ax1.set_title("Reconstructed Energy - IC{}".format(str(year)))
    # ax1.set_xlabel("logE")
    # ax1.set_ylabel("Relative Abundance")
    # ax1.hist([llh.exp["logE"]] + [mc["logE"] for i in gamma],
    #          weights=[np.ones(len(llh.exp))]
    #                   + [mc["ow"] * mc["trueE"]**(-g) for g in gamma],
    #          label=["Data"] + [r"$\gamma={0:.1f}$".format(g) for g in gamma], color= ['k'] + [colors[g] for g in range(len(gamma))],
    #          histtype="step", bins=100, log=True, normed=True, cumulative=-1)
    # ax2.set_title("Zoomed In")
    # ax2.set_xlabel("logE")
    # ax2.set_xlim(4,10)
    # ax2.set_ylim(1e-5,1)
    # ax2.hist([llh.exp["logE"]] + [mc["logE"] for i in gamma],
    #          weights=[np.ones(len(llh.exp))]
    #                   + [mc["ow"] * mc["trueE"]**(-g) for g in gamma],
    #          label=["Pseudo-Data"] + [r"$\gamma={0:.1f}$".format(g) for g in gamma], color= ['k'] + [colors[g] for g in range(len(gamma))],
    #          histtype="step", bins=100, log=True, normed=True, cumulative=-1)
    # fig_energy.savefig(filename_plots + 'energy_hists_IC{}.pdf'.format(str(year)))

    fig_angular_error, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 5))
    dec = np.arcsin(mc["sinDec"])
    angdist = np.degrees(dpsi)

    ax1.hist([np.log10(np.degrees(mc["sigma"])) for i in gamma],
             label=[r"$\sigma$ - $\gamma={0:.1f}$".format(g) for g in gamma],
             linestyle='dashed',
             weights=[mc["ow"] * mc["trueE"]**(-g) for g in gamma],
             color=[colors[g] for g in range(len(gamma))],
             histtype="step",
             bins=100,
             density=True)

    ax1.hist(
        [np.log10(angdist) for i in gamma],
        label=[r"$\Delta \psi$ - $\gamma={0:.1f}$".format(g) for g in gamma],
        weights=[mc["ow"] * mc["trueE"]**(-g) for g in gamma],
        linestyle='solid',
        color=[colors[g] for g in range(len(gamma))],
        histtype="step",
        bins=100,
        density=True)
    ax1.set_title("Reco MC Angular Error Check - IC{}".format(str(year)))
    ax1.set_xlabel(r"log$\sigma_{ang}$ (degrees)")
    ax1.set_ylabel("Relative Abundance")
    ax1.set_ylim(0, 1.5)

    ax2.hist([(np.degrees(mc["sigma"])) for i in gamma],
             label=[r"$\sigma$ - $\gamma={0:.1f}$".format(g) for g in gamma],
             linestyle='dashed',
             weights=[mc["ow"] * mc["trueE"]**(-g) for g in gamma],
             color=[colors[g] for g in range(len(gamma))],
             histtype="step",
             bins=1000,
             density=True)

    ax2.hist(
        [(angdist) for i in gamma],
        label=[r"$\Delta \psi$ - $\gamma={0:.1f}$".format(g) for g in gamma],
        weights=[mc["ow"] * mc["trueE"]**(-g) for g in gamma],
        linestyle='solid',
        color=[colors[g] for g in range(len(gamma))],
        histtype="step",
        bins=1000,
        density=True)
    ax2.legend(loc="upper right")
    ax2.set_xlim(0, 5)
    ax2.set_ylim(0, 3.5)
    ax2.set_xlabel(r"$\sigma_{ang}$ (degrees)")
    fig_angular_error.savefig(
        filename_plots +
        'angular_error_hists_sirin_IC{}.pdf'.format(str(year)))
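
# A minimal sketch of the MC re-weighting used by the histograms above: each
# simulated event carries a one-weight `ow`, and a target power-law spectrum
# E**-gamma is imposed at histogram time via the weights argument. The arrays
# here are toy data, not IceCube Monte Carlo.
import numpy as np

rng = np.random.default_rng(0)
true_e = rng.uniform(1e3, 1e6, size=1000)  # toy true energies
ow = np.ones_like(true_e)                  # toy one-weights
for g in (1.0, 2.0, 2.7):
    w = ow * true_e ** (-g)
    hist, _ = np.histogram(np.log10(true_e), bins=5, weights=w, density=True)
    print(g, hist)
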
def starPos(degFitX=2, sTitl=''):

    jdAll = np.array([])
    xShiftAll = np.array([])
    yShiftAll = np.array([])
    print('Reading in files...')
    
    files = glob.glob('aligned*.fits')
    #files = glob.glob('*.fits')            

    for f in files:
        thisHdr = fits.getheader(f)
        thisJD = thisHdr['JD']
        jdAll = np.hstack((jdAll, thisJD))
        thisX = thisHdr['X_SHIFT']
        thisY = thisHdr['Y_SHIFT']
        xShiftAll = np.hstack((thisX, xShiftAll))
        yShiftAll = np.hstack((thisY, yShiftAll))

    # update the time vector in-place to convert from days --> seconds
    jdAll = (jdAll - np.min(jdAll))*86400.

    # fit low-order polynomial to the shifts
    parsX = np.polyfit(jdAll, xShiftAll, deg=degFitX)
    parsY = np.polyfit(jdAll, yShiftAll, deg=degFitX)

    tFine = np.linspace(np.min(jdAll)-0.01, np.max(jdAll)+0.01, 1000, endpoint=True)

#----------------- FIGURE 1 AND 2 -------------------#
#Plot data
    figx = plt.figure(1)
    figx.clf()
    axx = figx.add_subplot(211)
    dum1 = axx.scatter(jdAll, xShiftAll, marker='^', s=9, c='b', \
                       label=r"xShift %s" % (sTitl))
    
    if np.abs(degFitX - 1) < 1e-3:
        axx.set_title(r'$\Delta x = (%.2e)t + (%.2e$)' % (parsX[0], parsX[1]))
    #axx.set_xlabel(r"$\Delta t$, seconds")
    
    # overplot the fit
    dum2 = axx.plot(tFine, np.polyval(parsX, tFine), 'k-')
    
    axy = figx.add_subplot(212, sharex=axx)
    dum2=axy.scatter(jdAll, yShiftAll, marker='s', s=9, c='r', \
                label=r"yShift %s" % (sTitl))
    #plt.show(block=False)
    dum3 = axy.plot(tFine, np.polyval(parsY, tFine), 'k-')

    #axy.set_xlabel('JD - min(JD), seconds')
    sLabelT = r"$\Delta t$, seconds"
    axy.set_xlabel(sLabelT)
    #axy.set_xlabel(r"$\Delta t$, seconds")

#    for ax in [axx, axy]:
#        ax.grid(which='both')
#        leg=ax.legend()

    axx.set_ylabel('xShift (pixels)')
    axy.set_ylabel('yShift (pixels)')
    plt.suptitle('X-Shift and Y-Shift vs. Time')

#---------------- FIGURE 3 -----------------------#

    figxy = plt.figure(3)
    figxy.clf()
    axxy = figxy.add_subplot(111)
    dum = axxy.scatter(xShiftAll, yShiftAll, c=jdAll, marker='o', label='path %s' % (sTitl), \
        edgecolor='0.5')
    axxy.set_xlabel('xShift (pixels)')
    axxy.set_ylabel('yShift (pixels)')
    plt.title('X-Shift vs. Y-Shift')
    cbar = figxy.colorbar(dum)
    
    # let's get the residuals from the straight line fit in X and the fit in Y
    residX = xShiftAll - np.polyval(parsX, jdAll)
    residY = yShiftAll - np.polyval(parsY, jdAll)

#--------------- FIGURE 4 -----------------------#

    fig4 = plt.figure(4)
    fig4.clf()
    ax4 = fig4.add_subplot(211)
    dum4 = ax4.plot(jdAll, residX, color='b', ms=3, label='X Residual %s' %(sTitl), marker='^')
#ax4.set_xlabel(r"JD (seconds)")
    ax4.set_ylabel(r"$\Delta x$, pix")
    ax5 = fig4.add_subplot(212, sharex=ax4)
    dum5 = ax5.plot(jdAll, residY, color='r', ms=3, label='Y Residual', marker='s')
    #ax5.set_xlabel(r"JD(seconds)")
    ax5.set_xlabel(sLabelT)
    ax5.set_ylabel(r"$\Delta y$, pix")
    plt.suptitle('X and Y Residuals vs. Time')

#-------------- FIGURE 5 ------------------------#

    # Creating Lomb-Scargle Periodograms
    # imported from astropy.stats import LombScargle
    fig5 = plt.figure(5)
    fig5.clf()
    ax6 = fig5.add_subplot(212)
    # fig.suptitle() (could also use plt.suptitle()) displays a title above both subplots
    plt.suptitle('Lomb-Scargle Power Spectrum for X and Y Residuals')


# let's generate the frequencies we want
    pDesired = np.linspace(500., 1500., 1000, endpoint=True)
    freq = 1.0/pDesired
    power = LombScargle(jdAll, residY).power(freq)

# let's get the maximum value
    iMax = np.argmax(power)
    periodMax = pDesired[iMax]

    sPeakY = 'Peak period = %.2fs' % (periodMax)

#freq, power = LombScargle(jdAll,residY).autopower()
    ax6.plot(1.0/freq, power, 'ro', ls='-', ms=2, label=sPeakY)
#ax6.set_xlabel('Period (s)')
    ax6.set_xlim(500., 1500.)
    # leg6 = ax6.legend()

# Plot for X residuals
# sPeakX = 'Peak period = %.2f s' % (periodMax)
    powerX = LombScargle(jdAll, residX).power(freq)
    iMaxX = np.argmax(powerX)
    periodMaxX = pDesired[iMaxX]
    
    sPeakX = 'Peak period = %.2fs %s' % (periodMaxX, sTitl)

    ax7=fig5.add_subplot(211)
    ax7.plot(1.0/freq, powerX, 'bo', ls='-', ms=2, label=sPeakX)
    ax6.set_xlabel('Period (s)')
#leg7 = ax7.legend()
    avgPeriod = np.average([periodMax,periodMaxX])
    phase = jdAll/float(avgPeriod)
    print np.shape(phase)
    phase = phase - np.floor(phase)

# let's fit the residuals against each other
    parsXY = np.polyfit(residX, residY, 1)
# ------------------ FIGURE 6 --------------- #
    fig6 = plt.figure(6)
    fig6.clf()
    axdxdy = fig6.add_subplot(111)

# let's try ordering by phase
    iSort = np.argsort(phase)

# increase the plot symbol size with phase
    sPhs = 25.0 + 50.*phase**2
#sPhs = np.repeat(25., np.size(phase))

    ax8 = axdxdy.scatter(residX[iSort], residY[iSort], c=phase[iSort], \
                        marker='o', label='path %s' % (sTitl), edgecolor='0.5', s=sPhs, \
                            vmin=0., vmax=1., zorder=2)


#dumLine = axdxdy.plot(residX[iSort], residY[iSort], ls='-.', lw=1, color='0.8')
    axdxdy.set_xlabel('residX (pixels)')
    axdxdy.set_ylabel('residY (pixels)')
    cbar1 = fig6.colorbar(ax8)
#axdxdy.set_xlim(500., 1500.)


    c = np.hstack((residX, residY))
    cMax = np.max(np.abs(c))

    # let's overplot the trend HERE
    xFine = np.linspace(-cMax, cMax, 1000)
    dumTrend = axdxdy.plot(xFine, np.polyval(parsXY, xFine), color='k', ls='--', zorder=1, \
                           label=r'Trend angle: %.2f$^{\circ}$' % (np.degrees(np.arctan(parsXY[0]))) )


    axdxdy.set_xlim(-cMax,cMax)
    axdxdy.set_ylim(-cMax,cMax)
    plt.title('Residuals in X vs. Residuals in Y')


    for ax in [ax6, ax7]:
        ax.set_ylabel('Lomb-Scargle power')
    



    for ax in [axx, axy, axxy, ax4, ax5, ax6, ax7, axdxdy]:
        ax.grid(which='both')
        leg=ax.legend()

    figList = [figx, figxy, fig4, fig6, fig5 ]
    figTails = ['shiftVsTime', 'shiftVsShift', 'residVsTime', 'residVsResid','powspec']

    # clean up the string to make it filename-appropriate
    sFil=''
    if len(sTitl) > 0:
        sFil = sTitl[:].replace("(","").replace(")","")
        sFil = sFil.replace(",","_").replace(" ","")
        sFil = sFil.replace("/","").replace("\\","")
        sFil = "%s_" % (sFil)
        #print "SFIL INFO: %s" % (sFil)
        print '---------Date---file name--'
        print ''

    for iFig in range(len(figList)):
        fileName = 'fig_%s%s.pdf' % (sFil, figTails[iFig])
        figList[iFig].savefig(fileName)

        
        print fileName, 'SAVED' 
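
# A compact sketch of the Lomb-Scargle peak search used above, run on a
# synthetic signal with a known 1000 s period. Newer astropy exposes
# LombScargle from astropy.timeseries (older releases used astropy.stats,
# as in the comment above).
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(1)
t = np.sort(rng.uniform(0.0, 20000.0, 500))
y = np.sin(2 * np.pi * t / 1000.0) + 0.1 * rng.standard_normal(t.size)
periods = np.linspace(500.0, 1500.0, 1000)
power = LombScargle(t, y).power(1.0 / periods)
print('Peak period = %.2f s' % periods[np.argmax(power)])
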
def main():
    # Initialize and parse command-line arguments.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--verbose', action = 'store_true',
        help = 'Provide verbose output.')
    descwl.output.Reader.add_args(parser)
    parser.add_argument('--no-display', action = 'store_true',
        help = 'Do not display the image on screen.')
    parser.add_argument('-o','--output-name',type = str, default = None, metavar = 'FILE',
        help = 'Name of the output file to write.')

    select_group = parser.add_argument_group('Object selection options')
    select_group.add_argument('--galaxy', type = int, action = 'append',
        default = [ ], metavar = 'ID',
        help = 'Select the galaxy with this database ID (can be repeated).')
    select_group.add_argument('--group', type = int, action = 'append',
        default = [ ], metavar = 'ID',
        help = 'Select galaxies belonging to the group with this group ID (can be repeated).')
    select_group.add_argument('--select', type = str, action = 'append',
        default = [ ], metavar = 'CUT',
        help = 'Select objects passing the specified cut (can be repeated).')
    select_group.add_argument('--select-region', type = str,
        default = None, metavar = '[XMIN,XMAX,YMIN,YMAX]',
        help = 'Select objects within this region relative to the image center (arcsecs).')
    select_group.add_argument('--save-selected', type = str, default = None,
        help = 'Name of FITS file for saving table of selected objects')

    match_group = parser.add_argument_group('Detection catalog matching options')
    match_group.add_argument('--match-catalog', type = str,
        default = None, metavar = 'FILE',
        help = 'Name of SExtractor-compatible detection catalog to read.')
    match_group.add_argument('--match-color', type = str,
        default = 'black', metavar = 'COL',
        help = 'Matplotlib color name to use for displaying detection catalog matches.')
    match_group.add_argument('--match-info', type = str,
        default = None, metavar = 'FMT',
        help = 'String interpolation format to generate matched object annotations.')

    view_group = parser.add_argument_group('Viewing options')
    view_group.add_argument('--magnification', type = float,
        default = 1, metavar = 'MAG',
        help = 'Magnification factor to use for display.')
    view_group.add_argument('--crop', action = 'store_true',
        help = 'Crop the displayed pixels around the selected objects.')
    view_group.add_argument('--view-region', type = str,
        default = None, metavar = '[XMIN,XMAX,YMIN,YMAX]',
        help = 'Viewing region in arcsecs relative to the image center (overrides crop if set).')
    view_group.add_argument('--draw-moments', action = 'store_true',
        help = 'Draw ellipses to represent the 50%% isophote second moments of selected objects.')
    view_group.add_argument('--info', type = str,
        default = None, metavar = 'FMT',
        help = 'String interpolation format to generate annotation labels.')
    view_group.add_argument('--no-crosshair', action = 'store_true',
        help = 'Do not draw a crosshair at the centroid of each selected object.')
    view_group.add_argument('--clip-lo-noise-fraction', type = float,
        default = 0.05, metavar = 'FRAC',
        help = 'Clip pixels with values below this fraction of the mean sky noise.')
    view_group.add_argument('--clip-hi-percentile', type = float,
        default = 90.0, metavar = 'PCT',
        help = 'Clip pixels with non-zero values above this percentile for the selected image.')
    view_group.add_argument('--hide-background', action = 'store_true',
        help = 'Do not display background pixels.')
    view_group.add_argument('--hide-selected', action = 'store_true',
        help = 'Do not overlay any selected pixels.')
    view_group.add_argument('--add-noise',type = int,default = None,metavar = 'SEED',
        help = 'Add Poisson noise using the seed provided (no noise is added unless this is set).')
    view_group.add_argument('--clip-noise',type = float,default = -1.,metavar = 'SIGMAS',
        help = 'Clip background images at this many sigmas when noise is added.')
    view_group.add_argument('--zscale-all', action='store_true',
        help = 'Set zscale using all displayed objects instead of only selected ones')

    format_group = parser.add_argument_group('Formatting options')
    format_group.add_argument('--info-size', type = str,
        default = 'large', metavar = 'SIZE',
        help = 'Matplotlib font size specification in points or relative (small,large,...)')
    format_group.add_argument('--dpi', type = float, default = 64.,
        help = 'Number of pixels per inch to use for display.')
    format_group.add_argument('--max-view-size', type = int,
        default = 2048, metavar = 'SIZE',
        help = 'Maximum allowed pixel dimensions of displayed image.')
    format_group.add_argument('--colormap', type = str,
        default = 'viridis', metavar = 'CMAP',
        help = 'Matplotlib colormap name to use for background pixel values.')
    format_group.add_argument('--highlight', type = str,
        default = 'red', metavar = 'COL',
        help = 'Matplotlib color name to use for highlighted pixel values.')
    format_group.add_argument('--crosshair-color', type = str,
        default = 'greenyellow', metavar = 'COL',
        help = 'Matplotlib color name to use for crosshairs.')
    format_group.add_argument('--ellipse-color', type = str,
        default = 'greenyellow', metavar = 'COL',
        help = 'Matplotlib color name to use for second-moment ellipses.')
    format_group.add_argument('--info-color', type = str,
        default = 'green', metavar = 'COL',
        help = 'Matplotlib color name to use for info text.')
    format_group.add_argument('--outline-color', type = str,
        default = None, metavar = 'COL',
        help = 'Matplotlib color name to use for outlining text.')

    args = parser.parse_args()

    if args.no_display and not args.output_name:
        print('No display or output requested.')
        return 0
    if args.hide_background and args.hide_selected:
        print('No pixels visible with --hide-background and --hide-selected.')
        return 0

    # Load the analysis results file we will display from.
    try:
        reader = descwl.output.Reader.from_args(defer_stamp_loading = True,args = args)
        results = reader.results
        if args.verbose:
            print(results.survey.description())
    except RuntimeError as e:
        print(str(e))
        return -1

    # Add noise, if requested.
    if args.add_noise is not None:
        results.add_noise(args.add_noise)

    # Match detected objects to simulated objects, if requested.
    if args.match_catalog:
        detected,matched,matched_indices,matched_distance = (
            results.match_sextractor(args.match_catalog))
        if args.verbose:
            print('Matched %d of %d detected objects (median sep. = %.2f arcsecs).' % (
                np.count_nonzero(matched),len(matched),np.median(matched_distance)))

    # Create region selectors.
    if args.select_region:
        try:
            assert args.select_region[0] == '[' and args.select_region[-1] == ']'
            xmin,xmax,ymin,ymax = [ float(token) for token in args.select_region[1:-1].split(',') ]
            assert xmin < xmax and ymin < ymax
        except (ValueError,AssertionError):
            print('Invalid select-region xmin,xmax,ymin,ymax = %s.' % args.select_region)
            return -1
        args.select.extend(['dx>=%f'%xmin,'dx<%f'%xmax,'dy>=%f'%ymin,'dy<%f'%ymax])

    # Perform object selection.
    if args.select:
        # Combine select clauses with logical AND.
        selection = results.select(*args.select,mode='and',format='mask')
    else:
        # Nothing is selected by default.
        selection = results.select('NONE',format='mask')
    # Add any specified groups to the selection with logical OR.
    for identifier in args.group:
        selected = results.select('grp_id==%d' % identifier,format='mask')
        if not np.any(selected):
            print('WARNING: no group found with ID %d.' % identifier)
        selection = np.logical_or(selection,selected)
    # Add any specified galaxies to the selection with logical OR.
    for identifier in args.galaxy:
        selected = results.select('db_id==%d' % identifier,format='mask')
        if not np.any(selected):
            print('WARNING: no galaxy found with ID %d.' % identifier)
        selection = np.logical_or(selection,selected)
    selected_indices = np.arange(results.num_objects)[selection]
    if args.verbose:
        print('Selected IDs:\n%s' % np.array(results.table['db_id'][selected_indices]))
        groups = np.unique(results.table[selected_indices]['grp_id'])
        print('Selected group IDs:\n%s' % np.array(groups))

    # Do we have individual objects available for selection in the output file?
    if np.any(selection) and not results.stamps:
        print('Cannot display selected objects without any stamps available.')
        return -1

    # Save table of selected objects if requested.
    if args.save_selected:
        if args.verbose:
            print('Saving selected objects to %s.' % args.save_selected)
        results.table[selected_indices].write(args.save_selected, overwrite=True)

    # Build the image of selected objects (might be None).
    selected_image = results.get_subimage(selected_indices)

    # Calculate our viewing bounds as (xmin,xmax,ymin,ymax) in floating-point pixels
    # relative to the image bottom-left corner. Also calculate view_bounds with
    # integer values that determine how to extract sub-images to display.
    scale = results.survey.pixel_scale
    if args.view_region is not None:
        try:
            assert args.view_region[0] == '[' and args.view_region[-1] == ']'
            xmin,xmax,ymin,ymax = [ float(token) for token in args.view_region[1:-1].split(',') ]
            assert xmin < xmax and ymin < ymax
        except (ValueError,AssertionError):
            print('Invalid view-window xmin,xmax,ymin,ymax = %s.' % args.view_region)
            return -1
        # Convert to pixels relative to bottom-left corner.
        xmin = xmin/scale + 0.5*results.survey.image_width
        xmax = xmax/scale + 0.5*results.survey.image_width
        ymin = ymin/scale + 0.5*results.survey.image_height
        ymax = ymax/scale + 0.5*results.survey.image_height
        # Calculate integer pixel bounds that cover the view window.
        view_bounds = galsim.BoundsI(
            int(math.floor(xmin)),int(math.ceil(xmax))-1,
            int(math.floor(ymin)),int(math.ceil(ymax))-1)
    elif args.crop and selected_image is not None:
        view_bounds = selected_image.bounds
        xmin,xmax,ymin,ymax = (
            view_bounds.xmin,view_bounds.xmax+1,view_bounds.ymin,view_bounds.ymax+1)
    else:
        view_bounds = results.survey.image.bounds
        xmin,xmax,ymin,ymax = 0,results.survey.image_width,0,results.survey.image_height
    if args.verbose:
        vxmin = (xmin - 0.5*results.survey.image_width)*scale
        vxmax = (xmax - 0.5*results.survey.image_width)*scale
        vymin = (ymin - 0.5*results.survey.image_height)*scale
        vymax = (ymax - 0.5*results.survey.image_height)*scale
        print('View window is [xmin,xmax,ymin,ymax] = [%.2f,%.2f,%.2f,%.2f] arcsecs' % (
            vxmin,vxmax,vymin,vymax))
        print('View pixels in %r' % view_bounds)

    # Initialize a matplotlib figure to display our view bounds.
    view_width = float(xmax - xmin)
    view_height = float(ymax - ymin)
    if (view_width*args.magnification > args.max_view_size or
        view_height*args.magnification > args.max_view_size):
        print('Requested view dimensions %d x %d too big. Increase --max-view-size if necessary.' % (
            view_width*args.magnification,view_height*args.magnification))
        return -1
    fig_height = args.magnification*(view_height/args.dpi)
    fig_width = args.magnification*(view_width/args.dpi)
    figure = plt.figure(figsize = (fig_width,fig_height),frameon = False,dpi = args.dpi)
    axes = plt.Axes(figure, [0., 0., 1., 1.])
    axes.axis(xmin = xmin,xmax = xmax,ymin = ymin,ymax = ymax)
    axes.set_axis_off()
    figure.add_axes(axes)

    # Get the background and highlighted images to display, sized to our view.
    background = galsim.Image(bounds = view_bounds,dtype = np.float32,scale = scale)
    highlighted = background.copy()
    if not args.hide_background:
        overlap = results.survey.image.bounds & view_bounds
        if overlap.area() > 0:
            background[overlap] = results.survey.image[overlap]
    if not args.hide_selected and selected_image is not None:
        overlap = selected_image.bounds & view_bounds
        if overlap.area() > 0:
            highlighted[overlap] = selected_image[overlap]
    if np.count_nonzero(highlighted.array) == 0:
        if args.hide_background or np.count_nonzero(background.array) == 0:
            print('There are no non-zero pixel values in the view window.')
            return -1

    # Prepare the z scaling.
    zscale_pixels = results.survey.image.array
    if selected_image and not args.zscale_all:
        if selected_image.bounds.area() < 16:
            print('WARNING: using full image for z-scaling since only %d pixel(s) selected.' % (
                selected_image.bounds.area()))
        else:
            zscale_pixels = selected_image.array
    # Clip large fluxes to a fixed percentile of the non-zero selected pixel values.
    non_zero_pixels = (zscale_pixels != 0)
    vmax = np.percentile(zscale_pixels[non_zero_pixels],q = (args.clip_hi_percentile))
    # Clip small fluxes to a fixed fraction of the mean sky noise.
    vmin = args.clip_lo_noise_fraction*np.sqrt(results.survey.mean_sky_level)
    if args.verbose:
        print('Clipping pixel values to [%.1f,%.1f] detected electrons.' % (vmin,vmax))

    # Define the z scaling function. See http://ds9.si.edu/ref/how.html#Scales
    def zscale(pixels):
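        # pixels arrive already clipped and normalized to [0, 1]; sqrt stretches faint values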
        return np.sqrt(pixels)

    # Calculate the clipped and scaled pixel values to display.
    highlighted_z = zscale((np.clip(highlighted.array,vmin,vmax) - vmin)/(vmax-vmin))
    if args.add_noise is not None:
        vmin = args.clip_noise*np.sqrt(results.survey.mean_sky_level)
        if args.verbose:
            print('Background pixels with noise clipped to [%.1f,%.1f].' % (vmin,vmax))
    background_z = zscale((np.clip(background.array,vmin,vmax) - vmin)/(vmax-vmin))

    # Convert the background image to RGB using the requested colormap.
    # Drop the alpha channel [3], which is all ones anyway.
    cmap = matplotlib.cm.get_cmap(args.colormap)
    background_rgb = cmap(background_z)[:,:,:3]

    # Overlay the highlighted image using alpha blending.
    # http://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
    if args.highlight and args.highlight != 'none':
        alpha = highlighted_z[:,:,np.newaxis]
        color = np.array(matplotlib.colors.colorConverter.to_rgb(args.highlight))
        final_rgb = alpha*color + background_rgb*(1.-alpha)
    else:
        final_rgb = background_rgb

    # Draw the composite image.
    extent = (view_bounds.xmin,view_bounds.xmax+1,view_bounds.ymin,view_bounds.ymax+1)
    axes.imshow(final_rgb,extent = extent,aspect = 'equal',origin = 'lower',
        interpolation = 'nearest')

    # The argparse module escapes any \n or \t in string args, but we need these
    # to be unescaped in the annotation format string.
    if args.info:
        args.info = binary_type(args.info, 'utf-8').decode('unicode-escape')
    if args.match_info:
        args.match_info = binary_type(args.match_info, 'utf-8').decode('unicode-escape')

    num_selected = len(selected_indices)
    ellipse_centers = np.empty((num_selected,2))
    ellipse_widths = np.empty(num_selected)
    ellipse_heights = np.empty(num_selected)
    ellipse_angles = np.empty(num_selected)
    match_ellipse_centers = np.empty((num_selected,2))
    match_ellipse_widths = np.empty(num_selected)
    match_ellipse_heights = np.empty(num_selected)
    match_ellipse_angles = np.empty(num_selected)
    num_match_ellipses = 0
    for index,selected in enumerate(selected_indices):
        info = results.table[selected]
        # Do we have a detected object matched to this simulated source?
        match_info = None
        if args.match_catalog and info['match'] >= 0:
            match_info = detected[info['match']]
        # Calculate the selected object's centroid position in user display coordinates.
        x_center = (0.5*results.survey.image_width + info['dx']/scale)
        y_center = (0.5*results.survey.image_height + info['dy']/scale)
        if match_info is not None:
            x_match_center = match_info['X_IMAGE']-0.5
            y_match_center = match_info['Y_IMAGE']-0.5
        # Draw a crosshair at the centroid of selected objects.
        if not args.no_crosshair:
            axes.plot(x_center,y_center,'+',color = args.crosshair_color,
                markeredgewidth = 2,markersize = 24)
            if match_info:
                axes.plot(x_match_center,y_match_center,'x',color = args.match_color,
                    markeredgewidth = 2,markersize = 24)
        # Add annotation text if requested.
        if args.info:
            path_effects = None if args.outline_color is None else [
                matplotlib.patheffects.withStroke(linewidth = 2,
                foreground = args.outline_color)]
            try:
                annotation = args.info % info
            except IndexError:
                print('Invalid annotate-format %r' % args.info)
                return -1
            axes.annotate(annotation,xy = (x_center,y_center),xytext = (4,4),
                textcoords = 'offset points',color = args.info_color,
                fontsize = args.info_size,path_effects = path_effects)
        if match_info and args.match_info:
            path_effects = None if args.outline_color is None else [
                matplotlib.patheffects.withStroke(linewidth = 2,
                foreground = args.outline_color)]
            try:
                annotation = args.match_info % match_info
            except IndexError:
                print('Invalid match-format %r' % args.match_info)
                return -1
            axes.annotate(annotation,xy = (x_match_center,y_match_center),
                xytext = (4,4),textcoords = 'offset points',
                color = args.info_color,fontsize = args.info_size,
                path_effects = path_effects)
        # Add a second-moments ellipse if requested.
        if args.draw_moments:
            ellipse_centers[index] = (x_center,y_center)
            ellipse_widths[index] = info['a']/scale
            ellipse_heights[index] = info['b']/scale
            ellipse_angles[index] = np.degrees(info['beta'])
            if match_info:
                # This will only work if we have the necessary additional fields in the match catalog.
                try:
                    match_ellipse_centers[num_match_ellipses] = (x_match_center,y_match_center)
                    match_ellipse_widths[num_match_ellipses] = match_info['A_IMAGE']
                    match_ellipse_heights[num_match_ellipses] = match_info['B_IMAGE']
                    match_ellipse_angles[num_match_ellipses] = match_info['THETA_IMAGE']
                    num_match_ellipses += 1
                except (KeyError, IndexError):
                    pass

    # Draw any ellipses.
    if args.draw_moments:
        ellipses = matplotlib.collections.EllipseCollection(units = 'x',
            widths = ellipse_widths,heights = ellipse_heights,angles = ellipse_angles,
            offsets = ellipse_centers, transOffset = axes.transData)
        ellipses.set_facecolor('none')
        ellipses.set_edgecolor(args.ellipse_color)
        axes.add_collection(ellipses,autolim = True)
        if num_match_ellipses > 0:
            ellipses = matplotlib.collections.EllipseCollection(units = 'x',
                widths = match_ellipse_widths,heights = match_ellipse_heights,
                angles = match_ellipse_angles,offsets = match_ellipse_centers,
                transOffset = axes.transData)
            ellipses.set_facecolor('none')
            ellipses.set_edgecolor(args.match_color)
            #ellipses.set_linestyle('dashed')
            axes.add_collection(ellipses,autolim = True)

    if args.output_name:
        figure.savefig(args.output_name,dpi = args.dpi)

    if not args.no_display:
        plt.show()
Example #48
0
def process_file(pcap_file_in, pcap_dir_in, shm_name, shm_shp, shm_dtp,
                 txt_dir_in, fn_keyword):

    loc_shm = SharedMemory(shm_name)
    loc_apx_arr = np.recarray(shape=shm_shp, dtype=shm_dtp, buf=loc_shm.buf)

    this_os = platform.system()
    if this_os == "Linux":
        concatenate_cmd = "cat"
        wine = "wine "
    elif this_os == "Windows":
        concatenate_cmd = "type"
        wine = ""
    else:
        print("Unknown OS. Terminating.")
        sys.exit(1)
    print(
        f"Running on {this_os}. To concatenate we will use '{concatenate_cmd}'")
    print(f"Calling lastools with e.g. '{wine}lastool'")

    print(f"Processing {pcap_file_in}")
    logging.info(f"Processing {pcap_file_in}")

    # ### Read entire file only once (takes most time)

    start = time.time()
    packets = rdpcap(os.path.join(pcap_dir_in, pcap_file_in))
    packets_read = len(packets)
    end = time.time()
    print(F"Read {packets_read} packets in {end-start:.2f} seconds.")
    logging.info(F"Read {packets_read} packets in {end-start:.2f} seconds.")

    # ### Make sure all packets have length == 1206!
    start = time.time()
    wrong_lengths = 0
    for p in packets:
        if len(p.load) != DATA_PACKET_LENGTH:
            wrong_lengths += 1
    end = time.time()
    logging.info(F"Checked {packets_read} packets in {end-start:.2f} seconds.")
    logging.info('All have same length (' + str(DATA_PACKET_LENGTH) +
                 ').' if wrong_lengths == 0 else str(wrong_lengths) +
                 ' packets have a different length.')
    logging.info('This is GOOD!' if wrong_lengths == 0 else 'This is BAD!')

    # ### Read all packets into 1 numpy array
    start = time.time()
    raw_pack_data = np.zeros((packets_read, DATA_PACKET_LENGTH),
                             dtype=np.uint8)
    for i, p in enumerate(packets):
        raw_pack_data[i, :] = np.frombuffer(p.load, dtype=np.uint8)
        if i % 1e5 == 0:
            print(
                f"Packet {i} out of {packets_read} in {time.time()-start:.2f} seconds."
            )
    end = time.time()
    logging.info(
        F"Copied data from {packets_read} packets into a numpy array of shape {raw_pack_data.shape} in {end-start:.2f} seconds."
    )

    # ### Make sure all packets are captured in the same mode (last, strongest, dual)
    mode_hypothesis = raw_pack_data[0, RETURN_MODE_OFFSET]
    logging.info(
        f"First packet reports {RETURN_MODE_NAME[mode_hypothesis]} capture mode."
    )
    diff_ret_mode = (raw_pack_data[:, RETURN_MODE_OFFSET] !=
                     mode_hypothesis).sum()
    logging.info(f"{diff_ret_mode} packets disagree.")
    logging.info(
        f"{'This is GOOD!' if diff_ret_mode == 0 else 'This is BAD!'}")

    # ### Make sure all packets are captured with the same sensor (only VLP16 expected)
    sensor_hypothesis = raw_pack_data[0, PRODUCT_MODEL_OFFSET]
    logging.info(
        f"First packet reports {PRODUCT_MODEL_NAME[sensor_hypothesis]} sensor model."
    )
    diff_sensor = (raw_pack_data[:, PRODUCT_MODEL_OFFSET] !=
                   sensor_hypothesis).sum()
    logging.info(f"{diff_sensor} packets disagree.")
    logging.info(f"{'This is GOOD!' if diff_sensor == 0 else 'This is BAD!'}")

    # ### Get µs timestamp from packets and transform to UNIX timestamp
    #
    # I found that Ethernet timestamp agrees with GNSS timestamp very well.
    #
    # Can be problematic if very close to a full hour and I am not careful.
    #
    # Let's look at 1st Ethernet timestamp.
    #
    # * if it is far enough from a full hour (>=1 minute), then we continue
    # * ~if it is too close (<1 minute), then we look at last one~ _not implemented_
    # * ~if last one is also too close (recorded for 1 entire hour, not likely), we find an optimal one in the middle~ _not implemented_

    ts_1st_pack = datetime.datetime.fromtimestamp(int(packets[0].time))
    if ts_1st_pack.minute > 1 and ts_1st_pack.minute < 59:
        logging.info(
            f"Far enough from full hour (~{ts_1st_pack.minute} minutes).")
        logging.info("This is GOOD!\nContinue!")
    else:
        logging.info(
            f"Too close to full hour (~{ts_1st_pack.minute} minutes).")
        logging.info(
            "That is not great, but the code below should deal with it.")

    # #### Take Ethernet timestamp of (1st) packet, discard sub-hour info and replace it with that from the GNSS µs timestamp
    #
    # What happens when the capture rolls over a full hour?
    #
    # **Need to deal with this when such data is captured!**
    #
    # # Solution below!

    start = time.time()
    micros = np.zeros((packets_read, ), dtype=np.int64)
    micro_bytes = micros.view(dtype=np.uint8)
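    # micros and micro_bytes share memory: writing byte k of each packet into every
    # 8th byte fills the low 4 bytes of each int64 slot (assumes a little-endian host)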
    micro_bytes[0::8] = raw_pack_data[:, DATA_PACK_TIMESTAMP_OFFSET + 0]
    micro_bytes[1::8] = raw_pack_data[:, DATA_PACK_TIMESTAMP_OFFSET + 1]
    micro_bytes[2::8] = raw_pack_data[:, DATA_PACK_TIMESTAMP_OFFSET + 2]
    micro_bytes[3::8] = raw_pack_data[:, DATA_PACK_TIMESTAMP_OFFSET + 3]
    plt.plot(micros)
    end = time.time()
    logging.info(
        f"Extracted time stamp from {packets_read} packets in {end-start:.2f} seconds."
    )
    logging.info(
        "If the line jumps, a full hour occurs. Need to deal with it!")

    # #### Another problem could be that the UDP packets are not guaranteed to arrive in order.
    #
    # An assumption that is made for the following calculations is that this does not happen.
    #
    # **Need to deal with this when such data is captured!**

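    # 3.6e9 microseconds == 1 hour: each backwards jump in the counter marks a
    # top-of-the-hour rollover, so add an hour to everything after the jump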
    while (micros[1:] < micros[:-1]).sum() > 0:
        jump_position = np.where((micros[1:] < micros[:-1]))[0][0] + 1
        micros[jump_position:] += int(3.6e9)
        logging.info(
            f"Added another hour to micros at position {jump_position}")
    plt.plot(micros)

    if (micros[1:] - micros[:-1]).min() > 0:  #all chronological
        logging.info("Packets seem to be in right order. Continue!")
    else:
        logging.info("Not all packets are in order. Handle somehow!")
        print("Not all packets are in order. Handle somehow!")
        sys.exit(1)

    eth_ts_hour = remove_min_sec(packets[0].time)

    puck_timestamps = micros / 1e6 + eth_ts_hour * 1.0

    # ### Get range and intensity info for all packets

    start = time.time()

    # the following contains only channel data (i.e. no timestamp, factory bytes or azimuth)
    channel_data = raw_pack_data[:, :-6].reshape(
        (packets_read, DATA_BLOCKS, 100))[:, :, 4:]
    channel_data = channel_data.reshape(
        (packets_read, DATA_BLOCKS * LASERS_PER_DATA_BLOCK * 3))

    #puck ranges in mm
    puck_ranges = np.zeros((packets_read, DATA_BLOCKS * LASERS_PER_DATA_BLOCK),
                           dtype=np.uint32)
    puck_range_bytes = puck_ranges.view(dtype=np.uint8)
    puck_range_bytes[:, 0::4] = channel_data[:, 0::3]
    puck_range_bytes[:, 1::4] = channel_data[:, 1::3]
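    # each raw range count is 2 mm (per the VLP-16 manual), so doubling yields millimetres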
    puck_ranges *= 2

    #intensities as 1 byte
    puck_intens = np.zeros((packets_read, DATA_BLOCKS * LASERS_PER_DATA_BLOCK),
                           dtype=np.uint8)
    puck_intens[:, :] = channel_data[:, 2::3]

    end = time.time()

    logging.info(
        f"Extracted range and intensity for {packets_read * DATA_BLOCKS * LASERS_PER_DATA_BLOCK} laser pulses in {end-start:.2f} seconds."
    )

    # ### Get all given azimuths
    #
    # Think how to treat them for dual / single cases later.
    #
    # For now assume it is always DUAL!

    # Changed azimuths data type to signed 32-bit integer to support in-place subtraction
    start = time.time()

    # the following contains only azimuth data (i.e. no timestamp, factory bytes or channel data)
    azimuths = np.zeros((packets_read, DATA_BLOCKS, 1), dtype=np.int32)
    azim_data = azimuths.view(dtype=np.uint8)
    azim_data[:, :,
              0:2] = raw_pack_data[:, :-6].reshape(packets_read, DATA_BLOCKS,
                                                   100)[:, :, 2:4]
    azim_data = azim_data.reshape((packets_read, DATA_BLOCKS * 4))

    #azimuth
    azimuths = azim_data.view(dtype=np.int32)

    end = time.time()

    logging.info(
        f"Extracted azimuths for {packets_read * DATA_BLOCKS} firing sequences in {end-start:.2f} seconds."
    )

    # ### All packets are in dual return mode, so the azimuths are expected to repeat (VLP-16 User Manual, Figure 9-3)
    #
    # The following checks this assumption again:

    az_repeat = ((azimuths[:, 0::2] != azimuths[:, 1::2]).sum() == 0)
    if az_repeat:
        logging.info("All azimuths repeat. This is good.")
    else:
        logging.info("Not all azimuths repeat. Investigate before continuing.")

    azimuths_gap = get_azim_gap(azimuths)

    micros_pulses = get_micros_pulses(micros)
    nanos_pulses = get_nanos_pulses(micros)
    # timestamp for each laser pulse
    puck_pulse_time = micros_pulses / 1e6 + eth_ts_hour * 1.0

    # ### Calculate the azimuths for each datapoint

    #Use the following simplified array if in dual mode
    #Otherwise can still refer to it, but it's just the original array
    if mode_hypothesis == RETURN_MODE_DUAL:
        az_simple = azimuths[:, 0::2]
    else:
        az_simple = azimuths

    prec_az = get_precision_azimuth(az_simple, azimuths_gap, True, True)

    #cut the big dataframe to only what the puck data covers
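    # assumes the trajectory data overlaps the puck capture window (interv non-empty)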
    interv = np.where(
        np.logical_and(loc_apx_arr.timestamp > puck_timestamps[0],
                       loc_apx_arr.timestamp < puck_timestamps[-1]))[0]
    mid_apx_arr = loc_apx_arr[max(interv[0] -
                                  1, 0):min(interv[-1] +
                                            1, loc_apx_arr.shape[0])]

    # ### process puck data...

    concat_files = []
    MAXIMUM_POINTS_PER_RUN = 2000  # * DATA_BLOCKS * LASERS_PER_DATA_BLOCK # iterate over puck_timestamps
    max_laps = int(np.ceil(puck_timestamps.size / MAXIMUM_POINTS_PER_RUN))
    for run_count in range(max_laps):
        print(f'Running slice {run_count} out of {max_laps}')
        current_range = np.arange(0, min(MAXIMUM_POINTS_PER_RUN,
                                        puck_timestamps.size - MAXIMUM_POINTS_PER_RUN * run_count)) +\
                        MAXIMUM_POINTS_PER_RUN * run_count  #a slice that hopefully fits in RAM

        #time in seconds
        min_time = puck_timestamps[current_range][0]
        max_time = puck_timestamps[current_range][-1]

        print(
            f"{pcap_file_in}: Processing {(max_time - min_time):.2f} seconds")

        interv = np.where(
            np.logical_and(mid_apx_arr.timestamp > min_time,
                           mid_apx_arr.timestamp < max_time))[0]
        if interv.size < 1:
            continue
        sml_apx_arr = mid_apx_arr[max(interv[0] -
                                      1, 0):min(interv[-1] +
                                                2, mid_apx_arr.shape[0])]
        relevant_times = puck_pulse_time[current_range, :]
        relevant_nanos = nanos_pulses[current_range, :]

        strongest_return_ranges = puck_ranges[current_range].reshape(
            (-1, DATA_BLOCKS, LASERS_PER_DATA_BLOCK))[:, 1::2].flatten() / 1000
        strongest_return_intensities = puck_intens[current_range].reshape(
            (-1, DATA_BLOCKS, LASERS_PER_DATA_BLOCK))[:, 1::2].flatten()

        last_return_ranges = puck_ranges[current_range].reshape(
            (-1, DATA_BLOCKS, LASERS_PER_DATA_BLOCK))[:, 0::2].flatten() / 1000
        last_return_intensities = puck_intens[current_range].reshape(
            (-1, DATA_BLOCKS, LASERS_PER_DATA_BLOCK))[:, 0::2].flatten()

        azimuth = prec_az[current_range]

        vert_elev_angle = np.tile(elevation_and_vert_corr_by_laser_id[:, 0],
                                  (1, 12))
        vert_elev_angle = np.tile(vert_elev_angle, (azimuth.shape[0], 1))

        global_laser_id = np.tile(np.arange(16, dtype=np.uint8), (1, 12))
        global_laser_id = np.tile(global_laser_id, (azimuth.shape[0], 1))

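        # reported azimuths are in hundredths of a degree; convert to radians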
        azimuth = np.deg2rad(azimuth / 100).flatten()
        vert_elev_angle = vert_elev_angle.flatten()

        #not enough points to interpolate
        if sml_apx_arr.shape[0] < 4:
            continue
        f_lat = interp1d(sml_apx_arr.timestamp,
                         sml_apx_arr["lat_EPSG32632"],
                         kind='cubic',
                         fill_value="extrapolate")
        f_lon = interp1d(sml_apx_arr.timestamp,
                         sml_apx_arr["lon_EPSG32632"],
                         kind='cubic',
                         fill_value="extrapolate")
        f_ele = interp1d(sml_apx_arr.timestamp,
                         sml_apx_arr["elevation"],
                         kind='cubic',
                         fill_value="extrapolate")
        f_yaw = interp1d(sml_apx_arr.timestamp,
                         sml_apx_arr["heading_continuous"],
                         kind='cubic',
                         fill_value="extrapolate")
        f_rol = interp1d(sml_apx_arr.timestamp,
                         sml_apx_arr["roll"],
                         kind='cubic',
                         fill_value="extrapolate")
        f_pit = interp1d(sml_apx_arr.timestamp,
                         sml_apx_arr["pitch"],
                         kind='cubic',
                         fill_value="extrapolate")

        MIN_RANGE = 2  #metres
        MIN_INTENSITY = 100
        for return_counter in range(1, 3):
            if return_counter == 1:
                condition = np.logical_and(
                    strongest_return_ranges > MIN_RANGE,
                    strongest_return_intensities > MIN_INTENSITY)
                condition_double = np.logical_and(
                    last_return_ranges > MIN_RANGE,
                    last_return_ranges != strongest_return_ranges)

                return_ranges = strongest_return_ranges
                return_intensities = strongest_return_intensities
            elif return_counter == 2:
                condition = np.logical_and(
                    np.logical_and(
                        last_return_ranges > MIN_RANGE,
                        last_return_ranges != strongest_return_ranges),
                    last_return_intensities > MIN_INTENSITY)
                condition_double = np.ones_like(last_return_ranges,
                                                dtype=np.bool_)

                return_ranges = last_return_ranges
                return_intensities = last_return_intensities

            lat = f_lat(relevant_times).flatten()
            lon = f_lon(relevant_times).flatten()
            ele = f_ele(relevant_times).flatten()
            yaw = f_yaw(relevant_times).flatten() % 360
            rol = f_rol(relevant_times).flatten()
            pit = f_pit(relevant_times).flatten()

            X_puck = np.full(return_intensities.shape, np.nan)
            Y_puck = np.full(return_intensities.shape, np.nan)
            Z_puck = np.full(return_intensities.shape, np.nan)

            #VLP manual p.53
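            # X = R*cos(w)*sin(a), Y = R*cos(w)*cos(a), Z = R*sin(w),
            # with w the per-laser elevation angle and a the azimuth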
            X_puck[condition] = return_ranges[condition] * np.cos(
                vert_elev_angle)[condition] * np.sin(azimuth)[condition]
            Y_puck[condition] = return_ranges[condition] * np.cos(
                vert_elev_angle)[condition] * np.cos(azimuth)[condition]
            Z_puck[condition] = return_ranges[condition] * np.sin(
                vert_elev_angle)[condition]

            # first rotate into XYZ of the drone!
            # x_roll = -90 #degrees
            # y_pitch = 0
            # z_yaw = -90
            #rotation from puck to uav coordinates:
            R_01 = np.array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])

            #get rid of invalid entries
            X_puck = X_puck[condition]
            Y_puck = Y_puck[condition]
            Z_puck = Z_puck[condition]

            XYZ_puck = np.vstack((X_puck, Y_puck, Z_puck)).T
            XYZ_puck = XYZ_puck[:, np.newaxis, :]
            XYZ_uav = np.matmul(XYZ_puck, R_01)

            #now rotate to real world...
            yaw_correction, pit_correction, rol_correction = -np.radians(
                yaw[condition]), -np.radians(pit[condition]), -np.radians(
                    rol[condition])

            cos_gamma = np.cos(rol_correction)
            sin_gamma = np.sin(rol_correction)

            cos_beta = np.cos(pit_correction)
            sin_beta = np.sin(pit_correction)

            cos_alpha = np.cos(yaw_correction)
            sin_alpha = np.sin(yaw_correction)

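            # build one 3x3 rotation matrix per point: R_gamma (roll, about x),
            # R_beta (pitch, about y), R_alpha (yaw, about z); np.transpose
            # reorders each stack to shape (N, 3, 3) for batched matmul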
            R_gamma = np.array([[
                np.ones_like(cos_gamma),
                np.zeros_like(cos_gamma),
                np.zeros_like(cos_gamma)
            ], [np.zeros_like(cos_gamma), cos_gamma,
                -sin_gamma], [np.zeros_like(cos_gamma), sin_gamma, cos_gamma]])
            R_gamma = np.transpose(R_gamma, (2, 0, 1))

            R_beta = np.array([[cos_beta,
                                np.zeros_like(cos_beta), sin_beta],
                               [
                                   np.zeros_like(cos_beta),
                                   np.ones_like(cos_beta),
                                   np.zeros_like(cos_beta)
                               ],
                               [-sin_beta,
                                np.zeros_like(cos_beta), cos_beta]])
            R_beta = np.transpose(R_beta, (2, 0, 1))

            R_alpha = np.array(
                [[cos_alpha, -sin_alpha,
                  np.zeros_like(cos_alpha)],
                 [sin_alpha, cos_alpha,
                  np.zeros_like(cos_alpha)],
                 [
                     np.zeros_like(cos_alpha),
                     np.zeros_like(cos_alpha),
                     np.ones_like(cos_alpha)
                 ]])
            R_alpha = np.transpose(R_alpha, (2, 0, 1))

            XYZ_rotated = np.matmul(XYZ_uav, R_gamma)
            XYZ_rotated = np.matmul(XYZ_rotated, R_beta)
            XYZ_rotated = np.matmul(XYZ_rotated, R_alpha)

            #bring it into East, North, Up system (+90° around z, then +180° around new x)
            R_last = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]])

            XYZ_rotated = np.matmul(XYZ_rotated, R_last)

            flight_line_id = np.ones_like(
                vert_elev_angle[condition], dtype=np.uint16) * int(
                    pcap_file_in.split(".")[0].split("_")[-1])
            flight_line_id = flight_line_id[:, np.newaxis, np.newaxis]
            XYZ_rotated = np.concatenate((XYZ_rotated, flight_line_id),
                                         axis=-1)

            return_id = np.ones_like(vert_elev_angle[condition],
                                     dtype=np.uint16) * return_counter
            return_id = return_id[:, np.newaxis, np.newaxis]
            XYZ_rotated = np.concatenate((XYZ_rotated, return_id), axis=-1)

            return_intensities = return_intensities[condition]
            return_intensities = return_intensities[:, np.newaxis, np.newaxis]
            XYZ_rotated = np.concatenate((XYZ_rotated, return_intensities),
                                         axis=-1)

            number_of_returns = np.ones_like(
                vert_elev_angle[condition], dtype=np.uint8) + condition_double[
                    condition]  #1 for single, 2 for double
            number_of_returns = number_of_returns[:, np.newaxis, np.newaxis]
            XYZ_rotated = np.concatenate((XYZ_rotated, number_of_returns),
                                         axis=-1)

            laser_times = relevant_times.flatten(
            )[condition] - 1e9  #subtract 1 billion (see here https://support.geocue.com/fixing-las-global-encoding/)
            laser_times = laser_times[:, np.newaxis, np.newaxis]
            XYZ_rotated = np.concatenate((XYZ_rotated, laser_times), axis=-1)

            #for angles
            delta_pos = np.copy(XYZ_rotated[:, :, 0:3])
            delta_pos = np.matmul(delta_pos, R_alpha)  #or transpose?

            new_scan_angle = np.degrees(
                np.arctan2(delta_pos[:, 0, 0], -delta_pos[:, 0, 2])
            )  #take delta_z as positive when looking down (normal scan)
            new_scan_angle = np.clip(
                new_scan_angle, -128, +127
            )  #for some reason does not want to use short even though version 1.4
            new_scan_angle = new_scan_angle[:, np.newaxis, np.newaxis]
            XYZ_rotated = np.concatenate((XYZ_rotated, new_scan_angle),
                                         axis=-1)

            # new_along_track_angle = np.degrees(np.arctan2(delta_pos[:,0,1], - delta_pos[:,0,2])) #take delta_z as positive when looking down (normal scan)
            # #new_along_track_angle %= 360; new_along_track_angle[new_along_track_angle > 180] -= 360 #normalize to +/-180
            # #new_along_track_angle = np.clip(new_along_track_angle, -180, +180)
            # new_along_track_angle = new_along_track_angle[:, np.newaxis, np.newaxis]
            # XYZ_rotated = np.concatenate((XYZ_rotated, new_along_track_angle), axis = -1)

            # put nanosecond timestamp instead of angle along track!
            laser_nanos = relevant_nanos.flatten()[condition]
            laser_nanos = laser_nanos[:, np.newaxis, np.newaxis]
            XYZ_rotated = np.concatenate((XYZ_rotated, laser_nanos), axis=-1)

            laser_id = global_laser_id.flatten()[condition]
            laser_id = laser_id[:, np.newaxis, np.newaxis]
            XYZ_rotated = np.concatenate((XYZ_rotated, laser_id), axis=-1)

            XYZ_rotated[:, 0, 0] += lon[condition]
            XYZ_rotated[:, 0, 1] += lat[condition]
            XYZ_rotated[:, 0, 2] += ele[condition]

            #to easily display height in cloudcompare
            extra_elevation_field = XYZ_rotated[:, :, 2].flatten()
            extra_elevation_field = extra_elevation_field[:, np.newaxis,
                                                          np.newaxis]
            XYZ_rotated = np.concatenate((XYZ_rotated, extra_elevation_field),
                                         axis=-1)

            if return_counter == 1:
                first_returns = np.copy(XYZ_rotated)
            elif return_counter == 2:
                first_returns = np.concatenate((first_returns, XYZ_rotated))

        fname = f'{pcap_file_in.split(".")[0]}'
        #np.savetxt(f"{fname}.xyz", np.squeeze(first_returns, axis = -2), fmt='%.4f')
        np.savetxt(os.path.join(out_dir_ascii,
                                f"{fname}_r{run_count:03d}.xyz"),
                   np.squeeze(first_returns, axis=-2),
                   fmt=[
                       '%.3f', '%.3f', '%.3f', '%3d', '%1d', '%3d', '%1d',
                       '%.9f', '%.3f', '%d', '%d', '%.3f'
                   ])
        #fmt=['%.3f', '%.3f', '%.3f', '%3d', '%1d', '%3d', '%1d', '%.9f', '%.3f', '%.3f', '%d', '%.3f'])
        #fmt=['%.3f', '%.3f', '%.3f', '%3d', '%1d', '%3d', '%1d', '%.9f', '%.3f', '%.3f', '%.3f', '%.3f', '%d'])
        current_ascii_file = f"{fname}_r{run_count:03d}.xyz"
        concat_files.append(os.path.join(out_dir_ascii, current_ascii_file))
    merged_ascii_file = os.path.join(out_dir_ascii, f"{fname}.xyz")

    print(f"Concatenating {len(concat_files)} ascii files")
    start = time.time()
    command = f"{concatenate_cmd} {' '.join(concat_files)} > {merged_ascii_file}"
    logging.info(command)
    os.system(command)
    print(f"Done in {(time.time() -start):.2f} seconds.")

    print(
        f"{pcap_file_in}: Removing {len(concat_files)} redundant ascii files")
    start = time.time()
    for f in concat_files:
        os.remove(f)
    print(f"{pcap_file_in}: Done in {(time.time() -start):.2f} seconds.")

    # for fname in concat_files.strip().split(" "):
    #     os.remove(fname)

    print(
        f"Finished processing file {os.path.join(pcap_dir_in, pcap_file_in)}")
    logging.info(
        f"Finished processing file {os.path.join(pcap_dir_in, pcap_file_in)}")
Example #49
0
def merge_particles(sim_p: POINTER_REB_SIM, collision: reb_collision, ed: ExtraData):
    global massloss_estimator
    print("--------------")
    print("colliding")
    sim: Simulation = sim_p.contents
    print("current time step", sim.dt)
    print(f"p1 is {collision.p1}")
    print(f"p2 is {collision.p2}")
    # the assignment to cp1 or cp2 seems to be random
    # also look at a copy instead of the original particles
    # to avoid issues after they have been modified
    cp1: Particle = sim.particles[collision.p1].copy()
    cp2: Particle = sim.particles[collision.p2].copy()

    # just calling the more massive one the target to keep its type/name
    # Sun<->Protoplanet -> Sun
    # and to keep collisions mostly reproducible
    if cp1.m > cp2.m:
        target = cp1
        projectile = cp2
    else:  # also when masses are the same
        target = cp2
        projectile = cp1

    if collision.p1 > collision.p2:
        lower_index_particle_index = collision.p2
    else:
        lower_index_particle_index = collision.p1

    print(f"colliding {target.hash.value} ({ed.pd(target).type}) "
          f"with {projectile.hash.value} ({ed.pd(projectile).type})")

    projectile_wmf = ed.pd(projectile).water_mass_fraction
    projectile_cmf = ed.pd(projectile).core_mass_fraction
    target_wmf = ed.pd(target).water_mass_fraction
    target_cmf = ed.pd(target).core_mass_fraction

    # get the velocities, velocity differences and unit vector as numpy arrays
    # all units are in system units (so AU/year)
    v1 = np.array(target.vxyz)
    v2 = np.array(projectile.vxyz)
    r1 = np.array(target.xyz)
    r2 = np.array(projectile.xyz)
    vdiff = v2 - v1
    rdiff = r2 - r1
    vdiff_n = linalg.norm(vdiff)
    rdiff_n = linalg.norm(rdiff)
    print("dt", sim.dt)
    # during a collision ias15 should always be used, otherwise something weird has happened
    assert sim.ri_mercurius.mode == 1

    print("rdiff", rdiff)
    print("vdiff", vdiff)
    print("sum_radii", target.r + projectile.r)
    print("rdiff_n", rdiff_n)
    print("vdiff_n", vdiff_n)
    ang = float(np.degrees(np.arccos(np.dot(rdiff, vdiff) / (rdiff_n * vdiff_n))))
    if ang > 90:
        ang = 180 - ang

    print("angle_deg", ang)
    print()
    # get mass fraction
    gamma = projectile.m / target.m

    # calculate mutual escape velocity (for norming the velocities in the interpolation) in SI units
    escape_velocity = sqrt(2 * G * (target.m + projectile.m) / ((target.r + projectile.r) * astronomical_unit))

    print("interpolating")

    if not massloss_estimator:
        methods = [RbfMassloss, LeiZhouMassloss, PerfectMerging, SimpleNNMassloss]
        per_name = {}
        for method in methods:
            per_name[method.name] = method
        try:
            estimator_class = per_name[ed.meta.massloss_method]
        except KeyError:
            print("invalid mass loss estimation method")
            print("please use one of these:")
            print(per_name)
            raise
        massloss_estimator = estimator_class()

    # let interpolation calculate water and mass retention fraction
    # meta is just a bunch of intermediate results that will be logged to help
    # understand the collisions better
    input_data = Input(
        alpha=ang,
        velocity_original=vdiff_n,
        escape_velocity=escape_velocity,
        gamma=gamma,
        projectile_mass=projectile.m,
        target_water_fraction=target_wmf,
        projectile_water_fraction=projectile_wmf,
    )

    water_ret, mantle_ret, core_ret, meta = get_mass_fractions(input_data)
    print("mass retentions:", water_ret, mantle_ret, core_ret)

    meta.collision_velocities = (v1.tolist(), v2.tolist())
    meta.collision_positions = (target.xyz, projectile.xyz)
    meta.collision_radii = (target.r, projectile.r)

    hash = unique_hash(ed)  # hash for newly created particle

    # handle loss of water and core mass
    water_mass = target.m * target_wmf + projectile.m * projectile_wmf
    core_mass = target.m * target_cmf + projectile.m * projectile_cmf
    mantle_mass = target.m + projectile.m - water_mass - core_mass

    water_mass *= water_ret
    mantle_mass *= mantle_ret
    core_mass *= core_ret

    total_mass = water_mass + mantle_mass + core_mass
    final_wmf = water_mass / total_mass
    final_cmf = core_mass / total_mass
    print(final_wmf)
    # create new object preserving momentum
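    # rebound Particle arithmetic is componentwise, so the merged velocity is
    # (m1*v1 + m2*v2)/total_mass: linear momentum is preserved, with any lost
    # mass assumed to carry no momentum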
    merged_planet = (target * target.m + projectile * projectile.m) / total_mass
    merged_planet.m = total_mass
    merged_planet.hash = hash

    merged_planet.r = PlanetaryRadius(merged_planet.m, final_wmf, final_cmf).total_radius / astronomical_unit
    ed.pdata[hash.value] = ParticleData(
        water_mass_fraction=final_wmf,
        core_mass_fraction=final_cmf,
        type=ed.pd(target).type,
        total_mass=total_mass
    )

    meta.final_wmf = final_wmf
    meta.final_radius = merged_planet.r
    meta.target_wmf = target_wmf
    meta.projectile_wmf = projectile_wmf
    meta.time = sim.t
    pprint(meta)

    ed.tree.add(target, projectile, merged_planet, meta)

    sim.particles[lower_index_particle_index] = merged_planet

    sim.move_to_com()
    sim.integrator_synchronize()
    sim.ri_mercurius.recalculate_coordinates_this_timestep = 1
    sim.ri_mercurius.recalculate_dcrit_this_timestep = 1

    print("collision finished")
    print("--------------")
    # from rebound docs:
    # A return value of 0 indicates that both particles remain in the simulation.
    # A return value of 1 (2) indicates that particle 1 (2) should be removed from the simulation.
    # A return value of 3 indicates that both particles should be removed from the simulation.
    # always keep lower index particle and delete other one
    # this keeps the N_active working
    if lower_index_particle_index == collision.p1:
        print("deleting p2")
        return 2
    elif lower_index_particle_index == collision.p2:
        print("deleting p1")
        return 1
    else:
        raise ValueError("invalid index")
Example #50
0
poll_interval = imu.IMUGetPollInterval()

# magnetic deviation

while True:

    if imu.IMURead():
        data = imu.getIMUData()
        #~ print data

        q = np.asarray(data["fusionQPose"])
        #~ print q

        #~ print data.keys()
        time.sleep(poll_interval * 1.0 / 1000.0)

        q_raw = pq.Quaternion(q[0], q[1], q[2], q[3])
        q_x_fix = pq.Quaternion(axis=[1.0, 0.0, 0.0], degrees=-90.0)

        q_corrected = q_x_fix * q_raw
        #~ q_corrected[1] *= -1.0;
        q_corrected[2] *= -1.0
        q_corrected[3] *= -1.0
        #~ look_dir = np.array([0.0, 0.0, 1.0])
        #~
        #~ look_dir = q_corrected.rotate(look_dir);

        print('axis:', q_corrected.axis)
        print('angle:', np.degrees(q_corrected.angle))
Example #51
0
                v = v2 - v1  # one difference vector per connected joint pair
                # Normalize v
                v = v / np.linalg.norm(v, axis=1)[:, np.newaxis]

                # Get angle using arccos of dot product
                angle = np.arccos(
                    np.einsum(
                        'nt,nt->n', v[[
                            0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                            16, 17, 18, 19, 20
                        ], :], v[[
                            1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                            17, 18, 19, 20, 21
                        ], :]))

                angle = np.degrees(angle)  # Convert radian to degree

                angle_label = np.array([angle], dtype=np.float32)
                angle_label = np.append(angle_label, idx)

                d = np.concatenate([joint.flatten(), angle_label])

                data.append(d)

                mp_drawing.draw_landmarks(img, result.face_landmarks,
                                          mp_holistic.FACE_CONNECTIONS)

            cv2.imshow('img', img)
            if cv2.waitKey(1) == ord('q'):
                break
Example #52
0
def calcTheta(ra, dec, ra_c, dec_c):
    deltaX, deltaY = (ra - ra_c), (dec - dec_c)
    theta = np.degrees(np.arctan2(deltaY, deltaX)) + 180
    return theta
Example #53
0
 def get_angle(self, p1, p2, p3):
     v0 = np.array(p2) - np.array(p1)
     v1 = np.array(p3) - np.array(p1)
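     # for 2-D points: det([v0, v1]) is the cross product (sine term), np.dot the cosine term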
     angle = np.math.atan2(np.linalg.det([v0, v1]), np.dot(v0, v1))
     return np.degrees(angle)
Example #54
0
def make_pix_models(fname,
                    ra1='ra',
                    dec1='dec',
                    ra2='RAJ2000',
                    dec2='DEJ2000',
                    fitsname=None,
                    plots=False,
                    smooth=300.):
    """
    Read a fits file which contains the crossmatching results for two catalogues.
    Catalogue 1 is the source catalogue (positions that need to be corrected)
    Catalogue 2 is the reference catalogue (correct positions)
    return rbf models for the ra/dec corrections
    :param fname: filename for the crossmatched catalogue
    :param ra1: column name for the ra degrees in catalogue 1 (source)
    :param dec1: column name for the dec degrees in catalogue 1 (source)
    :param ra2: column name for the ra degrees in catalogue 2 (reference)
    :param dec2: column name for the dec degrees in catalogue 2 (reference)
    :param fitsname: fitsimage upon which the pixel models will be based
    :param plots: True = Make plots
    :param smooth: smoothing radius (in pixels) for the RBF function
    :return: (dxmodel, dymodel)
    """
    filename, file_extension = os.path.splitext(fname)
    if file_extension == ".fits":
        raw_data = fits.open(fname)[1].data
    elif file_extension == ".vot":
        raw_data = parse_single_table(fname).array

    # get the wcs
    hdr = fits.getheader(fitsname)
    imwcs = wcs.WCS(hdr, naxis=2)

    # filter the data to only include SNR>10 sources
    flux_mask = np.where(raw_data['peak_flux'] / raw_data['local_rms'] > 10)
    data = raw_data[flux_mask]

    cat_xy = imwcs.all_world2pix(list(zip(data[ra1], data[dec1])), 1)
    ref_xy = imwcs.all_world2pix(list(zip(data[ra2], data[dec2])), 1)

    diff_xy = ref_xy - cat_xy

    dxmodel = interpolate.Rbf(cat_xy[:, 0],
                              cat_xy[:, 1],
                              diff_xy[:, 0],
                              function='linear',
                              smooth=smooth)
    dymodel = interpolate.Rbf(cat_xy[:, 0],
                              cat_xy[:, 1],
                              diff_xy[:, 1],
                              function='linear',
                              smooth=smooth)

    if plots:
        import matplotlib
        # Super-computer-safe
        matplotlib.use('Agg')
        from matplotlib import pyplot
        from matplotlib import gridspec
        # Perceptually uniform cyclic color schemes
        try:
            import seaborn as sns
            cmap = matplotlib.colors.ListedColormap(
                sns.color_palette("husl", 256))
        except ImportError:
            print("seaborne not detected; using hsv color scheme")
            cmap = 'hsv'
        # Attractive serif fonts
        if which("latex"):
            try:
                from matplotlib import rc
                rc('text', usetex=True)
                rc('font', **{'family': 'serif', 'serif': ['serif']})
            except Exception:
                print("rc setup failed; using sans serif fonts")
        else:
            print("latex not detected; using sans serif fonts")
        xmin, xmax = 0, hdr['NAXIS1']
        ymin, ymax = 0, hdr['NAXIS2']

        gx, gy = np.mgrid[xmin:xmax:(xmax - xmin) / 50.,
                          ymin:ymax:(ymax - ymin) / 50.]
        mdx = dxmodel(np.ravel(gx), np.ravel(gy))
        mdy = dymodel(np.ravel(gx), np.ravel(gy))
        x = cat_xy[:, 0]
        y = cat_xy[:, 1]

        # plot w.r.t. centre of image, in degrees
        try:
            delX = abs(hdr['CD1_1'])
        except KeyError:
            delX = abs(hdr['CDELT1'])
        try:
            delY = hdr['CD2_2']
        except KeyError:
            delY = hdr['CDELT2']


        # shift all co-ordinates and put them in degrees
        x -= hdr['NAXIS1'] / 2
        gx -= hdr['NAXIS1'] / 2
        xmin -= hdr['NAXIS1'] / 2
        xmax -= hdr['NAXIS1'] / 2
        x *= delX
        gx *= delX
        xmin *= delX
        xmax *= delX
        y -= hdr['NAXIS2'] / 2
        gy -= hdr['NAXIS2'] / 2
        ymin -= hdr['NAXIS2'] / 2
        ymax -= hdr['NAXIS2'] / 2
        y *= delY
        gy *= delY
        ymin *= delY
        ymax *= delY
        scale = 1

        dx = diff_xy[:, 0]
        dy = diff_xy[:, 1]

        fig = pyplot.figure(figsize=(12, 6))
        gs = gridspec.GridSpec(100, 100)
        gs.update(hspace=0, wspace=0)
        kwargs = {
            'angles': 'xy',
            'scale_units': 'xy',
            'scale': scale,
            'cmap': cmap,
            'clim': [-180, 180]
        }
        angles = np.degrees(np.arctan2(dy, dx))
        ax = fig.add_subplot(gs[0:100, 0:48])
        cax = ax.quiver(x, y, dx, dy, angles, **kwargs)
        ax.set_xlim((xmin, xmax))
        ax.set_ylim((ymin, ymax))
        ax.set_xlabel("Distance from pointing centre / degrees")
        ax.set_ylabel("Distance from pointing centre / degrees")
        ax.set_title("Source position offsets / arcsec")
        #        cbar = fig.colorbar(cax, orientation='horizontal')

        ax = fig.add_subplot(gs[0:100, 49:97])
        cax = ax.quiver(gx, gy, mdx, mdy, np.degrees(np.arctan2(mdy, mdx)),
                        **kwargs)
        ax.set_xlim((xmin, xmax))
        ax.set_ylim((ymin, ymax))
        ax.set_xlabel("Distance from pointing centre / degrees")
        ax.tick_params(axis='y', labelleft=False)
        ax.set_title("Model position offsets / arcsec")
        #        cbar = fig.colorbar(cax, orientation='vertical')
        # Color bar
        ax2 = fig.add_subplot(gs[0:100, 98:100])
        cbar3 = pyplot.colorbar(cax, cax=ax2, use_gridspec=True)
        cbar3.set_label('Angle CCW from West / degrees')  #,labelpad=-75)
        cbar3.ax.yaxis.set_ticks_position('right')

        outname = os.path.splitext(fname)[0] + '.png'
        #        pyplot.show()
        pyplot.savefig(outname, dpi=200)

    return dxmodel, dymodel
Example #55
0
    def event(self):
        """
        Create a neutrino event and run it through the simulation chain.

        Creates a particle using the ``generator``, produces a signal from that
        event, propagates that signal through the ice according to the
        ``ice_model`` and the ``ray_tracer``, and passes it into the
        ``antennas`` for processing.

        Returns
        -------
        event : Event
            The neutrino event generated which is responsible for the waveforms
            on the antennas.
        triggered : bool, optional
            If the ``triggers`` parameter was specified, contains whether the
            global trigger condition of the detector was met.

        See Also
        --------
        pyrex.Event : Class for storing a tree of `Particle` objects
                      representing an event.
        pyrex.Particle : Class for storing particle attributes.

        """
        event = self.gen.create_event()
        ray_paths = []
        polarizations = []
        for i in range(len(self.antennas)):
            ray_paths.append([])
            polarizations.append([])
        for particle in event:
            logger.info("Processing event for %s", particle)
            for i, ant in enumerate(self.antennas):
                rt = self.ray_tracer(particle.vertex,
                                     ant.position,
                                     ice_model=self.ice)

                # If no path(s) between the points, skip ahead
                if not rt.exists:
                    logger.debug("Ray paths to %s do not exist", ant)
                    continue

                ray_paths[i].extend(rt.solutions)
                for path in rt.solutions:
                    # nu_pol is the signal polarization at the neutrino vertex
                    # It's calculated as the (negative) vector rejection of
                    # path.emitted_direction onto particle.direction, making
                    # epol orthogonal to path.emitted_direction in the same
                    # plane as particle.direction and path.emitted_direction
                    # This is equivalent to the vector triple product
                    # (particle.direction x path.emitted_direction) x
                    # path.emitted_direction
                    # In the case when path.emitted_direction and
                    # particle.direction are equal, just let nu_pol be zeros
                    nu_pol = normalize(
                        np.vdot(path.emitted_direction, particle.direction) *
                        path.emitted_direction - particle.direction)
                    polarizations[i].append(nu_pol)

                    psi = np.arccos(
                        np.vdot(particle.direction, path.emitted_direction))
                    logger.debug("Angle to %s is %f degrees", ant,
                                 np.degrees(psi))
                    # TODO: Support angles larger than pi/2
                    # (low priority since these angles are far from the
                    # cherenkov cone)
                    if psi > np.pi / 2:
                        continue

                    pulse = self.signal_model(
                        times=self.signal_times,
                        particle=particle,
                        viewing_angle=psi,
                        viewing_distance=path.path_length,
                        ice_model=self.ice)

                    ant_pulses, ant_pols = path.propagate(signal=pulse,
                                                          polarization=nu_pol)

                    ant.receive(ant_pulses,
                                direction=path.received_direction,
                                polarization=ant_pols)

        if self.triggers is None:
            triggered = None
        elif isinstance(self.triggers, dict):
            triggered = {
                key: trigger_func(self.antennas)
                for key, trigger_func in self.triggers.items()
            }
        else:
            triggered = self.triggers(self.antennas)

        if self.writer is not None:
            self.writer.add(event=event,
                            triggered=triggered,
                            ray_paths=ray_paths,
                            polarizations=polarizations,
                            events_thrown=self.gen.count - self._gen_count)

        self._gen_count = self.gen.count

        if triggered is None:
            return event
        elif isinstance(self.triggers, dict):
            return event, triggered['global']
        else:
            return event, triggered
Example #56
0
def calibeovsa(vis=None,
               caltype=None,
               interp=None,
               docalib=True,
               doflag=True,
               flagant=None,
               doimage=False,
               imagedir=None,
               antenna=None,
               timerange=None,
               spw=None,
               stokes=None,
               doconcat=False,
               msoutdir=None,
               keep_orig_ms=True):
    '''

    :param vis: EOVSA visibility dataset(s) to be calibrated 
    :param caltype:
    :param interp:
    :param docalib:
    :param doimage:
    :param flagant:
    :param stokes:
    :param doconcat:
    :return:
    '''

    if type(vis) == str:
        vis = [vis]

    for idx, f in enumerate(vis):
        if f[-1] == '/':
            vis[idx] = f[:-1]

    for msfile in vis:
        casalog.origin('calibeovsa')
        if not caltype:
            casalog.post(
                "Caltype not provided. Performing reference phase, daily phase, and flux calibration."
            )
            caltype = ['refpha', 'phacal', 'fluxcal']
        if not os.path.exists(msfile):
            casalog.post("Input visibility does not exist. Aborting...")
            continue
        if msfile[-3:] not in ('.ms', '.MS'):
            casalog.post(
                "Invalid visibility. Please provide a proper visibility file ending with .ms"
            )
            continue
        # if not caltable:
        #    caltable=[os.path.basename(vis).replace('.ms','.'+c) for c in caltype]

        # get band information
        tb.open(msfile + '/SPECTRAL_WINDOW')
        nspw = tb.nrows()
        bdname = tb.getcol('NAME')
        bd_nchan = tb.getcol('NUM_CHAN')
        bd = [int(b[4:]) - 1 for b in bdname]  # band index from 0 to 33
        # nchans = tb.getcol('NUM_CHAN')
        # reffreqs = tb.getcol('REF_FREQUENCY')
        # cenfreqs = np.zeros((nspw))
        tb.close()
        tb.open(msfile + '/ANTENNA')
        nant = tb.nrows()
        antname = tb.getcol('NAME')
        antlist = [str(ll) for ll in range(len(antname) - 1)]  # exclude the last antenna
        antennas = ','.join(antlist)
        tb.close()

        # get time stamp, use the beginning of the file
        tb.open(msfile + '/OBSERVATION')
        trs = {'BegTime': [], 'EndTime': []}
        for ll in range(tb.nrows()):
            tim0, tim1 = Time(tb.getcell('TIME_RANGE', ll) / 24 / 3600,
                              format='mjd')
            trs['BegTime'].append(tim0)
            trs['EndTime'].append(tim1)
        tb.close()
        trs['BegTime'] = Time(trs['BegTime'])
        trs['EndTime'] = Time(trs['EndTime'])
        btime = np.min(trs['BegTime'])
        etime = np.max(trs['EndTime'])
        # ms.open(vis)
        # summary = ms.summary()
        # ms.close()
        # btime = Time(summary['BeginTime'], format='mjd')
        # etime = Time(summary['EndTime'], format='mjd')
        ## stop using ms.summary to avoid conflicts with importeovsa
        t_mid = Time((btime.mjd + etime.mjd) / 2., format='mjd')
        print "This scan observed from {} to {} UTC".format(
            btime.iso, etime.iso)
        gaintables = []

        if ('refpha' in caltype) or ('refamp' in caltype) or ('refcal' in caltype):
            refcal = ra.sql2refcalX(btime)
            pha = refcal['pha']  # shape is 15 (nant) x 2 (npol) x 34 (nband)
            pha[np.where(refcal['flag'] == 1)] = 0.
            amp = refcal['amp']
            amp[np.where(refcal['flag'] == 1)] = 1.
            t_ref = refcal['timestamp']
            # find the start and end time of the local day when refcal is registered
            try:
                dhr = t_ref.LocalTime.utcoffset().total_seconds() / 60. / 60.
            except Exception:
                dhr = -7.  # fall back to UTC-7
            bt = Time(np.fix(t_ref.mjd + dhr / 24.) - dhr / 24., format='mjd')
            et = Time(bt.mjd + 1., format='mjd')
            (yr, mon, day) = (bt.datetime.year, bt.datetime.month,
                              bt.datetime.day)
            dirname = caltbdir + str(yr) + str(mon).zfill(2) + '/'
            if not os.path.exists(dirname):
                os.mkdir(dirname)
            # check if there is any ROACH reboot between the reference calibration found and the current data
            t_rbts = db.get_reboot(Time([t_ref, btime]))
            if not t_rbts:
                msg = ("Reference calibration is derived from observation at "
                       + t_ref.iso)
                casalog.post(msg)
                print(msg)
            else:
                msg = ("ROACH reboot detected between the reference calibration time "
                       + t_ref.iso + ' and the current observation at ' + btime.iso)
                casalog.post(msg)
                print(msg)
                casalog.post("Aborting...")
                print("Aborting...")
                continue

            para_pha = []
            para_amp = []
            calpha = np.zeros((nspw, 15, 2))
            calamp = np.zeros((nspw, 15, 2))
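            # para_pha/para_amp are flattened in spw-major order (spw, then
            # antenna, then pol); this is the ordering handed to gencal's
            # parameter argument below.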
            for s in range(nspw):
                for n in range(15):
                    for p in range(2):
                        calpha[s, n, p] = pha[n, p, bd[s]]
                        calamp[s, n, p] = amp[n, p, bd[s]]
                        para_pha.append(np.degrees(pha[n, p, bd[s]]))
                        para_amp.append(amp[n, p, bd[s]])

        if 'fluxcal' in caltype:
            calfac = pc.get_calfac(Time(t_mid.iso.split(' ')[0] + 'T23:59:59'))
            t_bp = Time(calfac['timestamp'], format='lv')
            if int(t_mid.mjd) == int(t_bp.mjd):
                accalfac = calfac['accalfac']  # (ant x pol x freq)
                caltb_autoamp = dirname + t_bp.isot[:-4].replace(
                    ':', '').replace('-', '') + '.bandpass'
                if not os.path.exists(caltb_autoamp):
                    bandpass(vis=msfile,
                             caltable=caltb_autoamp,
                             solint='inf',
                             refant='eo01',
                             minblperant=1,
                             minsnr=0,
                             bandtype='B',
                             docallib=False)
                    tb.open(caltb_autoamp, nomodify=False)  # (ant x spw)
                    bd_chanidx = np.hstack([[0], bd_nchan.cumsum()])
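                    # bd_chanidx holds the cumulative channel offsets, so spw ll
                    # occupies channels bd_chanidx[ll]:bd_chanidx[ll + 1] along
                    # accalfac's frequency axis.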
                    for ll in range(nspw):
                        cparam = np.zeros((2, bd_nchan[ll], nant))
                        cparam[:, :, :-3] = 1.0 / np.moveaxis(
                            accalfac[:, :, bd_chanidx[ll]:bd_chanidx[ll + 1]],
                            0, 2)
                        tb.putcol('CPARAM', cparam + 0j, ll * nant, nant)
                        paramerr = tb.getcol('PARAMERR', ll * nant, nant)
                        paramerr = paramerr * 0
                        tb.putcol('PARAMERR', paramerr, ll * nant, nant)
                        bpflag = tb.getcol('FLAG', ll * nant, nant)
                        bpant1 = tb.getcol('ANTENNA1', ll * nant, nant)
                        bpflagidx, = np.where(bpant1 >= 13)
                        bpflag[:] = False
                        bpflag[:, :, bpflagidx] = True
                        tb.putcol('FLAG', bpflag, ll * nant, nant)
                        bpsnr = tb.getcol('SNR', ll * nant, nant)
                        bpsnr[:] = 100.0
                        bpsnr[:, :, bpflagidx] = 0.0
                        tb.putcol('SNR', bpsnr, ll * nant, nant)
                    tb.close()
                    msg_prompt = "Scaling calibration is derived for {}.".format(
                        msfile)
                    casalog.post(msg_prompt)
                    print(msg_prompt)
                gaintables.append(caltb_autoamp)
            else:
                msg_prompt = "Caution: No TPCAL is available on {}. No scaling calibration is derived for {}.".format(
                    t_mid.datetime.strftime('%b %d, %Y'), msfile)
                casalog.post(msg_prompt)
                print(msg_prompt)

        if ('refpha' in caltype) or ('refcal' in caltype):
            # caltb_pha = os.path.basename(vis).replace('.ms', '.refpha')
            # check if the calibration table already exists
            caltb_pha = dirname + t_ref.isot[:-4].replace(':', '').replace(
                '-', '') + '.refpha'
            if not os.path.exists(caltb_pha):
                gencal(vis=msfile,
                       caltable=caltb_pha,
                       caltype='ph',
                       antenna=antennas,
                       pol='X,Y',
                       spw='0~' + str(nspw - 1),
                       parameter=para_pha)
            gaintables.append(caltb_pha)
        if ('refamp' in caltype) or ('refcal' in caltype):
            # caltb_amp = os.path.basename(vis).replace('.ms', '.refamp')
            caltb_amp = dirname + t_ref.isot[:-4].replace(':', '').replace(
                '-', '') + '.refamp'
            if not os.path.exists(caltb_amp):
                gencal(vis=msfile,
                       caltable=caltb_amp,
                       caltype='amp',
                       antenna=antennas,
                       pol='X,Y',
                       spw='0~' + str(nspw - 1),
                       parameter=para_amp)
            gaintables.append(caltb_amp)

        # calibration for the change of delay center between refcal time and beginning of scan -- hopefully none!
        xml, buf = ch.read_calX(4, t=[t_ref, btime], verbose=False)
        if buf:
            dly_t2 = Time(stf.extract(buf[0], xml['Timestamp']), format='lv')
            dlycen_ns2 = stf.extract(buf[0], xml['Delaycen_ns'])[:15]
            xml, buf = ch.read_calX(4, t=t_ref)
            dly_t1 = Time(stf.extract(buf, xml['Timestamp']), format='lv')
            dlycen_ns1 = stf.extract(buf, xml['Delaycen_ns'])[:15]
            dlycen_ns_diff = dlycen_ns2 - dlycen_ns1
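            # Reference the delay-center change to the first antenna so only
            # delays relative to it are corrected.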
            for n in range(2):
                dlycen_ns_diff[:, n] -= dlycen_ns_diff[0, n]
            print('Multi-band delay is derived from delay center difference at {} & {}'.format(
                dly_t1.iso, dly_t2.iso))
            # caltb_mbd0 = os.path.basename(vis).replace('.ms', '.mbd0')
            caltb_dlycen = dirname + dly_t2.isot[:-4].replace(':', '').replace(
                '-', '') + '.dlycen'
            if not os.path.exists(caltb_dlycen):
                gencal(vis=msfile,
                       caltable=caltb_dlycen,
                       caltype='mbd',
                       pol='X,Y',
                       antenna=antennas,
                       parameter=dlycen_ns_diff.flatten().tolist())
            gaintables.append(caltb_dlycen)

        if 'phacal' in caltype:
            phacals = np.array(
                ra.sql2phacalX([bt, et], neat=True, verbose=False))
            if not phacals.any():
                print("Found no phacal records in SQL database, will skip phase calibration")
            else:
                # first generate all phacal calibration tables if not already exist
                t_phas = Time([phacal['t_pha'] for phacal in phacals])
                # sort the array in ascending order by t_pha
                sinds = t_phas.mjd.argsort()
                t_phas = t_phas[sinds]
                phacals = phacals[sinds]
                caltbs_phambd = []
                # keep only phase cals whose reference time stamp is within
                # 30 min of the refcal time
                good, = np.where(
                    np.abs(np.array([p['t_ref'].jd for p in phacals]) -
                           refcal['timestamp'].jd) <= 30. / 1440.)
                phacals = phacals[good]
                t_phas = t_phas[good]
                for phacal in phacals:
                    t_pha = phacal['t_pha']
                    phambd_ns = phacal['pslope']
                    for n in range(2):
                        phambd_ns[:, n] -= phambd_ns[0, n]
                    # set all flagged values to zero
                    phambd_ns[np.where(phacal['flag'] == 1)] = 0.
                    caltb_phambd = dirname + t_pha.isot[:-4].replace(
                        ':', '').replace('-', '') + '.phambd'
                    caltbs_phambd.append(caltb_phambd)
                    if not os.path.exists(caltb_phambd):
                        gencal(vis=msfile,
                               caltable=caltb_phambd,
                               caltype='mbd',
                               pol='X,Y',
                               antenna=antennas,
                               parameter=phambd_ns.flatten().tolist())

                # now decide which table to apply depending on the interpolation method ("nearest" or "linear")
                if interp == 'nearest':
                    tbind = np.argmin(np.abs(t_phas.mjd - t_mid.mjd))
                    dt = np.min(np.abs(t_phas.mjd - t_mid.mjd)) * 24.
                    print("Selected nearest phase calibration table at " +
                          t_phas[tbind].iso +
                          " ({:.1f} hr from scan center)".format(dt))
                    gaintables.append(caltbs_phambd[tbind])
                if interp == 'linear':
                    # bphacal = ra.sql2phacalX(btime)
                    # ephacal = ra.sql2phacalX(etime,reverse=True)
                    bt_ind, = np.where(t_phas.mjd < btime.mjd)
                    et_ind, = np.where(t_phas.mjd > etime.mjd)
                    if len(bt_ind) == 0 and len(et_ind) == 0:
                        print "No phacal found before or after the ms data within the day of observation"
                        print "Skipping daily phase calibration"
                    elif len(bt_ind) > 0 and len(et_ind) == 0:
                        gaintables.append(caltbs_phambd[bt_ind[-1]])
                    elif len(bt_ind) == 0 and len(et_ind) > 0:
                        gaintables.append(caltbs_phambd[et_ind[0]])
                    elif len(bt_ind) > 0 and len(et_ind) > 0:
                        bphacal = phacals[bt_ind[-1]]
                        ephacal = phacals[et_ind[0]]
                        # generate a new table interpolating between two daily phase calibrations
                        t_pha_mean = Time(np.mean(
                            [bphacal['t_pha'].mjd, ephacal['t_pha'].mjd]),
                                          format='mjd')
                        phambd_ns = (bphacal['pslope'] +
                                     ephacal['pslope']) / 2.
                        for n in range(2):
                            phambd_ns[:, n] -= phambd_ns[0, n]
                        # set all flagged values to be zero
                        phambd_ns[np.where(bphacal['flag'] == 1)] = 0.
                        phambd_ns[np.where(ephacal['flag'] == 1)] = 0.
                        caltb_phambd_interp = dirname + t_pha_mean.isot[:-4].replace(
                            ':', '').replace('-', '') + '.phambd'
                        if not os.path.exists(caltb_phambd_interp):
                            gencal(vis=msfile,
                                   caltable=caltb_phambd_interp,
                                   caltype='mbd',
                                   pol='X,Y',
                                   antenna=antennas,
                                   parameter=phambd_ns.flatten().tolist())
                        print "Using phase calibration table interpolated between records at " + bphacal[
                            't_pha'].iso + ' and ' + ephacal['t_pha'].iso
                        gaintables.append(caltb_phambd_interp)

        if docalib:
            clearcal(msfile)
            applycal(vis=msfile,
                     gaintable=gaintables,
                     applymode='calflag',
                     calwt=False)
            # delete the interpolated phase calibration table
            try:
                caltb_phambd_interp
            except NameError:
                pass
            else:
                if os.path.exists(caltb_phambd_interp):
                    shutil.rmtree(caltb_phambd_interp)
        if doflag and flagant:
            try:
                flagdata(vis=msfile, antenna=flagant)
            except Exception:
                print("Something is wrong with flagant. Aborting...")

        if doimage:
            from matplotlib import pyplot as plt
            from suncasa.utils import helioimage2fits as hf
            from sunpy import map as smap

            if not antenna:
                antenna = '0~12'
            if not stokes:
                stokes = 'XX'
            if not timerange:
                timerange = ''
            if not spw:
                spw = '1~3'
            if not imagedir:
                imagedir = '.'
            #(yr, mon, day) = (bt.datetime.year, bt.datetime.month, bt.datetime.day)
            #dirname = imagedir + str(yr) + '/' + str(mon).zfill(2) + '/' + str(day).zfill(2) + '/'
            #if not os.path.exists(dirname):
            #    os.makedirs(dirname)
            bds = [spw]
            nbd = len(bds)
            imgs = []
            for bd in bds:
                if '~' in bd:
                    bdstr = bd.replace('~', '-')
                else:
                    bdstr = str(bd).zfill(2)
                imname = imagedir + '/' + os.path.basename(msfile).replace(
                    '.ms', '.bd' + bdstr)
                print('Cleaning image: ' + imname)
                try:
                    clean(vis=msfile,
                          imagename=imname,
                          antenna=antenna,
                          spw=bd,
                          timerange=timerange,
                          imsize=[512],
                          cell=['5.0arcsec'],
                          stokes=stokes,
                          niter=500)
                except Exception:
                    print('clean not successful for band ' + str(bd))
                else:
                    imgs.append(imname + '.image')
                junks = ['.flux', '.mask', '.model', '.psf', '.residual']
                for junk in junks:
                    if os.path.exists(imname + junk):
                        shutil.rmtree(imname + junk)

            tranges = [btime.iso + '~' + etime.iso] * nbd
            fitsfiles = [img.replace('.image', '.fits') for img in imgs]
            hf.imreg(vis=msfile,
                     timerange=tranges,
                     imagefile=imgs,
                     fitsfile=fitsfiles,
                     usephacenter=False)
            plt.figure(figsize=(6, 6))
            for i, fitsfile in enumerate(fitsfiles):
                plt.subplot(1, nbd, i + 1)
                eomap = smap.Map(fitsfile)
                sz = eomap.data.shape
                if len(sz) == 4:
                    eomap.data = eomap.data.reshape((sz[2], sz[3]))
                eomap.plot_settings['cmap'] = plt.get_cmap('jet')
                eomap.plot()
                eomap.draw_limb()
                eomap.draw_grid()

            plt.show()

    if doconcat and len(vis) > 1:
        from suncasa.eovsa import concateovsa as ce
        msname = os.path.basename(vis[0])
        msname = msname.split('.')[0] + '_concat.ms'
        visprefix = msoutdir + '/'
        ce.concateovsa(msname,
                       vis,
                       visprefix,
                       doclearcal=False,
                       keep_orig_ms=keep_orig_ms,
                       cols2rm=["MODEL_DATA", "CORRECTED_DATA"])
        return [visprefix + msname]
    else:
        return vis
Example #57
0
import numpy as np

def unit_vector(v):
    return np.asarray(v) / np.linalg.norm(v)

def vector_angle(v1, v2):
    """Signed angle (degrees) from v1 to v2 in the xy-plane."""
    v1_u = unit_vector(v1)
    v2_u = unit_vector(v2)
    ang1 = np.degrees(np.arctan2(v1_u[1], v1_u[0]))
    ang2 = np.degrees(np.arctan2(v2_u[1], v2_u[0]))
    return ang2 - ang1
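A quick usage sketch (values are illustrative): because the result is a difference of two arctan2 angles, it can fall outside the ±180° range, so callers may want to wrap it:

print(vector_angle([1, 0], [0, 1]))              # 90.0
raw = vector_angle([-1.0, 1e-9], [-1.0, -1e-9])  # ~ -360 for a tiny turn
wrapped = (raw + 180.0) % 360.0 - 180.0          # wrapped into [-180, 180), ~ 0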
Example #58
0
            file.write(str(r) + ': E=' + str(E(cor_arr)) + ' E_2=' + str(E_2(cor_arr)) + ' D=' + str(D(cor_arr)) + '\n')
        vares = [mix_norm_dist(size) for i in range(0, 1000)]
        vars_x = [[r[0] for r in var] for var in vares]
        vars_y = [[r[1] for r in var] for var in vares]
        cor_arr = [cor_coef[coef](vars_x[i], vars_y[i]) for i in range(0, len(vars_x))]
        file.write('for mixin: E=' + str(E(cor_arr)) + ' E_2=' + str(E_2(cor_arr)) + ' D=' + str(D(cor_arr)) + '\n')
    fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 12))
    ax = axes.flatten()
    for i in range(0, 4):
        if i != 3:
            points = normal_dist(ro[i], size)
            ax[i].set_title('n = ' + str(size) + ', r=' + str(ro[i]))
        else:
            points = mix_norm_dist(size)
            ax[i].set_title('n = ' + str(size) + ', mix')
        nstd = 2
        r_x = [point[0] for point in points]
        r_y = [point[1] for point in points]
        ax[i].plot(r_x, r_y, 'bo', ms=4)
        cov = np.cov(r_x, r_y)
        vals, vecs = eigsorted(cov)
        theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
        w, h = 2 * nstd * np.sqrt(vals)
        ell = Ellipse(xy=(np.mean(r_x), np.mean(r_y)),
                      width=w, height=h,
                      angle=theta, color='black')
        ell.set_facecolor('none')
        ax[i].add_artist(ell)
    plt.tight_layout()
    fig.savefig('ellipse n=' + str(size))
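The snippet relies on an eigsorted helper that is not shown. A minimal sketch, assuming it returns the eigenvalues and eigenvectors of the covariance matrix sorted by descending eigenvalue, which is what the ellipse construction above implies:

import numpy as np

def eigsorted(cov):
    # Eigen-decomposition of a symmetric covariance matrix, ordered so the
    # largest eigenvalue (the ellipse's major axis) comes first.
    vals, vecs = np.linalg.eigh(cov)
    order = vals.argsort()[::-1]
    return vals[order], vecs[:, order]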
Example #59
0
def update_particles(particles, cam, velocity, angular_velocity, world,
                     WIN_RF1, WIN_World):
    raw_input()  # pause until Enter is pressed (Python 2; use input() on Python 3)
    print('update: ' + str(angular_velocity))
    cv2.waitKey(4)
    num_particles = len(particles)
    for p in particles:
        # calculate the new orientation
        curr_angle = add_to_angular_v2(np.degrees(p.getTheta()), angular_velocity)
        print('theta_rad: ' + str(p.getTheta()))
        print('theta_deg: ' + str(np.degrees(p.getTheta())))
        print('cur_ang_deg: ' + str(np.degrees(curr_angle)))
        if velocity > 0.0:
            [x, y] = move_vector(p, velocity)
            particle.move_particle(p, x, y, curr_angle)
        else:
            particle.move_particle(p, 0.0, 0.0, curr_angle)
            print('cur_ang_rad: ' + str(curr_angle))
    if velocity != 0.0:
        particle.add_uncertainty(particles, 12, 15)
    if velocity == 0.0 and angular_velocity != 0.0:
        particle.add_uncertainty(particles, 0, 15)

    # Fetch next frame
    colour, distorted = cam.get_colour()

    # Detect objects
    objectType, measured_distance, measured_angle, colourProb = cam.get_object(
        colour)

    if objectType != 'none':
        obs_landmark = ret_landmark(colourProb, objectType)
        observed_obj = [objectType, measured_distance, measured_angle,
                        obs_landmark]

        list_of_particles = weight_particles(particles,
                                             np.degrees(measured_angle),
                                             measured_distance, obs_landmark)

        particles = []
        for count in range(0, num_particles):
            rando = np.random.uniform(0.0, 1.0)
            p = when_in_range(list_of_particles,
                              0,
                              num_particles,
                              rando)
            particles.append(
                particle.Particle(p.getX(), p.getY(), p.getTheta(),
                                  1.0 / num_particles))
        print('list_of_particles: ' + str(list_of_particles))
        print('particles: ' + str(particles))

        particle.add_uncertainty(particles, 5, 2)

        # new random particles added
        #for c in range(0, int(math.ceil(num_particles * 0.05))):
        #    p = particle.Particle(500.0 * np.random.ranf() - 100,
        #                          500.0 * np.random.ranf() - 100,
        #                          2.0 * np.pi * np.random.ranf() - np.pi, 0.0)

        #    particles.append(p)

        # Draw detected pattern
        cam.draw_object(colour)
    else:
        observed_obj = [None, None, None, None]
        # No observation - reset weights to uniform distribution
        for p in particles:
            p.setWeight(1.0 / num_particles)

        particle.add_uncertainty(particles, 5, 2)

    est_pose = particle.estimate_pose(
        particles)  # the estimate of the robot's current pose

    print('Updated pose: ' + str([est_pose.getX(), est_pose.getY()]))
    draw_world(est_pose, particles, world)
    # Show frame
    cv2.imshow(WIN_RF1, colour)
    # Show world
    cv2.imshow(WIN_World, world)
    return {'est_pos': est_pose,
            'obs_obj': observed_obj,
            'particles': particles}
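The resampling loop above leans on external helpers (weight_particles, when_in_range). A minimal self-contained sketch of the underlying step, assuming when_in_range performs an inverse-CDF lookup over normalized weights (all names below are illustrative):

import numpy as np

def resample(states, weights):
    # Multinomial resampling: draw each new state with probability
    # proportional to its weight via the cumulative distribution.
    w = np.asarray(weights, dtype=float)
    w /= w.sum()
    cdf = np.cumsum(w)
    draws = np.random.uniform(0.0, 1.0, size=len(states))
    idx = np.minimum(np.searchsorted(cdf, draws), len(w) - 1)
    return [states[i] for i in idx]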
Example #60
0
def gt_callback(data):
    global global_ground_truth
    global_ground_truth = data.pose.pose

    gt_pose = global_ground_truth
    # Transform ground truth in body frame wrt. world frame to body frame wrt. landing platform

    ##########
    # 0 -> 2 #
    ##########

    # Position
    p_x = gt_pose.position.x
    p_y = gt_pose.position.y
    p_z = gt_pose.position.z

    # Translation of the world frame to body frame wrt. the world frame
    d_0_2 = np.array([p_x, p_y, p_z])

    # Orientation
    q_x = gt_pose.orientation.x
    q_y = gt_pose.orientation.y
    q_z = gt_pose.orientation.z
    q_w = gt_pose.orientation.w

    # Rotation of the body frame wrt. the world frame
    r_0_2 = R.from_quat([q_x, q_y, q_z, q_w])
    r_2_0 = r_0_2.inv()

    ##########
    # 0 -> 1 #
    ##########

    # Translation of the world frame to landing frame wrt. the world frame
    offset_x = 1.0
    offset_y = 0.0
    offset_z = 0.495
    d_0_1 = np.array([offset_x, offset_y, offset_z])

    # Rotation of the world frame to landing frame wrt. the world frame
    r_0_1 = np.identity(3)  # no rotation, only translation

    ##########
    # 2 -> 1 #
    ##########
    # Transformation of the body frame to landing frame wrt. the body frame

    # Translation of the landing frame to body frame wrt. the landing frame
    d_1_2 = d_0_2 - d_0_1

    # Rotation of the body frame to landing frame wrt. the body frame
    r_2_1 = r_2_0

    yaw = r_2_1.as_euler('xyz')[2]

    r_2_1_yaw = R.from_euler('z', yaw)

    # Translation of the body frame to landing frame wrt. the body frame
    d_2_1 = -r_2_1_yaw.apply(d_1_2)

    # Translation of the landing frame to body frame wrt. the body frame
    # This is more intuitive for the controller
    d_2_1_inv = -d_2_1

    local_ground_truth = np.concatenate((d_2_1_inv, r_2_1.as_euler('xyz')))

    # Transform to get the correct yaw
    yaw = -np.degrees(local_ground_truth[5]) - 90
    if yaw < -180:
        gt_yaw = 360 + yaw
    else:
        gt_yaw = yaw
    local_ground_truth[5] = gt_yaw

    ground_truth_msg = Twist()
    ground_truth_msg.linear.x = local_ground_truth[0]
    ground_truth_msg.linear.y = local_ground_truth[1]
    ground_truth_msg.linear.z = local_ground_truth[2]
    ground_truth_msg.angular.z = local_ground_truth[5]

    pub_ground_truth.publish(ground_truth_msg)
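The yaw wrap-around near the end (mapping any angle into [-180, 180)) can be written in one line with modular arithmetic; an equivalent sketch:

def wrap_deg(angle):
    # Map an angle in degrees into the interval [-180, 180).
    return (angle + 180.0) % 360.0 - 180.0

# e.g. wrap_deg(-250.0) == 110.0, matching the yaw < -180 branch above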