Example No. 1
    def test_csv_to_array(self):
        lat = csvi.csv_to_array(self.dummy_f, 'LATITUDE', float)
        lon = csvi.csv_to_array(self.dummy_f, 'LONGITUDE', float)
        wal = csvi.csv_to_array(self.dummy_f, 'WALLS', str)

        assert scipy.alltrue(self.LATITUDE == lat)
        assert scipy.alltrue(self.LONGITUDE == lon)
        assert scipy.alltrue(self.WALLS == wal)
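Note: scipy.alltrue was just a re-export of NumPy's alltrue alias, which was deprecated and then removed in NumPy 2.0; numpy.all is the drop-in replacement. A minimal sketch of the same assertion pattern on plain NumPy, with made-up values standing in for the CSV fixtures:

    import numpy as np

    LATITUDE = np.array([-34.5, -34.6])   # hypothetical fixture values
    lat = np.array([-34.5, -34.6])        # values parsed from the CSV

    # Element-wise comparison yields a boolean array; np.all reduces it,
    # which is exactly what scipy.alltrue did in these tests.
    assert np.all(LATITUDE == lat)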
Example No. 2
 def test_pix_counts_basic_cases(self) :
     pixels = dirty_map.pixel_counts(self.data, self.ra_inds, self.dec_inds,
                                   self.pix_counts)
     self.assertTrue(sp.alltrue(self.pix_counts[pixels.index((8,1))] == 2))
     self.assertTrue(sp.alltrue(self.pix_counts[pixels.index((5,2))] == 3))
     self.assertTrue(sp.alltrue(self.pix_counts[pixels.index((9,5))] == 1))
     self.assertTrue(sp.alltrue(self.pix_counts[pixels.index((7,3))] == 1))
     self.assertTrue(sp.alltrue(self.pix_counts[pixels.index((8,4))] == 1))
Example No. 4
 def check_ortho_norm(self, polys, window=1., axis=-1):
     # Always check that they are all orthonormal.
     n = polys.shape[0]
     for ii in range(n):
         for jj in range(n):
             prod = sp.sum(window * polys[ii, :] * polys[jj, :], axis)
             if ii == jj:
                 self.assertTrue(sp.alltrue(abs(prod - 1.) < 1e-8))
             else:
                 self.assertTrue(sp.alltrue(abs(prod) < 1e-8))
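What check_ortho_norm verifies can be exercised end to end on a concrete basis. A short sketch (NumPy only; the QR construction is an assumption, not the module's own polynomial generator) that builds an orthonormal basis over a grid and applies the same |prod - delta| < 1e-8 test:

    import numpy as np

    n, npts = 4, 100
    x = np.linspace(-1.0, 1.0, npts)
    # Orthonormalize the monomials 1, x, x^2, x^3 over the grid via QR.
    Q, _ = np.linalg.qr(np.vander(x, n, increasing=True))
    polys = Q.T  # shape (n, npts); rows are orthonormal under the point sum

    for ii in range(n):
        for jj in range(n):
            prod = np.sum(polys[ii] * polys[jj])
            target = 1.0 if ii == jj else 0.0
            assert abs(prod - target) < 1e-8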
Example No. 6
    def test_quick_convert_csv_to_arrays_lats_longs(self):
        lon = csvi.quick_convert_csv_to_arrays(self.dummy_f, LONGITUDE=float)
        assert list(lon.keys())[0] == 'LONGITUDE'
        assert len(lon.keys()) == 1
        assert scipy.alltrue(self.LONGITUDE == lon['LONGITUDE'])

        all_conversions = {'LONGITUDE': float, 'LATITUDE': float, 'WALLS': str}

        all = csvi.quick_convert_csv_to_arrays(self.dummy_f, **all_conversions)
        assert len(all.keys()) == 3
        assert scipy.alltrue(self.LATITUDE == all['LATITUDE'])
        assert scipy.alltrue(self.LONGITUDE == all['LONGITUDE'])
        assert scipy.alltrue(self.WALLS == all['WALLS'])
Example No. 8
 def test_correlated_scatter(self):
     n = 50
     r = (sp.arange(n, dtype=float) + 10.0 * n) / 10.0 * n
     data = sp.sin(sp.arange(n)) * r
     amp = 25.0
     theory = data / amp
     # Generate correlated matrix.
     C = random.rand(n, n)  # [0, 1)
     # Raise to high power to make values near 1 rare.
     C = (C**10) * 0.2
     C = (C + C.T) / 2.0
     C += sp.identity(n)
     C *= r[:, None] / 2.0
     C *= r[None, :] / 2.0
     # Generate random numbers in diagonal frame.
     h, R = linalg.eigh(C)
     self.assertTrue(sp.alltrue(h > 0))
     rand_vals = random.normal(size=n) * sp.sqrt(h)
     # Rotate back to the original frame; C = R diag(h) R^T, so apply R.
     data += sp.dot(R, rand_vals)
     out = utils.ampfit(data, C, theory)
     a, s = out['amp'], out['error']
     self.assertTrue(sp.allclose(a, amp, atol=5.0 * s, rtol=0))
     # Expect the next line to fail 1/100 trials.
     self.assertFalse(sp.allclose(a, amp, atol=0.01 * s, rtol=0))
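The setup above is the standard recipe for drawing correlated Gaussian noise: diagonalize the covariance, scale independent normals by the square root of the eigenvalues, and rotate back with the eigenvector matrix. A standalone sketch of just that recipe (sizes and seed are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    n_samples = 50000

    # A random symmetric positive definite covariance.
    A = rng.standard_normal((4, 4))
    C = A @ A.T + 4 * np.eye(4)

    # C == R @ diag(h) @ R.T, so R (not R.T) maps the diagonal frame back.
    h, R = np.linalg.eigh(C)
    z = rng.standard_normal((4, n_samples)) * np.sqrt(h)[:, None]
    x = R @ z  # samples with covariance C

    # The sample covariance approaches C as n_samples grows.
    C_hat = x @ x.T / n_samples
    assert np.all(np.abs(C_hat - C) < 1.0)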
Example No. 9
 def test_merge_rts_no_intersect(self):
     """ Test merging two rts without intersection."""
     
     d1=[1]*int(spy.math.pow(2,10))
     d2=[2]*int(spy.math.pow(2,11))
     
     st1=datetime.datetime(year=1990,month=2,day=3,hour=11, minute=15)
     st2=datetime.datetime(year=1990,month=5,day=3,hour=11, minute=15)
     
     ts1=rts(d1,st1,time_interval(hours=1))
     ts2=rts(d2,st2,time_interval(hours=1))
     nt=merge(ts1,ts2)
     
     self.assertEqual(nt.start,ts1.start)
     self.assertEqual(nt.end,ts2.end)
     
     total_n=number_intervals(ts1.start,ts2.end,ts1.interval)+1
     
     self.assertEqual(len(nt.data),total_n)
     s1=nt.index_after(ts1.end)
     s2=nt.index_after(ts2.start)
     
     self.assertTrue(spy.alltrue(spy.isnan(nt.data[s1+1:s2])))
     self.assertTrue(spy.allclose(nt.data[0:s1+1],ts1.data))
     self.assertTrue(spy.allclose(nt.data[s2:len(nt.data)],ts2.data))
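The assertions check that merge copies ts1 and ts2 into place and leaves NaNs in the uncovered gap. A toy NumPy sketch of that layout (vtools' rts and merge are not shown here, so plain arrays stand in):

    import numpy as np

    ts1 = np.ones(4)      # four hourly samples starting at t = 0
    ts2 = 2 * np.ones(4)  # four hourly samples starting at t = 10
    merged = np.full(14, np.nan)
    merged[0:4] = ts1
    merged[10:14] = ts2

    assert np.all(np.isnan(merged[4:10]))  # the gap stays NaN
    assert np.allclose(merged[0:4], ts1)
    assert np.allclose(merged[10:14], ts2)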
Example No. 11
    def numpyAssertEqual(self, a1, a2):
        """Test for equality of array fields a1 and a2."""

        self.assertEqual(type(a1), type(a2))
        self.assertEqual(a1.shape, a2.shape)
        self.assertEqual(a1.dtype, a2.dtype)
        self.assertTrue(alltrue(equal(a1.ravel(), a2.ravel())))
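numpy.testing ships this check: assert_array_equal compares shapes and values and reports the first mismatch, and recent NumPy versions accept strict=True to also require matching dtype. A sketch:

    import numpy as np

    a1 = np.array([1, 2, 3], dtype=np.int64)
    a2 = np.array([1, 2, 3], dtype=np.int64)

    # Raises AssertionError with a readable diff on any mismatch.
    np.testing.assert_array_equal(a1, a2)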
Example No. 13
    def test_mixed_spins(self):
        sigma = common.constants.pauli_matrices
        sys = SpinMatrix(N=2, spin_number=[2, 3], sparse_type=sp.sparse.csr_matrix)
        rand_mat_3 = sp.matrix([[0,  1, 2],
                                [2, -1, 2],
                                [0,  0, 1]])
        sys.add_kron_term([0, 1], [sigma[2], rand_mat_3]) # \krons(sigma_y, rand_mat_3)
        sys.add_kron_term([0], [sigma[3]]) # add an on-site potential

        print('Mixed spins (1/2 and 1): ')
        print(sys.matrix.todense())
        print()

        self.assertEqual(
            sp.alltrue(
                sys.matrix ==
                sp.array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.-1.j, 0.-2.j],
                          [ 0.+0.j, 1.+0.j, 0.+0.j, 0.-2.j, 0.+1.j, 0.-2.j],
                          [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j, 0.-1.j],
                          [ 0.+0.j, 0.+1.j, 0.+2.j,-1.+0.j, 0.+0.j, 0.+0.j],
                          [ 0.+2.j, 0.-1.j, 0.+2.j, 0.+0.j,-1.+0.j, 0.+0.j],
                          [ 0.+0.j, 0.+0.j, 0.+1.j, 0.+0.j, 0.+0.j,-1.+0.j]])
            ),
            True
        )
        self.assertEqual(sys.confs, ['00', '01', '02', '10', '11', '12'])
Example No. 14
    def test_anisotropy_couplings_larger_matrix(self):
        sigma = common.constants.pauli_matrices
        sys = SpinMatrix(N=4, spin_number=2, sparse_type=sp.sparse.bsr_matrix)
        sys.add_kron_term([0, 2], [sigma[1], sigma[3]]) # \krons(sigma_x, sigma_z)
        sys.add_kron_term([0], [sigma[3]])

        print('Anisotropy coupling with 4 1/2 spins: ')
        print(sys.matrix.todense())
        print()

        self.assertEqual(
            sp.alltrue(
                sys.matrix ==
                sp.array([[ 1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
                          [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
                          [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,-1., 0., 0., 0., 0., 0.],
                          [ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,-1., 0., 0., 0., 0.],
                          [ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
                          [ 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
                          [ 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,-1., 0.],
                          [ 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,-1.],
                          [ 1., 0., 0., 0., 0., 0., 0., 0.,-1., 0., 0., 0., 0., 0., 0., 0.],
                          [ 0., 1., 0., 0., 0., 0., 0., 0., 0.,-1., 0., 0., 0., 0., 0., 0.],
                          [ 0., 0.,-1., 0., 0., 0., 0., 0., 0., 0.,-1., 0., 0., 0., 0., 0.],
                          [ 0., 0., 0.,-1., 0., 0., 0., 0., 0., 0., 0.,-1., 0., 0., 0., 0.],
                          [ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,-1., 0., 0., 0.],
                          [ 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,-1., 0., 0.],
                          [ 0., 0., 0., 0., 0., 0.,-1., 0., 0., 0., 0., 0., 0., 0.,-1., 0.],
                          [ 0., 0., 0., 0., 0., 0., 0.,-1., 0., 0., 0., 0., 0., 0., 0.,-1.]])
            ),
            True
        )
Example No. 15
    def circle(self) :
        self.BlocksToWrite = copy.deepcopy(self.Blocks)
        self.Writer = fitsGBT.Writer(self.BlocksToWrite, 0)
        self.Writer.write('temp.fits')
        self.newReader = fitsGBT.Reader('temp.fits', 0)
        self.newBlocks = self.newReader.read()

        self.assertEqual(len(self.Blocks), len(self.newBlocks))
        for ii in range(len(self.newBlocks)) :
            OldDB = self.Blocks[ii]
            NewDB = self.newBlocks[ii]
            for jj in range(4) :
                self.assertEqual(OldDB.dims[jj], NewDB.dims[jj])
            self.assertTrue(ma.allclose(OldDB.data, NewDB.data))
            for field, axis in fitsGBT.fields_and_axes.items() :
                self.assertEqual(axis, OldDB.field_axes[field])
                self.assertEqual(axis, NewDB.field_axes[field])
            for field in ['SCAN', 'OBJECT', 'TIMESTAMP',
                          'OBSERVER', 'CRPIX1', 'CDELT1'] :
                self.assertEqual(OldDB.field[field], NewDB.field[field])
            for field in ['CRVAL1', 'BANDWID', 'RESTFREQ', 'DURATION'] :
                self.assertAlmostEqual(OldDB.field[field], NewDB.field[field])
            for field in ['LST', 'ELEVATIO', 'AZIMUTH', 'RA', 'DEC',
                          'OBSFREQ', 'CRVAL2', 'CRVAL3', 'EXPOSURE'] :
                self.assertTrue(sp.allclose(OldDB.field[field], 
                                            NewDB.field[field]))
            for field in ['DATE-OBS'] :
                self.assertTrue(sp.alltrue(sp.equal(OldDB.field[field], 
                                            NewDB.field[field])))
            for field in ['CRVAL4', 'CAL'] :
                self.assertTrue(all(OldDB.field[field] == NewDB.field[field]))
Example No. 18
def last_index(X):
    # A time step is padding if every feature in it is non-finite (NaN/inf).
    timestamps_infinite = sp.all(~sp.isfinite(X), axis=1)
    if sp.alltrue(~timestamps_infinite):  # no padding rows
        idx = X.shape[0]
    else:  # trim at the first padded time step
        idx = sp.nonzero(timestamps_infinite)[0][0]
    return idx
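A small usage sketch of last_index on plain NumPy (sp here is the legacy scipy namespace; np.isfinite and np.all behave identically): the function returns the index of the first all-NaN padding row, so X[:idx] trims the padding.

    import numpy as np

    # Five time steps of three features; the last two rows are NaN padding.
    X = np.full((5, 3), np.nan)
    X[:3] = np.arange(9.0).reshape(3, 3)

    timestamps_infinite = np.all(~np.isfinite(X), axis=1)
    if not timestamps_infinite.any():
        idx = X.shape[0]
    else:
        idx = np.nonzero(timestamps_infinite)[0][0]
    print(idx)  # -> 3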
Example No. 19
 def test_off_map(self) :
     pixels = dirty_map.pixel_counts(self.data, self.ra_inds, self.dec_inds,
                                   self.pix_counts, map_shape=(7,7))
     self.assertTrue(not (8,1) in pixels)
     self.assertTrue(not (9,5) in pixels)
     self.assertTrue(not (8,4) in pixels)
     self.assertTrue(not (7,3) in pixels)
     self.assertTrue(sp.alltrue(self.pix_counts[pixels.index((5,2))] == 3))
Example No. 20
 def test_two_matrices_xx(self):
     """ kron(sigma_x, sigma_x) """
     sigma = common.constants.pauli_matrices
     elems = krons_by_search(matSeq=[sigma[1], sigma[1]])
     matrix = sp.array(to_sparse(elems, scipy.sparse.csr_matrix).todense())
     print(matrix)
     self.assertEqual(
         sp.alltrue(matrix == sp.array([[0, 0, 0, 1], [0, 0, 1, 0],
                                        [0, 1, 0, 0], [1, 0, 0, 0]])), True)
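The expected matrix is just the Kronecker product of two sigma_x matrices, which np.kron reproduces directly (a cross-check, not the krons_by_search internals):

    import numpy as np

    sigma_x = np.array([[0, 1], [1, 0]])

    # kron(sigma_x, sigma_x) flips both spins: an antidiagonal of ones.
    m = np.kron(sigma_x, sigma_x)
    assert np.all(m == np.array([[0, 0, 0, 1],
                                 [0, 0, 1, 0],
                                 [0, 1, 0, 0],
                                 [1, 0, 0, 0]]))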
Example No. 21
 def testGenerateIndex(self):
     i = 0
     n = 1000
     index = generateIndex(i, n)
     self.assertEqual(index.dimensions, INDEX)
     self.assertEqual(index.longname, "Index")
     self.assertEqual(index.shortname, "i")
     self.assertEqual(index.unit, 1)
     self.assertFalse(index.error)
     self.assertTrue(scipy.alltrue(index.data == scipy.arange(0, n)))
Example No. 22
    def test_circle_condition(self):
        for n in (10, 30, 100, 300, 1000, 3000):
            x, y = self.rs.uniform(0, 100, size=(2, n))
            tri = dlny.Triangulation(x, y)

            i = tri.triangle_nodes[:,0]
            r2 = ((x[i] - tri.circumcenters[:,0])**2
                + (y[i] - tri.circumcenters[:,1])**2)
            alldist2 = (sp.subtract.outer(x, tri.circumcenters[:,0])**2
                      + sp.subtract.outer(y, tri.circumcenters[:,1])**2)
            assert sp.alltrue(r2 <= alldist2)
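The property being tested is the Delaunay empty-circumcircle condition: no point may lie strictly inside any triangle's circumcircle. It can be checked against scipy.spatial.Delaunay by computing circumcenters from the vertices, since SciPy does not expose them the way dlny.Triangulation does. A sketch:

    import numpy as np
    from scipy.spatial import Delaunay

    rng = np.random.default_rng(1)
    pts = rng.uniform(0, 100, size=(200, 2))
    tri = Delaunay(pts)

    def circumcenter(a, b, c):
        # Solve |x - a|^2 = |x - b|^2 = |x - c|^2 as a 2x2 linear system.
        A = 2 * np.array([b - a, c - a])
        rhs = np.array([b @ b - a @ a, c @ c - a @ a])
        return np.linalg.solve(A, rhs)

    for simplex in tri.simplices:
        a, b, c = pts[simplex]
        cc = circumcenter(a, b, c)
        r2 = np.sum((a - cc) ** 2)
        alldist2 = np.sum((pts - cc) ** 2, axis=1)
        assert np.all(alldist2 >= r2 * (1 - 1e-8))  # small float tolerance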
Example No. 23
    def test_off_map(self):
        Data = self.blocks[0]
        Data.calc_freq()
        map = self.map
        map[:, :, :] = 0.0
        Data.data[:, :, :, :] = 0.0

        # Rig the pointing but put one off the map.
        def rigged_pointing():
            Data.ra = map.get_axis('ra')[range(10)]
            Data.dec = map.get_axis('dec')[range(10)]
            Data.ra[3] = Data.ra[3] - 8.0

        Data.calc_pointing = rigged_pointing
        smd.sub_map(Data, map)
        self.assertTrue(sp.alltrue(ma.getmaskarray(Data.data[3, :, :, :])))
        self.assertTrue(
            sp.alltrue(
                sp.logical_not(
                    ma.getmaskarray(
                        (Data.data[[0, 1, 2, 4, 5, 6, 7, 8, 9], :, :, :])))))
Example No. 24
def Example2():
    a = sp.array([[-1, 2, 3], [4, -5, 0]])
    print(a)
    # scipy.any(a): return True if any element of a is True
    # scipy.all(a): return True if all elements of a are True
    # scipy.alltrue(a, axis): perform logical_and along the given axis of a
    print(sp.any(a), sp.all(a), sp.alltrue(a, axis=0), sp.alltrue(a, axis=1))
    # scipy.append(a, values, axis): append values to a along the specified axis
    print(sp.append(a, [[7, 8, 9], [10, 11, 12]]),
          sp.append(a, [[7, 8, 9], [10, 11, 12]], axis=0),
          sp.append(a, [[7, 8, 9], [10, 11, 12]], axis=1))
    # scipy.concatenate((a1, a2, ...), axis): concatenate a tuple of arrays along the specified axis
    print(sp.concatenate((a, [[7, 8, 9], [10, 11, 12]])),
          sp.concatenate((a, [[7, 8, 9], [10, 11, 12]]), axis=0),
          sp.concatenate((a, [[7, 8, 9], [10, 11, 12]]), axis=1))
    # scipy.min(a, axis=None), scipy.max(a, axis=None): min/max of a along the given axis (global if axis=None)
    print(np.min(a), np.max(a))
    # scipy.argmin(a, axis=None), scipy.argmax(a, axis=None): indices of min/max along the given axis (global if axis=None)
    print(sp.argmin(a), sp.argmin(a, axis=0), sp.argmin(a, axis=1))
    print(sp.argmax(a), sp.argmax(a, axis=0), sp.argmax(a, axis=1))
    # scipy.reshape(a, newshape): reshape a to newshape (must conserve the total number of elements)
    b = sp.reshape(a, (3, 2))
    print(b)
    # scipy.matrix(a): create a matrix from 2D array a (matrices implement matrix multiplication rather than element-wise multiplication)
    m = sp.matrix(a)
    print(m)
    # scipy.histogram, scipy.histogram2d, scipy.histogramdd: 1-, 2-, and d-dimensional histograms, respectively
    print(sp.histogram([1, 2, 1], bins=[0, 1, 2, 3]))
    print(sp.histogram([[1, 2, 1], [1, 0, 1]], bins=[0, 1, 2, 3]))
    # scipy.round(a, decimals=0): round elements of a to the specified number of decimals
    c = sp.array([[1.1, 2.2, 3.3], [4.5, 6.6, 6.7]])
    print(np.round(c, decimals=0))
    # scipy.sign(a): array of the same shape as a, with -1 where a < 0, 0 where a == 0, and +1 where a > 0
    print(sp.sign(a))
    # a.tofile(fid, sep="", format="%s"): write a to the given file, in binary or ascii format depending on the options
    # scipy.fromfile(file=, dtype=float, count=-1, sep=''): read an array from the given file (binary or ascii)
    # scipy.unique(a): return the sorted unique elements of array a
    print(sp.unique(a))
    # scipy.where(condition, x, y): array shaped like condition, taking values from x where condition is True and from y where it is False
    print(sp.where([[2, 3, 4], [0, 0, 0]], 1, -1))
Example No. 25
    def test_csv_to_arrays(self):
        (handle, file_name) = tempfile.mkstemp('.csv', 'test_csv_interface_')
        os.close(handle)

        f = open(file_name, "wb")
        f.write('\n'.join(self.dummy_f))
        f.close()

        lon = csvi.csv_to_arrays(file_name, LONGITUDE=float)
        assert list(lon.keys())[0] == 'LONGITUDE'
        assert len(lon.keys()) == 1
        assert scipy.alltrue(self.LONGITUDE == lon['LONGITUDE'])

        all_conversions = {'LONGITUDE': float, 'LATITUDE': float, 'WALLS': str}

        all = csvi.csv_to_arrays(file_name, **all_conversions)
        assert len(all.keys()) == 3
        assert scipy.alltrue(self.LATITUDE == all['LATITUDE'])
        assert scipy.alltrue(self.LONGITUDE == all['LONGITUDE'])
        assert scipy.alltrue(self.WALLS == all['WALLS'])

        os.remove(file_name)
Example No. 26
def simplex_array_boundary(s, parity):
    """
    Compute the boundary faces and boundary operator of an
    array of simplices with given simplex parities

    E.g.
    
      For a mesh with two triangles [0,1,2] and [1,3,2], the second
      triangle has opposite parity relative to sorted order.
      
      simplex_array_boundary(array([[0,1,2],[1,2,3]]),array([0,1]))
      
    """
    #TODO handle edge case as special case

    num_simplices = s.shape[0]
    faces_per_simplex = s.shape[1]
    num_faces = num_simplices * faces_per_simplex

    orientations = 1 - 2 * parity

    #faces[:,:-2] are the indices of the faces
    #faces[:,-2]  is the index of the simplex whose boundary produced the face
    #faces[:,-1]  is the orientation of the face in the boundary of the simplex
    faces = empty((num_faces, s.shape[1] + 1), dtype=s.dtype)
    for i in range(faces_per_simplex):
        rows = faces[num_simplices * i:num_simplices * (i + 1)]

        rows[:, :i] = s[:, :i]
        rows[:, i:-2] = s[:, i + 1:]
        rows[:, -2] = arange(num_simplices)
        rows[:, -1] = ((-1)**i) * orientations

    #sort rows
    faces = faces[lexsort(faces[:, :-2].T[::-1])]

    #find unique faces
    face_mask = ~hstack(
        (array([False]), alltrue(faces[1:, :-2] == faces[:-1, :-2], axis=1)))

    unique_faces = faces[face_mask, :-2]

    #compute CSR representation for boundary operator
    csr_indptr = hstack((arange(num_faces)[face_mask], array([num_faces])))
    csr_indices = ascontiguousarray(faces[:, -2])
    csr_data = faces[:, -1].astype('int8')

    shape = (len(unique_faces), num_simplices)
    boundary_operator = csr_matrix((csr_data, csr_indices, csr_indptr), shape)

    return unique_faces, boundary_operator
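This snippet assumes the NumPy names (empty, arange, lexsort, hstack, array, ascontiguousarray, alltrue) and scipy.sparse.csr_matrix are already in scope; note that numpy.alltrue was removed in NumPy 2.0, where np.all is the replacement. A hedged usage sketch for the two-triangle mesh from the docstring:

    import numpy as np

    # With simplex_array_boundary defined as above and its imports in place:
    faces, boundary = simplex_array_boundary(
        np.array([[0, 1, 2], [1, 2, 3]]), np.array([0, 1]))
    print(faces)               # the five unique edges of the mesh
    print(boundary.toarray())  # signed (edge x triangle) incidence matrix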
Example No. 31
    def test_all(self):
        for k,v,s,o,expected in self.cases:
            sc = simplicial_complex((v,s))
            M = whitney_innerproduct(sc,k)
            M = M.todense()
            
            #Permute the matrix to coincide with the ordering of the known matrix
            permute = [sc[k].simplex_to_index[simplex(x)] for x in o]                                           

            M = M[permute,:][:,permute]
            
            assert_almost_equal(M,expected)
            
            #check whether matrix is S.P.D.
            self.assertTrue(alltrue(isreal(eigvals(M))))
            self.assertTrue(min(real(eigvals(M))) >= 0)
            assert_almost_equal(M,M.T)
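Since M is expected to be symmetric, np.linalg.eigvalsh performs the same positive-semidefinite check without the isreal step: it returns real eigenvalues by construction. A sketch:

    import numpy as np

    M = np.array([[2.0, -1.0],
                  [-1.0, 2.0]])
    evals = np.linalg.eigvalsh(M)  # real by construction for symmetric input
    assert np.all(evals >= 0)      # positive semi-definite
    assert np.allclose(M, M.T)     # symmetric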
Example No. 33
    def test_anisotropy_couplings(self):
        sigma = common.constants.pauli_matrices
        sys = SpinMatrix(N=2, spin_number=[2, 2], sparse_type=sp.sparse.csr_matrix)
        sys.add_kron_term([0, 1], [sigma[2], sigma[3]]) # \krons(sigma_y, sigma_z)

        print('Anisotropy coupling: ')
        print(sys.matrix.todense())
        print()

        self.assertEqual(
            sp.alltrue(
                sys.matrix ==
                sp.array([[ 0.+0.j,  0.+0.j,  0.-1.j,  0.+0.j],
                          [ 0.+0.j,  0.+0.j,  0.+0.j,  0.+1.j],
                          [ 0.+1.j,  0.+0.j,  0.+0.j,  0.+0.j],
                          [ 0.+0.j,  0.-1.j,  0.+0.j,  0.+0.j]])
            ),
            True
        )
Example No. 34
    def deactivate_test_profile(self):
        """Not an actual test, this is for profiling."""

        nf = 32
        nra = 32
        ndec = 32
        DM = DataMaker(nscans=10,
                       nt_scan=200,
                       nf=nf,
                       nra=nra,
                       ndec=ndec,
                       scan_size=5.0,
                       map_size=7.0,
                       add_noise=False,
                       add_ground=False)
        map = DM.get_map()
        time_stream, ra, dec, az, el, time, mask_inds = DM.get_all_trimmed()
        P = dirty_map.Pointing(("ra", "dec"), (ra, dec), map, 'linear')
        Noise = dirty_map.Noise(time_stream, time)
        thermal_noise_levels = sp.zeros((nf)) + 0.04  # Kelvin**2
        Noise.add_thermal(thermal_noise_levels)
        Noise.add_mask(mask_inds)
        self.assertTrue(sp.alltrue(Noise.diagonal[mask_inds] > 10))
        Noise.deweight_time_mean()
        Noise.deweight_time_slope()
        Noise.add_correlated_over_f(0.01, -1.2, 0.1)
        start = time_module.perf_counter()
        Noise.finalize()
        stop = time_module.perf_counter()
        print("Finalizing noise took %5.2f seconds." % (stop - start))
        # Do the profiling.
        map_noise_inv = sp.zeros((nf, nra, ndec, nf, nra, ndec), dtype=float)
        print("Frequency ind:", end=' ')
        start = time_module.perf_counter()
        for ii in range(1):
            print(ii, end=' ')
            for jj in range(nra):
                P.noise_to_map_domain(Noise, ii, jj,
                                      map_noise_inv[ii, jj, :, :, :, :])
        stop = time_module.perf_counter()
        print()
        print("Constructing map noise took %5.2f seconds." % (stop - start))
Example No. 35
    def test_stitches(self) :
        self.data_blocks[0].calc_freq()
        min = self.data_blocks[0].freq[-1]
        # Add a random factor to this one and make sure it gets divided out.
        self.data_blocks[0].data[:,:,:,:] = self.data_blocks[0].freq*1.05
        self.data_blocks[1].calc_freq()
        max = self.data_blocks[1].freq[0]
        self.data_blocks[1].data[:,:,:,:] = self.data_blocks[1].freq
        
        NewData = swc.stitch(self.data_blocks)
        tol = abs(NewData.field['CDELT1']/2)
        data_col = NewData.data[2,2,0,:]
        self.assertTrue(abs(data_col[-1] - min) < tol)
        self.assertTrue(abs(data_col[0] - max) < tol)
        self.assertTrue(sp.allclose(sp.sort(-data_col), -data_col))

        # Make sure the stitched frequencies line up with the frequency axis.
        NewData.calc_freq()
        freq = NewData.freq
        self.assertTrue(sp.alltrue(abs(data_col - freq) < tol))
Example No. 37
    def getDoppler(self,sensorloc=sp.zeros(3)):
        """
        This will return the line of sight velocity.
        Inputs
            sensorloc - The location of the sensor in local Cartesian coordinates.
        Outputs
            Vi - A numpy array Nlocation by Ntimes in m/s of the line of sight velocities.
        """
        ncoords = self.Cart_Coords.shape[0]
        ntimes = len(self.Time_Vector)
        if not sp.alltrue(sensorloc == sp.zeros(3)):
            curcoords = self.Cart_Coords - sp.tile(sensorloc[sp.newaxis,:],(ncoords,1))
        else:
            curcoords = self.Cart_Coords
        denom = np.tile(sp.sqrt(sp.sum(curcoords**2,1))[:,sp.newaxis],(1,3))
        unit_coords = curcoords/denom

        Vi = sp.zeros((ncoords,ntimes))
        for itime in range(ntimes):
            Vi[:,itime] = (self.Velocity[:,itime]*unit_coords).sum(1)
        return Vi
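The projection step is a dot product of each velocity with the unit vector from the sensor to the point; positive values recede along the line of sight. A standalone sketch of the same geometry (values are illustrative):

    import numpy as np

    sensorloc = np.zeros(3)
    coords = np.array([[1.0, 0.0, 0.0],    # due east of the sensor
                       [0.0, 0.0, 2.0]])   # straight overhead
    velocity = np.array([[5.0, 1.0, 0.0],
                         [0.0, 0.0, -3.0]])

    rel = coords - sensorloc
    unit = rel / np.linalg.norm(rel, axis=1, keepdims=True)
    v_los = np.sum(velocity * unit, axis=1)
    print(v_los)  # [ 5. -3.]: receding east, approaching from above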
Example No. 38
    def test_heisenberg_couplings_spin_number_list(self):
        sigma = common.constants.pauli_matrices
        sys = SpinMatrix(N=2, spin_number=[2, 2], sparse_type=sp.sparse.coo_matrix)
        sys.add_kron_term([0, 1], [sigma[1], sigma[1]]) # \krons(sigma_x, sigma_x)
        sys.add_kron_term([0, 1], [sigma[2], sigma[2]]) # \krons(sigma_y, sigma_y)
        sys.add_kron_term([0, 1], [sigma[3], sigma[3]]) # \krons(sigma_z, sigma_z)

        print('Heisenberg coupling: ')
        print(sys.matrix.todense())
        print()

        self.assertEqual(
            sp.alltrue(
                sys.matrix ==
                sp.array([[ 1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],
                          [ 0.+0.j, -1.+0.j,  2.+0.j,  0.+0.j],
                          [ 0.+0.j,  2.+0.j, -1.+0.j,  0.+0.j],
                          [ 0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])
            ),
            True
        )
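The expected matrix is the sum of the three Kronecker terms. It can be reproduced with plain NumPy (the Pauli matrices are written out explicitly here, since common.constants is not shown):

    import numpy as np

    sx = np.array([[0, 1], [1, 0]], dtype=complex)
    sy = np.array([[0, -1j], [1j, 0]])
    sz = np.array([[1, 0], [0, -1]], dtype=complex)

    H = np.kron(sx, sx) + np.kron(sy, sy) + np.kron(sz, sz)
    expected = np.array([[1,  0,  0, 0],
                         [0, -1,  2, 0],
                         [0,  2, -1, 0],
                         [0,  0,  0, 1]], dtype=complex)
    assert np.all(H == expected)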
Example No. 41
    def validate_vulnerability_set(self):
        """The vulnerability set must provide curves for all sites in this
        object. A Vulnerability_Function needs to be defined to match each
        attributes['STRUCTURE_CLASSIFICATION'] identifier.

        Raises a RuntimeError if it cannot find a match.
        """
        if self.vulnerability_set is None:
            raise RuntimeError('Vulnerability Set must not be None')

        # Function IDs for the vulnerability set
        curves_defined = self.vulnerability_set.vulnerability_functions.keys()

        # Sites STRUCTURE_CLASSIFICATIONs
        structure_classifications = self.attributes['STRUCTURE_CLASSIFICATION']
        structure_classifications = unique(structure_classifications)

        # Are there any unique structure classifications that are not in the
        # curves defined?
        in_curves_defined = in1d(structure_classifications, curves_defined)
        if not alltrue(in_curves_defined):
            msg = 'The following structures do not have a vulnerability curve: '
            msg += '%s' % structure_classifications[where(in_curves_defined == False)]
            raise RuntimeError(msg)
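The membership test is in1d (spelled np.isin in current NumPy). A minimal sketch of the failure path with hypothetical classifications:

    import numpy as np

    curves_defined = ['BRICK', 'TIMBER']
    structure_classifications = np.unique(['BRICK', 'STEEL', 'TIMBER'])

    in_curves_defined = np.isin(structure_classifications, curves_defined)
    if not np.all(in_curves_defined):
        missing = structure_classifications[~in_curves_defined]
        # -> RuntimeError: ... vulnerability curve: ['STEEL']
        raise RuntimeError(
            'The following structures do not have a vulnerability curve: '
            '%s' % missing)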
Example No. 43
def make_masked_time_stream(Blocks,
                            ntime=None,
                            window=None,
                            return_means=False,
                            subtract_slope=False):
    """Converts Data Blocks into a single uniformly sampled time stream.
    
    Also produces the mask giving whether elements are valid entries or came
    from a zero pad.  This produces the required inputs for calculating a
    windowed power spectrum.

    Parameters
    ----------
    Blocks : tuple of DataBlock objects.
    ntime : int
        Total number of time bins in output arrays.  If shorter than required,
        extra data is truncated.  If longer, extra data is masked.  Default is
        to use exactly the number that fits all the data.  Set to a negative
        number to zero pad to a power of 2 and by at least that factor.
    window : string or tuple
        Type of window to apply to each DataBlock.  Valid options are the valid
        arguments to scipy.signal.get_window().  By default, don't window.
    return_means : bool
        Whether to return an array of the channel means.
    subtract_slope : bool
        Whether to subtract a linear function of time from each channel.

    Returns
    -------
    time_stream : array
        All the data in `Blocks` but concatenated along the time axis and
        padded with zeros such that the time axis is uniformly sampled and
        uninterrupted.
    mask : array same shape as `time_stream`
        1.0 if data in the corresponding `time_stream` element is filled
        and 0 if the data was missing.  This is like a window where
        time_stream = mask*real_data.
    dt : float
        The time step of the returned time stream.
    means : array (optional)
        The mean from each channel.
    """

    # Shape of all axes except the time axis.
    back_shape = Blocks[0].dims[1:]
    # Get the time sample spacing.
    Blocks[0].calc_time()
    dt = abs(sp.mean(sp.diff(Blocks[0].time)))
    # Find the beginning and the end of the time axis by looping through
    # blocks.
    # Also get the time axis and the mask
    # for calculating basis polynomials.
    unmask = sp.zeros((0, ) + back_shape, dtype=bool)
    time = sp.zeros((0, ), dtype=float)
    start_ind = []
    min_time = float('inf')
    max_time = 0.0
    #mean_time = 0.0
    #n_data_times = 0
    for Data in Blocks:
        Data.calc_time()
        start_ind.append(len(time))
        time = sp.concatenate((time, Data.time))
        this_unmask = sp.logical_not(ma.getmaskarray(Data.data))
        unmask = sp.concatenate((unmask, this_unmask), 0)
        # Often the start or the end of a scan is completely masked.  Make sure
        # we don't start till the first unmasked time and end at the last
        # unmasked time.
        time_unmask = sp.alltrue(ma.getmaskarray(Data.data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        min_time = min(min_time, min(Data.time[time_unmask]))
        max_time = max(max_time, max(Data.time[time_unmask]))
        #mean_time += sp.sum(Data.time[time_unmask])
        #n_data_times += len(Data.time[time_unmask])
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1) and
                sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt, rtol=0.001)):
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape:
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
    # Now calculate basis polynomials for the mean mode and the slope mode.
    polys = misc.ortho_poly(time[:, None, None, None], 2, unmask, 0)
    #mean_time /= n_data_times
    #if n_data_times == 0:
    #    n_data_times = 1
    # Very important to subtract the mean out of the signal, otherwise the
    # window coupling to the mean (0) mode will dominate everything. Can also
    # optionally take out a slope.
    # Old algorithm.
    #total_sum = 0.0
    #total_counts = 0
    #total_slope = 0.0
    #time_norm = 0.0
    #for Data in Blocks:
    #    total_sum += sp.sum(Data.data.filled(0), 0)
    #    total_counts += ma.count(Data.data, 0)
    #    total_slope += sp.sum(Data.data.filled(0)
    #                          * (Data.time[:,None,None,None] - mean_time), 0)
    #    time_norm += sp.sum(sp.logical_not(ma.getmaskarray(Data.data))
    #                        * (Data.time[:,None,None,None] - mean_time)**2, 0)
    #total_counts[total_counts == 0] = 1
    #time_norm[time_norm == 0.0] = 1
    #total_mean = total_sum / total_counts
    #total_slope /= time_norm
    # New algorithm.
    mean_amp = 0
    slope_amp = 0
    for ii, Data in enumerate(Blocks):
        si = start_ind[ii]
        this_nt = Data.dims[0]
        data = Data.data.filled(0)
        mean_amp += sp.sum(
            data * unmask[si:si + this_nt, ...] *
            polys[0, si:si + this_nt, ...], 0)
        slope_amp += sp.sum(
            data * unmask[si:si + this_nt, ...] *
            polys[1, si:si + this_nt, ...], 0)
    polys[0, ...] *= mean_amp
    polys[1, ...] *= slope_amp
    # Calculate the time axis.
    if min_time > max_time:
        min_time = 0
        max_time = 6 * dt
    if not ntime:
        ntime = (max_time - min_time) // dt + 1
    elif ntime < 0:
        # 0 pad by a factor of at least -ntime, but at most 10% more than this.
        time_min = -ntime * (max_time - min_time) / dt
        n_block = 1
        while n_block < time_min / 20.0:
            n_block *= 2
        ntime = (time_min // n_block + 1) * n_block

    time = sp.arange(ntime) * dt + min_time
    # Allocate memory for the outputs.
    time_stream = sp.zeros((ntime, ) + back_shape, dtype=float)
    mask = sp.zeros((ntime, ) + back_shape, dtype=sp.float32)
    # Loop over all times and fill in the arrays.
    for ii, Data in enumerate(Blocks):
        this_nt = Data.dims[0]
        si = start_ind[ii]
        # Subtract the mean calculated above.
        this_data = Data.data.copy()
        this_data -= polys[0, si:si + this_nt, ...]
        # If desired, subtract of the linear function of time.
        if subtract_slope:
            #this_data -= (total_slope
            #              * (Data.time[:,None,None,None] - mean_time))
            this_data -= polys[1, si:si + this_nt, ...]
        # Find the first and last unmasked times.
        time_unmask = sp.alltrue(ma.getmaskarray(this_data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        unmasked_ind, = sp.where(time_unmask)
        first_ind = min(unmasked_ind)
        last_ind = max(unmasked_ind)
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1) and
                sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt, rtol=0.001)):
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape:
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
        # Apply an offset to the time in case the start of the Data Block
        # doesn't line up with the time array perfectly.
        offset = (time[sp.argmin(abs(time - Data.time[first_ind]))] -
                  Data.time[first_ind])
        # Generate window function.
        if window:
            window_function = sig.get_window(window, last_ind - first_ind + 1)
        for ii in range(first_ind, last_ind + 1):
            ind = sp.argmin(abs(time - (Data.time[ii] + offset)))
            if abs(time[ind] - (Data.time[ii])) < 0.5 * dt:
                if sp.any(mask[ind, ...]):
                    msg = "Overlapping times in Data Blocks."
                    raise ce.DataError(msg)
                if window:
                    window_value = window_function[ii - first_ind]
                else:
                    window_value = 1.0
                time_stream[ind, ...] = (window_value *
                                         this_data[ii, ...].filled(0.0))
                mask[ind, ...] = window_value * sp.logical_not(
                    ma.getmaskarray(this_data)[ii, ...])
    if return_means:
        return time_stream, mask, dt, polys[0, 0, ...]
    else:
        return time_stream, mask, dt
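The repeated sp.alltrue(..., -1) calls collapse a (time, beam, pol, freq) mask down to one flag per time sample. Modern NumPy does the same reduction in one call with a tuple axis; a toy sketch:

    import numpy as np

    # Toy mask with shape (time, beam, pol, freq); True means masked.
    mask = np.zeros((4, 2, 2, 3), dtype=bool)
    mask[0] = True  # first time sample fully masked

    time_masked = np.all(mask, axis=(1, 2, 3))
    print(time_masked)  # [ True False False False]
    # Equivalent to chaining three all(..., axis=-1) reductions as above.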
Example No. 44
 def execute(self, nprocesses=1):
     """Worker funciton."""
     params = self.params
     # Make parent directory and write parameter file.
     kiyopy.utils.mkparents(params['output_root'])
     parse_ini.write_params(params,
                            params['output_root'] + 'params.ini',
                            prefix=prefix)
     save_noise_diag = params['save_noise_diag']
     in_root = params['input_root']
     all_out_fname_list = []
     all_in_fname_list = []
     # Figure out what the band names are.
     bands = params['bands']
     if not bands:
         map_files = glob.glob(in_root + 'dirty_map_' + pol_str + "_*.npy")
         bands = []
         root_len = len(in_root + 'dirty_map_')
         for file_name in map_files:
             bands.append(file_name[root_len:-4])
     # Loop over files to process.
     for pol_str in params['polarizations']:
         for band in bands:
             if band == -1:
                 band_str = ''
             else:
                 band_str = "_" + repr(band)
             dmap_fname = (in_root + 'dirty_map_' + pol_str + band_str +
                           '.npy')
             all_in_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(dmap_fname))
             # Load the dirty map and the noise matrix.
             dirty_map = algebra.load(dmap_fname)
             dirty_map = algebra.make_vect(dirty_map)
             if dirty_map.axes != ('freq', 'ra', 'dec'):
                 msg = ("Expeced dirty map to have axes ('freq',"
                        "'ra', 'dec'), but it has axes: " +
                        str(dirty_map.axes))
                 raise ce.DataError(msg)
             shape = dirty_map.shape
             # Initialize the clean map.
             clean_map = algebra.info_array(sp.zeros(dirty_map.shape))
             clean_map.info = dict(dirty_map.info)
             clean_map = algebra.make_vect(clean_map)
             # If needed, initialize a map for the noise diagonal.
             if save_noise_diag:
                 noise_diag = algebra.zeros_like(clean_map)
             if params["from_eig"]:
                 # Solving from eigen decomposition of the noise instead of
                 # the noise itself.
                 # Load in the decomposition.
                 evects_fname = (in_root + 'noise_evects_' + pol_str +
                                 band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using eigenvectors: " + evects_fname
                 evects = algebra.open_memmap(evects_fname, 'r')
                 evects = algebra.make_mat(evects)
                 evals_inv_fname = (in_root + 'noise_evalsinv_' + pol_str +
                                    "_" + repr(band) + '.npy')
                 evals_inv = algebra.load(evals_inv_fname)
                 evals_inv = algebra.make_mat(evals_inv)
                 # Solve for the map.
                 if params["save_noise_diag"]:
                     clean_map, noise_diag = solve_from_eig(
                         evals_inv, evects, dirty_map, True, self.feedback)
                 else:
                     clean_map = solve_from_eig(evals_inv, evects,
                                                dirty_map, False,
                                                self.feedback)
                 # Delete the eigen vectors to recover memory.
                 del evects
             else:
                 # Solving from the noise.
                 noise_fname = (in_root + 'noise_inv_' + pol_str +
                                band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using noise inverse: " + noise_fname
                 all_in_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_fname))
                 noise_inv = algebra.open_memmap(noise_fname, 'r')
                 noise_inv = algebra.make_mat(noise_inv)
                 # Two cases for the noise.  If its the same shape as the map
                 # then the noise is diagonal.  Otherwise, it should be
                 # block diagonal in frequency.
                 if noise_inv.ndim == 3:
                     if noise_inv.axes != ('freq', 'ra', 'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec'), but it has: " +
                                str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Noise inverse can fit in memory, so copy it.
                     noise_inv_memory = sp.array(noise_inv, copy=True)
                     # Find the non-singular (covered) pixels.
                     max_information = noise_inv_memory.max()
                     good_data = noise_inv_memory > 1.0e-10 * max_information
                     # Make the clean map.
                     clean_map[good_data] = (dirty_map[good_data] /
                                             noise_inv_memory[good_data])
                     if save_noise_diag:
                         noise_diag[good_data] = \
                                 1/noise_inv_memory[good_data]
                 elif noise_inv.ndim == 5:
                     if noise_inv.axes != ('freq', 'ra', 'dec', 'ra',
                                           'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec', 'ra', 'dec'), "
                                "but it has: " + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Arrange the dirty map as a vector.
                     dirty_map_vect = sp.array(dirty_map)  # A view.
                     dirty_map_vect.shape = (shape[0], shape[1] * shape[2])
                     frequencies = dirty_map.get_axis('freq') / 1.0e6
                     # Allocate memory only once.
                     noise_inv_freq = sp.empty(
                         (shape[1], shape[2], shape[1], shape[2]),
                         dtype=float)
                     if self.feedback > 1:
                         print "Inverting noise matrix."
                     # Block diagonal in frequency so loop over frequencies.
                     for ii in range(dirty_map.shape[0]):
                         if self.feedback > 1:
                             print "Frequency: ", "%5.1f" % (
                                 frequencies[ii]),
                         if self.feedback > 2:
                             print ", start mmap read:",
                             sys.stdout.flush()
                         noise_inv_freq[...] = noise_inv[ii, ...]
                         if self.feedback > 2:
                             print "done, start eig:",
                             sys.stdout.flush()
                         noise_inv_freq.shape = (shape[1] * shape[2],
                                                 shape[1] * shape[2])
                         # Solve the map making equation by diagonalization.
                         noise_inv_diag, Rot = sp.linalg.eigh(
                             noise_inv_freq, overwrite_a=True)
                         if self.feedback > 2:
                             print "done",
                         map_rotated = sp.dot(Rot.T, dirty_map_vect[ii])
                         # Zero out infinite noise modes.
                         bad_modes = (noise_inv_diag <
                                      1.0e-5 * noise_inv_diag.max())
                         if self.feedback > 1:
                             print ", discarded: ",
                             print "%4.1f" % (100.0 * sp.sum(bad_modes) /
                                              bad_modes.size),
                             print "% of modes",
                         if self.feedback > 2:
                             print ", start rotations:",
                             sys.stdout.flush()
                         map_rotated[bad_modes] = 0.
                         noise_inv_diag[bad_modes] = 1.0
                         # Solve for the clean map and rotate back.
                         map_rotated /= noise_inv_diag
                         map = sp.dot(Rot, map_rotated)
                         if self.feedback > 2:
                             print "done",
                             sys.stdout.flush()
                         # Fill the clean array.
                         map.shape = (shape[1], shape[2])
                         clean_map[ii, ...] = map
                         if save_noise_diag:
                             # Using C = R Lambda R^T
                             # where Lambda = diag(1/noise_inv_diag).
                             temp_noise_diag = 1 / noise_inv_diag
                             temp_noise_diag[bad_modes] = 0
                             # Multiply R by the diagonal eigenvalue matrix.
                             # Broadcasting does equivalent of mult by diag
                             # matrix.
                             temp_mat = Rot * temp_noise_diag
                             # Multiply by R^T, but only calculate the
                             # diagonal elements.
                             for jj in range(shape[1] * shape[2]):
                                 temp_noise_diag[jj] = sp.dot(
                                     temp_mat[jj, :], Rot[jj, :])
                             temp_noise_diag.shape = (shape[1], shape[2])
                             noise_diag[ii, ...] = temp_noise_diag
                          # Return workspace memory to original shape.
                         noise_inv_freq.shape = (shape[1], shape[2],
                                                 shape[1], shape[2])
                         if self.feedback > 1:
                             print ""
                             sys.stdout.flush()
                 elif noise_inv.ndim == 6:
                     if save_noise_diag:
                         # OLD WAY.
                         #clean_map, noise_diag, chol = solve(noise_inv,
                         #        dirty_map, True, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_diag, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   True, feedback=self.feedback)
                     else:
                         # OLD WAY.
                         #clean_map, chol = solve(noise_inv, dirty_map,
                         #            False, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   False, feedback=self.feedback)
                     if params['save_cholesky']:
                         chol_fname = (params['output_root'] + 'chol_' +
                                       pol_str + band_str + '.npy')
                         sp.save(chol_fname, chol)
                     if params['save_noise_inv_diag']:
                         noise_inv_diag_fname = (params['output_root'] +
                                                 'noise_inv_diag_' +
                                                 pol_str + band_str +
                                                 '.npy')
                         algebra.save(noise_inv_diag_fname, noise_inv_diag)
                     # Delete the cholesky to recover memory.
                     del chol
                 else:
                     raise ce.DataError("Noise matrix has bad shape.")
                  # In all cases delete the noise object to recover memory.
                 del noise_inv
             # Write the clean map to file.
             out_fname = (params['output_root'] + 'clean_map_' + pol_str +
                          band_str + '.npy')
             if self.feedback > 1:
                 print "Writing clean map to: " + out_fname
             algebra.save(out_fname, clean_map)
             all_out_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(out_fname))
             if save_noise_diag:
                 noise_diag_fname = (params['output_root'] + 'noise_diag_' +
                                     pol_str + band_str + '.npy')
                 algebra.save(noise_diag_fname, noise_diag)
                 all_out_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_diag_fname))
              # Check the clean map for failure.
             if not sp.alltrue(sp.isfinite(clean_map)):
                 n_bad = sp.sum(sp.logical_not(sp.isfinite(clean_map)))
                 msg = ("Non finite entries found in clean map. Solve"
                        " failed. %d out of %d entries bad" %
                        (n_bad, clean_map.size))
                 raise RuntimeError(msg)
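
For reference, the per-frequency step above amounts to diagonalizing each inverse-noise block and discarding its singular modes. Below is a minimal NumPy sketch of that solve; the function name, the cutoff default, and the use of numpy in place of the module's scipy imports are illustrative assumptions, not the pipeline's API.

import numpy as np

def solve_one_frequency(noise_inv_freq, dirty_vect, cutoff=1.0e-5):
    # noise_inv_freq: (n_pix, n_pix) symmetric inverse-noise block.
    # dirty_vect: (n_pix,) dirty-map slice, flattened over (ra, dec).
    # Diagonalize: noise_inv = R diag(evals) R^T.
    evals, rot = np.linalg.eigh(noise_inv_freq)
    rotated = rot.T.dot(dirty_vect)
    # Zero out the near-singular (infinite noise) modes instead of
    # amplifying them.
    bad = evals < cutoff * evals.max()
    rotated[bad] = 0.
    evals[bad] = 1.
    # Divide by the eigenvalues and rotate back.
    clean = rot.dot(rotated / evals)
    # Diagonal of C = R diag(1/evals) R^T without forming the full
    # matrix (the same broadcasting trick as the loop above).
    inv_evals = 1. / evals
    inv_evals[bad] = 0.
    noise_diag = np.einsum('ij,j,ij->i', rot, inv_evals, rot)
    return clean, noise_diag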
Ejemplo n.º 45
0
def measure_noise_parameters(Blocks,
                             parameters,
                             split_scans=False,
                             plots=False):
    """Given a set of data blocks, measure noise parameters.

    Measurement done for all polarizations but only the first cal state.
    """

    # Initialize the output.
    out_parameters = {}
    if set(parameters) == {"channel_var"}:
        # Calculate the full correlated power spectrum.
        power_mat, window_function, dt, channel_means = npow.full_power_diag(
            Blocks,
            window="hanning",
            deconvolve=False,
            n_time=-1.05,
            normalize=False,
            split_scans=split_scans,
            subtract_slope=True)
        # This shouldn't be necessary, since I've tried to keep things finite in
        # the above function.  However, leave it in for now just in case.
        if not sp.alltrue(sp.isfinite(power_mat)):
            msg = ("Non finite power spectrum calculated.  Offending data in "
                   "file starting with scan %d." % (Blocks[0].field['SCAN']))
            raise ce.DataError(msg)
        # Get frequency axis and do unit conversions.
        n_time = power_mat.shape[0]
        n_chan = power_mat.shape[-1]
        frequency = npow.ps_freq_axis(dt, n_time)
        power_mat = npow.prune_power(power_mat, 0)
        power_mat = npow.make_power_physical_units(power_mat, dt)
        # Discard the mean mode.
        frequency = frequency[1:]
        power_mat = power_mat[1:, ...]
        n_f = len(frequency)
        # Loop over polarizations.
        cal_ind = 0
        n_pols = power_mat.shape[1]
        for ii in range(n_pols):
            this_pol_power = power_mat[:, ii, cal_ind, :]
            this_pol_window = window_function[:, ii, cal_ind, :]
            this_pol = Blocks[0].field['CRVAL4'][ii]
            this_pol_parameters = {}
            # If we are plotting, activate the subplot for this polarization and
            # this band.
            if plots:
                h = plt.gcf()
                # The last subplot should be hidden in the figure object.
                current_subplot = h.current_subplot
                current_subplot = current_subplot[:2] + (current_subplot[2] +
                                                         1, )
                h.current_subplot = current_subplot
            # Now figure out what we want to measure and measure it.
            if "channel_var" in parameters:
                power_diag = this_pol_power.view()
                window_function_diag = this_pol_window.view()
                # Integral of the power spectrum from -BW to BW.
                channel_var = sp.mean(power_diag, 0) / dt
                # Normalize for the window.
                norms = sp.mean(window_function_diag, 0).real
                bad_inds = norms < 10. / n_time
                norms[bad_inds] = 1
                channel_var /= norms
                # If a channel is completely masked, deweight it by giving
                # it a high variance.
                channel_var[bad_inds] = T_infinity**2
                this_pol_parameters["channel_var"] = channel_var
            out_parameters[this_pol] = this_pol_parameters
        return out_parameters
    else:
        # Calculate the full correlated power spectrum.
        power_mat, window_function, dt, channel_means = npow.full_power_mat(
            Blocks,
            window="hanning",
            deconvolve=False,
            n_time=-1.05,
            normalize=False,
            split_scans=split_scans,
            subtract_slope=True)
        # This shouldn't be necessary, since I've tried to keep things finite in
        # the above function.  However, leave it in for now just in case.
        if not sp.alltrue(sp.isfinite(power_mat)):
            msg = ("Non finite power spectrum calculated.  Offending data in "
                   "file starting with scan %d." % (Blocks[0].field['SCAN']))
            raise ce.DataError(msg)
        # Get frequency axis and do unit conversions.
        n_time = power_mat.shape[0]
        n_chan = power_mat.shape[-1]
        frequency = npow.ps_freq_axis(dt, n_time)
        power_mat = npow.prune_power(power_mat, 0)
        power_mat = npow.make_power_physical_units(power_mat, dt)
        # Discard the mean mode.
        frequency = frequency[1:]
        power_mat = power_mat[1:, ...]
        n_f = len(frequency)
        # Loop over polarizations.
        cal_ind = 0
        n_pols = power_mat.shape[1]
        for ii in range(n_pols):
            this_pol_power = power_mat[:, ii, cal_ind, :, :]
            this_pol_window = window_function[:, ii, cal_ind, :, :]
            this_pol = Blocks[0].field['CRVAL4'][ii]
            this_pol_parameters = {}
            # If we are plotting, activate the subplot for this polarization and
            # this band.
            if plots:
                h = plt.gcf()
                # The last subplot should be hidden in the figure object.
                current_subplot = h.current_subplot
                current_subplot = current_subplot[:2] + (current_subplot[2] +
                                                         1, )
                h.current_subplot = current_subplot
            # Now figure out what we want to measure and measure it.
            if "channel_var" in parameters:
                power_diag = this_pol_power.view()
                power_diag.shape = (n_f, n_chan**2)
                power_diag = power_diag[:, ::n_chan + 1].real
                window_function_diag = this_pol_window.view()
                window_function_diag.shape = (n_time, n_chan**2)
                window_function_diag = window_function_diag[:, ::n_chan + 1]
                # Integral of the power spectrum from -BW to BW.
                channel_var = sp.mean(power_diag, 0) / dt
                # Normalize for the window.
                norms = sp.mean(window_function_diag, 0).real
                bad_inds = norms < 10. / n_time
                norms[bad_inds] = 1
                channel_var /= norms
                # If a channel is completely masked, deweight it by giving
                # it a high variance.
                channel_var[bad_inds] = T_infinity**2
                this_pol_parameters["channel_var"] = channel_var
            for noise_model in parameters:
                if noise_model[:18] == "freq_modes_over_f_":
                    n_modes = int(noise_model[18:])
                    this_pol_parameters[noise_model] = \
                            get_freq_modes_over_f(this_pol_power, this_pol_window,
                                                  frequency, n_modes, plots=plots)
            out_parameters[this_pol] = this_pol_parameters
        return out_parameters
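
The [:, ::n_chan + 1] slicing above is a stride trick: once the trailing (n_chan, n_chan) block of each spectrum sample is flattened, every (n_chan + 1)-th element is a diagonal entry, so the channel auto-powers come out without copying the matrix. A self-contained sketch with illustrative sizes:

import numpy as np

n_f, n_chan = 6, 4
stack = np.arange(n_f * n_chan * n_chan, dtype=float)
stack.shape = (n_f, n_chan, n_chan)
flat = stack.view()
flat.shape = (n_f, n_chan ** 2)
diags = flat[:, ::n_chan + 1]  # shape (n_f, n_chan), still a view
assert np.allclose(diags, np.array([np.diag(m) for m in stack]))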
Ejemplo n.º 46
0
def exportPointset(thepointset, infodict, separator='   ', linesep='\n',
                   precision=12, suppress_small=0, varvaldir='col',
                   ext='', append=False):

    assert varvaldir in ['col', 'row'], \
           "invalid variable value write direction"
    # in order to avoid import cycles, cannot explicitly check that
    # thepointset is of type Pointset, because Points.py imports this file
    # (utils.py), so check an attribute instead.
    try:
        thepointset.coordnames
    except AttributeError:
        raise TypeError, "Must pass Pointset to this function: use arrayToPointset first!"
    infodict_usedkeys = []
    for key, info in infodict.iteritems():
        if isinstance(info, str):
            infodict_usedkeys += [info]
        elif info == []:
            infodict[key] = copy.copy(thepointset.coordnames)
            infodict_usedkeys.extend(thepointset.coordnames)
        else:
            infodict_usedkeys += list(info)
    allnames = copy.copy(thepointset.coordnames)
    if thepointset._parameterized:
        allnames.append(thepointset.indepvarname)
    remlist = remain(infodict_usedkeys, allnames+range(len(allnames)))
    if remlist != []:
        print "Coords not found in pointset:", remlist
        raise ValueError, \
              "invalid keys in infodict - some not present in thepointset"
    assert isinstance(ext, str), "'ext' extension argument must be a string"
    if ext != '':
        if ext[0] != '.':
            ext = '.'+ext
    if append:
        assert varvaldir == 'col', ("append mode not supported for row "
                                     "format of data ordering")
        modestr = 'a'
    else:
        modestr = 'w'
    totlen = len(thepointset)
    if totlen == 0:
        raise ValueError, ("Pointset is empty")
    for fname, tup in infodict.iteritems():
        try:
            f = open(fname+ext, modestr)
        except IOError:
            print "There was a problem opening file "+fname+ext
            raise
        try:
            if isinstance(tup, str):
                try:
                    varray = thepointset[tup]
                except TypeError:
                    raise ValueError, "Invalid specification of coordinates"
            elif isinstance(tup, int):
                try:
                    varray = thepointset[:,tup].toarray()
                except TypeError:
                    raise ValueError, "Invalid specification of coordinates"
            elif type(tup) in [list, tuple]:
                if alltrue([type(ti)==str for ti in tup]):
                    thetup=list(tup)
                    if thepointset.indepvarname in tup:
                        tix = thetup.index(thepointset.indepvarname)
                        thetup.remove(thepointset.indepvarname)
                    try:
                        vlist = thepointset[thetup].toarray().tolist()
                    except TypeError:
                        raise ValueError, "Invalid specification of coordinates"
                    if len(thetup)==1:
                        vlist = [vlist]
                    if thepointset.indepvarname in tup:
                        vlist.insert(tix, thepointset.indepvararray.tolist())
                    varray = array(vlist)
                elif alltrue([type(ti)==int for ti in tup]):
                    try:
                        varray = thepointset[:,tup].toarray()
                    except TypeError:
                        raise ValueError, "Invalid specification of coordinates"
                else:
                    raise ValueError, "Invalid specification of coordinates"
            else:
                f.close()
                raise TypeError, \
                   "infodict values must be singletons or tuples/lists of strings or integers"
        except IOError:
            f.close()
            print "Problem writing to file"+fname+ext
            raise
        except KeyError:
            f.close()
            raise KeyError, ("Keys in infodict not found in pointset")
        if varvaldir == 'row':
            write_array(f, varray, separator, linesep,
                        precision, suppress_small, keep_open=0)
        else:
            write_array(f, transpose(varray), separator, linesep,
                        precision, suppress_small, keep_open=0)
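
The varvaldir switch in exportPointset only decides whether each variable occupies a row or a column of the text file. As a rough standalone equivalent, stripped of the Pointset machinery (hypothetical helper and file name, using numpy.savetxt rather than write_array):

import numpy as np

def write_vars(fname, varray, varvaldir='col', separator='   '):
    # varray holds one variable per row; transpose so each variable
    # becomes a column when varvaldir == 'col'.
    data = varray.T if varvaldir == 'col' else varray
    np.savetxt(fname, data, delimiter=separator)

write_vars('vars.dat', np.array([[0., 1., 2.], [10., 11., 12.]]))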
Ejemplo n.º 47
0
def get_freq_modes_over_f(power_mat,
                          window_function,
                          frequency,
                          n_modes,
                          plots=False):
    """Fines the most correlated frequency modes and fits thier noise."""

    n_f = len(frequency)
    d_f = sp.mean(sp.diff(frequency))
    dt = 1. / 2. / frequency[-1]
    n_chan = power_mat.shape[-1]
    n_time = window_function.shape[0]
    # The threshold for assuming there isn't enough data to measure anything.
    no_data_thres = 10. / n_time
    # Initialize the dictionary that will hold all the parameters.
    output_params = {}
    # First take the low-frequency part of the spectrum matrix and average over
    # enough bins to get a well-conditioned matrix.
    low_f_mat = sp.mean(power_mat[:4 * n_chan, :, :].real, 0)
    # Factor the matrix to get the most correlated modes.
    e, v = linalg.eigh(low_f_mat)
    # Make sure they are sorted.
    if not sp.alltrue(sp.diff(e) >= 0):
        raise RuntimeError("Eigenvalues not sorted")
    # Power matrix stripped of the biggest modes.
    reduced_power = sp.copy(power_mat)
    mode_list = []
    # Solve for the spectra of these modes.
    for ii in range(n_modes):
        this_mode_params = {}
        # Get power spectrum and window function for this mode.
        mode = v[:, -1 - ii]
        mode_power = sp.sum(mode * power_mat.real, -1)
        mode_power = sp.sum(mode * mode_power, -1)
        mode_window = sp.sum(mode[:, None]**2 * window_function, 1)
        mode_window = sp.sum(mode_window * mode[None, :]**2, 1)
        # Protect against no data.
        if sp.mean(mode_window).real < no_data_thres:
            this_mode_params['amplitude'] = 0.
            this_mode_params['index'] = 0.
            this_mode_params['f_0'] = 1.
            this_mode_params['thermal'] = T_infinity**2 * dt
        else:
            # Fit the spectrum.
            p = fit_overf_const(mode_power, mode_window, frequency)
            # Put all the parameters we measured into the output.
            this_mode_params['amplitude'] = p[0]
            this_mode_params['index'] = p[1]
            this_mode_params['f_0'] = p[2]
            this_mode_params['thermal'] = p[3]
        this_mode_params['mode'] = mode
        output_params['over_f_mode_' + str(ii)] = this_mode_params
        # Remove the mode from the power matrix.
        tmp_amp = sp.sum(reduced_power * mode, -1)
        tmp_amp2 = sp.sum(reduced_power * mode[:, None], -2)
        tmp_amp3 = sp.sum(tmp_amp2 * mode, -1)
        reduced_power -= tmp_amp[:, :, None] * mode
        reduced_power -= tmp_amp2[:, None, :] * mode[:, None]
        reduced_power += tmp_amp3[:, None, None] * mode[:, None] * mode
        mode_list.append(mode)
    # Initialize the compensation matrix, which will be used to restore thermal
    # noise that gets subtracted out.  See Jan 29, Feb 17th, 2012 of Kiyo's
    # notes.
    compensation = sp.eye(n_chan, dtype=float)
    for mode1 in mode_list:
        compensation.flat[::n_chan + 1] -= 2 * mode1**2
        for mode2 in mode_list:
            mode_prod = mode1 * mode2
            compensation += mode_prod[:, None] * mode_prod[None, :]
    # Now that we've stripped the noisiest modes, measure the auto power
    # spectrum, averaged over channels.
    auto_spec_mean = reduced_power.view()
    auto_spec_mean.shape = (n_f, n_chan**2)
    auto_spec_mean = auto_spec_mean[:, ::n_chan + 1].real
    auto_spec_mean = sp.mean(auto_spec_mean, -1)
    diag_window = window_function.view()
    diag_window.shape = (n_time, n_chan**2)
    diag_window = diag_window[:, ::n_chan + 1]
    auto_spec_window = sp.mean(diag_window, -1)
    if sp.mean(auto_spec_window).real < no_data_thres:
        auto_cross_over = 0.
        auto_index = 0.
        auto_thermal = 0
    else:
        auto_spec_params = fit_overf_const(auto_spec_mean, auto_spec_window,
                                           frequency)
        auto_thermal = auto_spec_params[3]
        if (auto_spec_params[0] <= 0 or auto_spec_params[3] <= 0
                or auto_spec_params[1] > -0.599):
            auto_cross_over = 0.
            auto_index = 0.
        else:
            auto_index = auto_spec_params[1]
            auto_cross_over = auto_spec_params[2] * (
                auto_spec_params[0] / auto_spec_params[3])**(-1. / auto_index)
            #if auto_cross_over < d_f:
            #    auto_index = 0.
            #    auto_cross_over = 0.
    # Plot the mean auto spectrum if desired.
    if plots:
        h = plt.gcf()
        a = h.add_subplot(*h.current_subplot)
        norm = sp.mean(auto_spec_window).real
        auto_plot = auto_spec_mean / norm
        plotable = auto_plot > 0
        lines = a.loglog(frequency[plotable], auto_plot[plotable])
        c = lines[-1].get_color()
        # And plot the fit in a light color.
        if auto_cross_over > d_f / 4.:
            spec = npow.overf_power_spectrum(auto_thermal, auto_index,
                                             auto_cross_over, dt, n_time)
        else:
            spec = sp.zeros(n_time, dtype=float)
        spec += auto_thermal
        spec[0] = 0
        spec = npow.convolve_power(spec, auto_spec_window)
        spec = npow.prune_power(spec)
        spec = spec[1:].real
        if norm > no_data_thres:
            spec /= norm
        plotable = spec > 0
        a.loglog(frequency[plotable],
                 spec[plotable],
                 c=c,
                 alpha=0.4,
                 linestyle=':')
    output_params['all_channel_index'] = auto_index
    output_params['all_channel_corner_f'] = auto_cross_over
    # Finally measure the thermal part of the noise in each channel.
    cross_over_ind = sp.digitize([auto_cross_over * 4], frequency)[0]
    cross_over_ind = max(cross_over_ind, n_f // 2)
    cross_over_ind = min(cross_over_ind, int(9. * n_f / 10.))
    thermal = reduced_power[cross_over_ind:, :, :].real
    n_high_f = thermal.shape[0]
    thermal.shape = (n_high_f, n_chan**2)
    thermal = sp.mean(thermal[:, ::n_chan + 1], 0)
    thermal_norms = sp.mean(diag_window, 0).real
    bad_inds = thermal_norms < no_data_thres
    thermal_norms[bad_inds] = 1.
    # Compensate for power lost in mode subtraction.
    compensation[:, bad_inds] = 0
    compensation[bad_inds, :] = 0
    for ii in xrange(n_chan):
        if bad_inds[ii]:
            compensation[ii, ii] = 1.
    thermal = linalg.solve(compensation, thermal)
    # Normalize
    thermal /= thermal_norms
    thermal[bad_inds] = T_infinity**2 * dt
    # Occasionally the compensation fails horribly on a few channels.
    # When this happens, zero out the offending indices.
    thermal[thermal < 0] = 0
    output_params['thermal'] = thermal
    # Now that we know what thermal is, we can subtract it out of the modes we
    # already measured.
    for ii in range(n_modes):
        mode_params = output_params['over_f_mode_' + str(ii)]
        thermal_contribution = sp.sum(mode_params['mode']**2 * thermal)
        # Subtract a maximum of 90% of the white noise to keep things positive
        # definite.
        new_white = max(mode_params['thermal'] - thermal_contribution,
                        0.1 * mode_params['thermal'])
        if mode_params['thermal'] < 0.5 * T_infinity**2 * dt:
            mode_params['thermal'] = new_white
    return output_params
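
The three tmp_amp corrections in the mode-removal loop above expand a two-sided projection: subtracting mode v from every channel-channel matrix P is P -> (I - v v^T) P (I - v v^T). A compact NumPy check of that identity (names and sizes are illustrative):

import numpy as np

def remove_mode(power, mode):
    # power: (n_f, n_chan, n_chan) stack; mode: unit-norm (n_chan,).
    pv = power.dot(mode)                      # P v,     shape (n_f, n_chan)
    vp = np.einsum('i,fij->fj', mode, power)  # v^T P,   shape (n_f, n_chan)
    vpv = vp.dot(mode)                        # v^T P v, shape (n_f,)
    out = power.copy()
    out -= pv[:, :, None] * mode              # - (P v) v^T
    out -= mode[:, None] * vp[:, None, :]     # - v (v^T P)
    out += vpv[:, None, None] * np.outer(mode, mode)
    return out

rng = np.random.default_rng(0)
power = rng.standard_normal((3, 5, 5))
v = rng.standard_normal(5)
v /= np.linalg.norm(v)
proj = np.eye(5) - np.outer(v, v)
assert np.allclose(remove_mode(power, v), proj @ power @ proj)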
Ejemplo n.º 48
0
def makeGenInfoEntry(generator, allgen_names, swmap_list=[]):
    """Create an entry for the genInfo attribute of a Model."""

    assert isinstance(allgen_names, list), \
                             "'allgen_names' argument must be a list"
    special_reasons = ['time'] + generator.variables.keys()
    assert generator.name not in special_reasons + ['terminate'], \
         "Cannot use variable names or internal names 'time' and 'terminate' as generator names"
    try:
        allEndReasonNames = map(lambda (n,o):n,
                                generator.eventstruct.getTermEvents()) \
                            + special_reasons
    except AttributeError:
        # no events associated with the generator
        allEndReasonNames = special_reasons
    assert generator.name in allgen_names, (
        "Generator's name not in list of all "
        'available Generator names!')
    assert alltrue([name not in allEndReasonNames for name in allgen_names]), \
           'Generator names overlapped with event or variable names'
    alltarg_names = allgen_names + ['terminate']
    # if no event map function specified, assume the identity fn
    seenReasons = []
    swmap_pairs = []
    if swmap_list != []:
        for mapentry in swmap_list:
            # check the entries of swmap_list and turn into a
            # (reason, infopair) pair, adding a default event map function
            # to some entries
            reason = mapentry[0]
            mapping_info = mapentry[1]
            if len(mapentry) > 2:
                raise ValueError(
                    "mapping entry must be (reason, infopair) tuple")
            if isinstance(mapping_info, tuple):
                genTargetName = mapping_info[0]
                numargs = len(mapping_info)
            elif isinstance(mapping_info, str):
                genTargetName = mapentry[1]
                numargs = 1
            else:
                raise TypeError("Invalid event mapping entry")
            if numargs == 2:
                epmap = mapping_info[1]
                assert isinstance(epmap,
                                  EvMapping), "Must supply EvMapping class"
                swmap_pairs.append((reason, mapping_info))
            elif numargs == 1:
                # use default identity mapping fn for event
                # and make this entry into a three-tuple
                swmap_pairs.append((reason, (genTargetName, EvMapping())))
            else:
                raise ValueError("Expected 2 or 3 arguments to Generator "
                                 "switch map entry")
            assert reason not in seenReasons, ('reason cannot appear more than'
                                               ' once in map domain')
            seenReasons.append(reason)
            assert reason in allEndReasonNames, ("name '" + reason +
                                                 "' in map "
                                                 "domain is missing")
            assert genTargetName in alltarg_names, ("name '" + genTargetName +
                                                    "' in "
                                                    "map range is missing")
    else:
        # There had better be only a single Generator in allgen_names,
        # otherwise we need a map
        assert len(allgen_names) == 1, (
            "There must be an event mapping "
            "specified when there is more than one Generator in the Model")
    unseen_sr = remain(allEndReasonNames, seenReasons)
    if unseen_sr != []:
        # then there are 'end reasons' that do not have switch rules,
        # so give them defaults (terminate)
        for r in unseen_sr:
            swmap_pairs.append((r, ('terminate', EvMapping())))
    if len(swmap_pairs) != len(allEndReasonNames):
        info(dict(swmap_pairs))
        print "(%i in total),versus:" % len(swmap_pairs)
        print allEndReasonNames, "(%i in total)" % len(allEndReasonNames)
        raise ValueError('Incorrect number of map pairs given in argument')
    return {generator.name: (generator, dict(swmap_pairs))}
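
The fallback at the end of makeGenInfoEntry routes every end reason without an explicit switch rule to 'terminate'. A tiny standalone sketch of just that default-filling step (EvMapping is replaced by None purely for illustration):

def fill_default_switch_rules(all_end_reasons, swmap_pairs):
    seen = [reason for reason, target in swmap_pairs]
    return swmap_pairs + [(r, ('terminate', None))
                          for r in all_end_reasons if r not in seen]

pairs = fill_default_switch_rules(['time', 'x_crosses_zero'],
                                  [('x_crosses_zero', ('gen2', None))])
# pairs == [('x_crosses_zero', ('gen2', None)),
#           ('time', ('terminate', None))]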
Ejemplo n.º 49
0
 def execute(self, nprocesses=1) :
     """Worker funciton."""
     params = self.params
     # Make parent directory and write parameter file.
     kiyopy.utils.mkparents(params['output_root'])
     parse_ini.write_params(params, params['output_root'] + 'params.ini',
                            prefix=prefix)
     save_noise_diag = params['save_noise_diag']
     in_root = params['input_root']
     all_out_fname_list = []
     all_in_fname_list = []
     # Figure out what the band names are.
     bands = params['bands']
     if not bands:
         map_files = glob.glob(in_root + 'dirty_map_' + pol_str + "_*.npy")
         bands = []
         root_len = len(in_root + 'dirty_map_')
         for file_name in map_files:
             bands.append(file_name[root_len:-4])
     # Loop over files to process.
     for pol_str in params['polarizations']:
         for band in bands:
             if band == -1:
                 band_str = ''
             else:
                 band_str = "_" + repr(band)
             dmap_fname = (in_root + 'dirty_map_' + pol_str + 
                           band_str + '.npy')
             all_in_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(dmap_fname))
             # Load the dirty map and the noise matrix.
             dirty_map = algebra.load(dmap_fname)
             dirty_map = algebra.make_vect(dirty_map)
             if dirty_map.axes != ('freq', 'ra', 'dec') :
                 msg = ("Expeced dirty map to have axes ('freq',"
                        "'ra', 'dec'), but it has axes: "
                        + str(dirty_map.axes))
                 raise ce.DataError(msg)
             shape = dirty_map.shape
             # Initialize the clean map.
             clean_map = algebra.info_array(sp.zeros(dirty_map.shape))
             clean_map.info = dict(dirty_map.info)
             clean_map = algebra.make_vect(clean_map)
             # If needed, initialize a map for the noise diagonal.
             if save_noise_diag :
                 noise_diag = algebra.zeros_like(clean_map)
             if params["from_eig"]:
                 # Solving from eigen decomposition of the noise instead of
                 # the noise itself.
                 # Load in the decomposition.
                 evects_fname = (in_root + 'noise_evects_' + pol_str +
                                 band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using eigenvectors: " + evects_fname
                 evects = algebra.open_memmap(evects_fname, 'r')
                 evects = algebra.make_mat(evects)
                 evals_inv_fname = (in_root + 'noise_evalsinv_' + pol_str
                                    + "_" + repr(band) + '.npy')
                 evals_inv = algebra.load(evals_inv_fname)
                 evals_inv = algebra.make_mat(evals_inv)
                 # Solve for the map.
                 if params["save_noise_diag"]:
                     clean_map, noise_diag = solve_from_eig(evals_inv,
                                 evects, dirty_map, True, self.feedback)
                 else:
                     clean_map = solve_from_eig(evals_inv,
                                 evects, dirty_map, False, self.feedback)
                 # Delete the eigen vectors to recover memory.
                 del evects
             else:
                 # Solving from the noise.
                 noise_fname = (in_root + 'noise_inv_' + pol_str +
                                band_str + '.npy')
                 if self.feedback > 1:
                     print "Using dirty map: " + dmap_fname
                     print "Using noise inverse: " + noise_fname
                 all_in_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_fname))
                 noise_inv = algebra.open_memmap(noise_fname, 'r')
                 noise_inv = algebra.make_mat(noise_inv)
                 # Two cases for the noise.  If it's the same shape as the map
                 # then the noise is diagonal.  Otherwise, it should be
                 # block diagonal in frequency.
                 if noise_inv.ndim == 3 :
                     if noise_inv.axes != ('freq', 'ra', 'dec') :
                         msg = ("Expeced noise matrix to have axes "
                                 "('freq', 'ra', 'dec'), but it has: "
                                 + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Noise inverse can fit in memory, so copy it.
                     noise_inv_memory = sp.array(noise_inv, copy=True)
                     # Find the non-singular (covered) pixels.
                     max_information = noise_inv_memory.max()
                     good_data = noise_inv_memory > 1.0e-10*max_information
                     # Make the clean map.
                     clean_map[good_data] = (dirty_map[good_data] 
                                             / noise_inv_memory[good_data])
                     if save_noise_diag :
                         noise_diag[good_data] = \
                                 1/noise_inv_memory[good_data]
                 elif noise_inv.ndim == 5 :
                     if noise_inv.axes != ('freq', 'ra', 'dec', 'ra',
                                           'dec'):
                         msg = ("Expeced noise matrix to have axes "
                                "('freq', 'ra', 'dec', 'ra', 'dec'), "
                                "but it has: " + str(noise_inv.axes))
                         raise ce.DataError(msg)
                     # Arrange the dirty map as a vector.
                     dirty_map_vect = sp.array(dirty_map) # A copy.
                     dirty_map_vect.shape = (shape[0], shape[1]*shape[2])
                     frequencies = dirty_map.get_axis('freq')/1.0e6
                     # Allocate memory only once.
                     noise_inv_freq = sp.empty((shape[1], shape[2], 
                                     shape[1], shape[2]), dtype=float)
                     if self.feedback > 1 :
                         print "Inverting noise matrix."
                     # Block diagonal in frequency so loop over frequencies.
                     for ii in xrange(dirty_map.shape[0]) :
                         if self.feedback > 1:
                             print "Frequency: ", "%5.1f"%(frequencies[ii]),
                         if self.feedback > 2:
                             print ", start mmap read:",
                             sys.stdout.flush()
                         noise_inv_freq[...] = noise_inv[ii, ...]
                         if self.feedback > 2:
                             print "done, start eig:",
                             sys.stdout.flush()
                         noise_inv_freq.shape = (shape[1]*shape[2],
                                                 shape[1]*shape[2])
                         # Solve the map making equation by diagonalization.
                         noise_inv_diag, Rot = sp.linalg.eigh(
                             noise_inv_freq, overwrite_a=True)
                         if self.feedback > 2:
                             print "done",
                         map_rotated = sp.dot(Rot.T, dirty_map_vect[ii])
                         # Zero out infinite noise modes.
                         bad_modes = (noise_inv_diag
                                      < 1.0e-5 * noise_inv_diag.max())
                         if self.feedback > 1:
                             print ", discarded: ",
                             print "%4.1f" % (100.0 * sp.sum(bad_modes) 
                                              / bad_modes.size),
                             print "% of modes",
                         if self.feedback > 2:
                             print ", start rotations:",
                             sys.stdout.flush()
                         map_rotated[bad_modes] = 0.
                         noise_inv_diag[bad_modes] = 1.0
                         # Solve for the clean map and rotate back.
                         map_rotated /= noise_inv_diag
                         map = sp.dot(Rot, map_rotated)
                         if self.feedback > 2:
                             print "done",
                             sys.stdout.flush()
                         # Fill the clean array.
                         map.shape = (shape[1], shape[2])
                         clean_map[ii, ...] = map
                         if save_noise_diag :
                             # Using C = R Lambda R^T 
                             # where Lambda = diag(1/noise_inv_diag).
                             temp_noise_diag = 1/noise_inv_diag
                             temp_noise_diag[bad_modes] = 0
                             # Multiply R by the diagonal eigenvalue matrix.
                             # Broadcasting does equivalent of mult by diag
                             # matrix.
                             temp_mat = Rot*temp_noise_diag
                             # Multiply by R^T, but only calculate the
                             # diagonal elements.
                             for jj in range(shape[1]*shape[2]) :
                                 temp_noise_diag[jj] = sp.dot(
                                     temp_mat[jj,:], Rot[jj,:])
                             temp_noise_diag.shape = (shape[1], shape[2])
                             noise_diag[ii, ...] = temp_noise_diag
                         # Return workspace memory to original shape.
                         noise_inv_freq.shape = (shape[1], shape[2],
                                                 shape[1], shape[2])
                         if self.feedback > 1:
                             print ""
                             sys.stdout.flush()
                 elif noise_inv.ndim == 6 :
                     if save_noise_diag:
                         # OLD WAY.
                         #clean_map, noise_diag, chol = solve(noise_inv,
                         #        dirty_map, True, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_diag, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   True, feedback=self.feedback)
                     else:
                         # OLD WAY.
                         #clean_map, chol = solve(noise_inv, dirty_map, 
                         #            False, feedback=self.feedback)
                         # NEW WAY.
                         clean_map, noise_inv_diag, chol = \
                                   solve(noise_fname, noise_inv, dirty_map,
                                   False, feedback=self.feedback)
                     if params['save_cholesky']:
                         chol_fname = (params['output_root'] + 'chol_'
                                     + pol_str + band_str + '.npy')
                         sp.save(chol_fname, chol)
                     if params['save_noise_inv_diag']:
                         noise_inv_diag_fname = (params['output_root'] +
                                    'noise_inv_diag_' + pol_str + band_str 
                                    + '.npy')
                         algebra.save(noise_inv_diag_fname, noise_inv_diag)
                     # Delete the cholesky to recover memory.
                     del chol
                 else :
                     raise ce.DataError("Noise matrix has bad shape.")
                 # In all cases delete the noise object to recover memory.
                 del noise_inv
             # Write the clean map to file.
             out_fname = (params['output_root'] + 'clean_map_'
                          + pol_str + band_str + '.npy')
             if self.feedback > 1:
                 print "Writing clean map to: " + out_fname
             algebra.save(out_fname, clean_map)
             all_out_fname_list.append(
                 kiyopy.utils.abbreviate_file_path(out_fname))
             if save_noise_diag :
                 noise_diag_fname = (params['output_root'] + 'noise_diag_'
                                     + pol_str + band_str + '.npy')
                 algebra.save(noise_diag_fname, noise_diag)
                 all_out_fname_list.append(
                     kiyopy.utils.abbreviate_file_path(noise_diag_fname))
             # Check the clean map for failure.
             if not sp.alltrue(sp.isfinite(clean_map)):
                 n_bad = sp.sum(sp.logical_not(sp.isfinite(clean_map)))
                 msg = ("Non finite entries found in clean map. Solve"
                        " failed. %d out of %d entries bad" 
                        % (n_bad, clean_map.size)) 
                 raise RuntimeError(msg)
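
For the diagonal-noise branch above (inverse noise the same shape as the map), the whole solve collapses to an elementwise divide over the covered pixels. A minimal NumPy sketch, keeping the code's 1.0e-10 relative threshold but with illustrative names:

import numpy as np

def clean_diagonal(dirty_map, noise_inv, rel_thresh=1.0e-10):
    clean = np.zeros_like(dirty_map)
    noise_diag = np.zeros_like(dirty_map)
    # Covered pixels carry a non-negligible fraction of the peak
    # information; uncovered pixels stay zero.
    good = noise_inv > rel_thresh * noise_inv.max()
    clean[good] = dirty_map[good] / noise_inv[good]
    noise_diag[good] = 1. / noise_inv[good]
    return clean, noise_diag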
Ejemplo n.º 50
0
def exportPointset(thepointset,
                   infodict,
                   separator='   ',
                   linesep='\n',
                   precision=12,
                   suppress_small=0,
                   varvaldir='col',
                   ext='',
                   append=False):

    assert varvaldir in ['col', 'row'], \
           "invalid variable value write direction"
    # in order to avoid import cycles, cannot explicitly check that
    # thepointset is of type Pointset, because Points.py imports this file
    # (utils.py), so check an attribute instead.
    try:
        thepointset.coordnames
    except AttributeError:
        raise TypeError, "Must pass Pointset to this function: use arrayToPointset first!"
    infodict_usedkeys = []
    for key, info in infodict.iteritems():
        if isinstance(info, str):
            infodict_usedkeys += [info]
        elif info == []:
            infodict[key] = copy.copy(thepointset.coordnames)
            infodict_usedkeys.extend(thepointset.coordnames)
        else:
            infodict_usedkeys += list(info)
    allnames = copy.copy(thepointset.coordnames)
    if thepointset._parameterized:
        allnames.append(thepointset.indepvarname)
    remlist = remain(infodict_usedkeys, allnames + range(len(allnames)))
    if remlist != []:
        print "Coords not found in pointset:", remlist
        raise ValueError, \
              "invalid keys in infodict - some not present in thepointset"
    assert isinstance(ext, str), "'ext' extension argument must be a string"
    if ext != '':
        if ext[0] != '.':
            ext = '.' + ext
    if append:
        assert varvaldir == 'col', ("append mode not supported for row "
                                    "format of data ordering")
        modestr = 'a'
    else:
        modestr = 'w'
    totlen = len(thepointset)
    if totlen == 0:
        raise ValueError, ("Pointset is empty")
    for fname, tup in infodict.iteritems():
        try:
            f = open(fname + ext, modestr)
        except IOError:
            print "There was a problem opening file " + fname + ext
            raise
        try:
            if isinstance(tup, str):
                try:
                    varray = thepointset[tup]
                except TypeError:
                    raise ValueError, "Invalid specification of coordinates"
            elif isinstance(tup, int):
                try:
                    varray = thepointset[:, tup].toarray()
                except TypeError:
                    raise ValueError, "Invalid specification of coordinates"
            elif type(tup) in [list, tuple]:
                if alltrue([type(ti) == str for ti in tup]):
                    thetup = list(tup)
                    if thepointset.indepvarname in tup:
                        tix = thetup.index(thepointset.indepvarname)
                        thetup.remove(thepointset.indepvarname)
                    try:
                        vlist = thepointset[thetup].toarray().tolist()
                    except TypeError:
                        raise ValueError, "Invalid specification of coordinates"
                    if len(thetup) == 1:
                        vlist = [vlist]
                    if thepointset.indepvarname in tup:
                        vlist.insert(tix, thepointset.indepvararray.tolist())
                    varray = array(vlist)
                elif alltrue([type(ti) == int for ti in tup]):
                    try:
                        varray = thepointset[:, tup].toarray()
                    except TypeError:
                        raise ValueError, "Invalid specification of coordinates"
                else:
                    raise ValueError, "Invalid specification of coordinates"
            else:
                f.close()
                raise TypeError, \
                   "infodict values must be singletons or tuples/lists of strings or integers"
        except IOError:
            f.close()
            print "Problem writing to file" + fname + ext
            raise
        except KeyError:
            f.close()
            raise KeyError, ("Keys in infodict not found in pointset")
        if varvaldir == 'row':
            write_array(f,
                        varray,
                        separator,
                        linesep,
                        precision,
                        suppress_small,
                        keep_open=0)
        else:
            write_array(f,
                        transpose(varray),
                        separator,
                        linesep,
                        precision,
                        suppress_small,
                        keep_open=0)
Ejemplo n.º 51
0
def makeGenInfoEntry(generator, allgen_names, swmap_list=[]):
    """Create an entry for the genInfo attribute of a Model."""

    assert isinstance(allgen_names, list), "'allgen_names' argument must be a list"
    special_reasons = ["time"] + generator.variables.keys()
    assert generator.name not in special_reasons + [
        "terminate"
    ], "Cannot use variable names or internal names 'time' and 'terminate' as generator names"
    try:
        allEndReasonNames = map(lambda (n, o): n, generator.eventstruct.getTermEvents()) + special_reasons
    except AttributeError:
        # no events associated with the generator
        allEndReasonNames = special_reasons
    assert generator.name in allgen_names, "Generator's name not in list of all " "available Generator names!"
    assert alltrue(
        [name not in allEndReasonNames for name in allgen_names]
    ), "Generator names overlapped with event or variable names"
    alltarg_names = allgen_names + ["terminate"]
    # if no event map function specified, assume the identity fn
    seenReasons = []
    swmap_pairs = []
    if swmap_list != []:
        for mapentry in swmap_list:
            # check the entries of swmap_list and turn into a
            # (reason, infopair) pair, adding a default event map function
            # to some entries
            reason = mapentry[0]
            mapping_info = mapentry[1]
            if len(mapentry) > 2:
                raise ValueError("mapping entry must be (reason, infopair) tuple")
            if isinstance(mapping_info, tuple):
                genTargetName = mapping_info[0]
                numargs = len(mapping_info)
            elif isinstance(mapping_info, str):
                genTargetName = mapentry[1]
                numargs = 1
            else:
                raise TypeError("Invalid event mapping entry")
            if numargs == 2:
                epmap = mapping_info[1]
                assert isinstance(epmap, EvMapping), "Must supply EvMapping class"
                swmap_pairs.append((reason, mapping_info))
            elif numargs == 1:
                # use default identity mapping fn for event
                # and make this entry into a three-tuple
                swmap_pairs.append((reason, (genTargetName, EvMapping())))
            else:
                raise ValueError("Expected 2 or 3 arguments to Generator " "switch map entry")
            assert reason not in seenReasons, "reason cannot appear more than" " once in map domain"
            seenReasons.append(reason)
            assert reason in allEndReasonNames, "name '" + reason + "' in map " "domain is missing"
            assert genTargetName in alltarg_names, "name '" + genTargetName + "' in " "map range is missing"
    else:
        # There had better be only a single Generator in allgen_names,
        # otherwise we need a map
        assert len(allgen_names) == 1, (
            "There must be an event mapping " "specified when there is more than one Generator in the Model"
        )
    unseen_sr = remain(allEndReasonNames, seenReasons)
    if unseen_sr != []:
        # then there are 'end reasons' that do not have switch rules,
        # so give them defaults (terminate)
        for r in unseen_sr:
            swmap_pairs.append((r, ("terminate", EvMapping())))
    if len(swmap_pairs) != len(allEndReasonNames):
        info(dict(swmap_pairs))
        print "(%i in total),versus:" % len(swmap_pairs)
        print allEndReasonNames, "(%i in total)" % len(allEndReasonNames)
        raise ValueError("Incorrect number of map pairs given in argument")
    return {generator.name: (generator, dict(swmap_pairs))}
Ejemplo n.º 52
0
def measure_noise_parameters(Blocks, parameters, split_scans=False, plots=False):
    """Given a set of data blocks, measure noise parameters.

    Measurement done for all polarizations but only the first cal state.
    """

    # Initialize the output.
    out_parameters = {}
    if set(parameters) == {"channel_var"}:
        # Calculate the full correlated power spectrum.
        power_mat, window_function, dt, channel_means = npow.full_power_diag(
            Blocks,
            window="hanning",
            deconvolve=False,
            n_time=-1.05,
            normalize=False,
            split_scans=split_scans,
            subtract_slope=True,
        )
        # This shouldn't be necessary, since I've tried to keep things finite in
        # the above function.  However, leave it in for now just in case.
        if not sp.alltrue(sp.isfinite(power_mat)):
            msg = "Non finite power spectrum calculated.  Offending data in " "file starting with scan %d." % (
                Blocks[0].field["SCAN"]
            )
            raise ce.DataError(msg)
        # Get frequency axis and do unit conversions.
        n_time = power_mat.shape[0]
        n_chan = power_mat.shape[-1]
        frequency = npow.ps_freq_axis(dt, n_time)
        power_mat = npow.prune_power(power_mat, 0)
        power_mat = npow.make_power_physical_units(power_mat, dt)
        # Discard the mean mode.
        frequency = frequency[1:]
        power_mat = power_mat[1:, ...]
        n_f = len(frequency)
        # Loop over polarizations.
        cal_ind = 0
        n_pols = power_mat.shape[1]
        for ii in range(n_pols):
            this_pol_power = power_mat[:, ii, cal_ind, :]
            this_pol_window = window_function[:, ii, cal_ind, :]
            this_pol = Blocks[0].field["CRVAL4"][ii]
            this_pol_parameters = {}
            # If we are plotting, activate the subplot for this polarization and
            # this band.
            if plots:
                h = plt.gcf()
                # The last subplot should be hidden in the figure object.
                current_subplot = h.current_subplot
                current_subplot = current_subplot[:2] + (current_subplot[2] + 1,)
                h.current_subplot = current_subplot
            # Now figure out what we want to measure and measure it.
            if "channel_var" in parameters:
                power_diag = this_pol_power.view()
                window_function_diag = this_pol_window.view()
                # Integral of the power spectrum from -BW to BW.
                channel_var = sp.mean(power_diag, 0) / dt
                # Normalize for the window.
                norms = sp.mean(window_function_diag, 0).real
                bad_inds = norms < 10.0 / n_time
                norms[bad_inds] = 1
                channel_var /= norms
                # If a channel is completely masked, deweight it by giving
                # it a high variance.
                channel_var[bad_inds] = T_infinity ** 2
                this_pol_parameters["channel_var"] = channel_var
            out_parameters[this_pol] = this_pol_parameters
        return out_parameters
    else:
        # Calculate the full correlated power spectrum.
        power_mat, window_function, dt, channel_means = npow.full_power_mat(
            Blocks,
            window="hanning",
            deconvolve=False,
            n_time=-1.05,
            normalize=False,
            split_scans=split_scans,
            subtract_slope=True,
        )
        # This shouldn't be necessary, since I've tried to keep things finite in
        # the above function.  However, leave it in for now just in case.
        if not sp.alltrue(sp.isfinite(power_mat)):
            msg = "Non finite power spectrum calculated.  Offending data in " "file starting with scan %d." % (
                Blocks[0].field["SCAN"]
            )
            raise ce.DataError(msg)
        # Get frequency axis and do unit conversions.
        n_time = power_mat.shape[0]
        n_chan = power_mat.shape[-1]
        frequency = npow.ps_freq_axis(dt, n_time)
        power_mat = npow.prune_power(power_mat, 0)
        power_mat = npow.make_power_physical_units(power_mat, dt)
        # Discard the mean mode.
        frequency = frequency[1:]
        power_mat = power_mat[1:, ...]
        n_f = len(frequency)
        # Loop over polarizations.
        cal_ind = 0
        n_pols = power_mat.shape[1]
        for ii in range(n_pols):
            this_pol_power = power_mat[:, ii, cal_ind, :, :]
            this_pol_window = window_function[:, ii, cal_ind, :, :]
            this_pol = Blocks[0].field["CRVAL4"][ii]
            this_pol_parameters = {}
            # If we are plotting, activate the subplot for this polarization and
            # this band.
            if plots:
                h = plt.gcf()
                # The last subplot should be hidden in the figure object.
                current_subplot = h.current_subplot
                current_subplot = current_subplot[:2] + (current_subplot[2] + 1,)
                h.current_subplot = current_subplot
            # Now figure out what we want to measure and measure it.
            if "channel_var" in parameters:
                power_diag = this_pol_power.view()
                power_diag.shape = (n_f, n_chan ** 2)
                power_diag = power_diag[:, :: n_chan + 1].real
                window_function_diag = this_pol_window.view()
                window_function_diag.shape = (n_time, n_chan ** 2)
                window_function_diag = window_function_diag[:, :: n_chan + 1]
                # Integral of the power spectrum from -BW to BW.
                channel_var = sp.mean(power_diag, 0) / dt
                # Normalize for the window.
                norms = sp.mean(window_function_diag, 0).real
                bad_inds = norms < 10.0 / n_time
                norms[bad_inds] = 1
                channel_var /= norms
                # If a channel is completely masked, deweight it by giving
                # it a high variance.
                channel_var[bad_inds] = T_infinity ** 2
                this_pol_parameters["channel_var"] = channel_var
            for noise_model in parameters:
                if noise_model[:18] == "freq_modes_over_f_":
                    n_modes = int(noise_model[18:])
                    this_pol_parameters[noise_model] = get_freq_modes_over_f(
                        this_pol_power, this_pol_window, frequency, n_modes, plots=plots
                    )
            out_parameters[this_pol] = this_pol_parameters
        return out_parameters
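The diagonal extraction above deserves a note: a stack of matrices of shape
(n_f, n_chan, n_chan) is viewed as (n_f, n_chan**2) so that slicing with
step n_chan + 1 pulls out each matrix's diagonal without copying.  A minimal
sketch of the same trick (made-up shapes, plain numpy standing in for the
old scipy aliases):

import numpy as np

n_f, n_chan = 5, 4
power = np.random.rand(n_f, n_chan, n_chan)
# Flat indices i*n_chan + i, i.e. every (n_chan + 1)-th element of each row,
# are exactly the diagonal entries.
diag = power.reshape(n_f, n_chan ** 2)[:, ::n_chan + 1]
assert np.allclose(diag, np.array([np.diag(m) for m in power]))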
Ejemplo n.º 53
0
def get_freq_modes_over_f(power_mat, window_function, frequency, n_modes, plots=False):
    """Fines the most correlated frequency modes and fits thier noise."""

    n_f = len(frequency)
    d_f = sp.mean(sp.diff(frequency))
    dt = 1.0 / 2.0 / frequency[-1]
    n_chan = power_mat.shape[-1]
    n_time = window_function.shape[0]
    # The threshold for assuming there isn't enough data to measure anything.
    no_data_thres = 10.0 / n_time
    # Initialize the dictionary that will hold all the parameters.
    output_params = {}
    # First take the low-frequency part of the spectrum matrix and average
    # over enough bins to get a well-conditioned matrix.
    low_f_mat = sp.mean(power_mat[: 4 * n_chan, :, :].real, 0)
    # Factor the matrix to get the most correlated modes.
    e, v = linalg.eigh(low_f_mat)
    # Make sure they are sorted.
    if not sp.alltrue(sp.diff(e) >= 0):
        raise RuntimeError("Eigenvalues not sorted")
    # Power matrix stripped of the biggest modes.
    reduced_power = sp.copy(power_mat)
    mode_list = []
    # Solve for the spectra of these modes.
    for ii in range(n_modes):
        this_mode_params = {}
        # Get power spectrum and window function for this mode.
        mode = v[:, -1 - ii]
        mode_power = sp.sum(mode * power_mat.real, -1)
        mode_power = sp.sum(mode * mode_power, -1)
        mode_window = sp.sum(mode[:, None] ** 2 * window_function, 1)
        mode_window = sp.sum(mode_window * mode[None, :] ** 2, 1)
        # Protect against no data.
        if sp.mean(mode_window).real < no_data_thres:
            this_mode_params["amplitude"] = 0.0
            this_mode_params["index"] = 0.0
            this_mode_params["f_0"] = 1.0
            this_mode_params["thermal"] = T_infinity ** 2 * dt
        else:
            # Fit the spectrum.
            p = fit_overf_const(mode_power, mode_window, frequency)
            # Put all the parameters we measured into the output.
            this_mode_params["amplitude"] = p[0]
            this_mode_params["index"] = p[1]
            this_mode_params["f_0"] = p[2]
            this_mode_params["thermal"] = p[3]
        this_mode_params["mode"] = mode
        output_params["over_f_mode_" + str(ii)] = this_mode_params
        # Remove the mode from the power matrix.
        tmp_amp = sp.sum(reduced_power * mode, -1)
        tmp_amp2 = sp.sum(reduced_power * mode[:, None], -2)
        tmp_amp3 = sp.sum(tmp_amp2 * mode, -1)
        reduced_power -= tmp_amp[:, :, None] * mode
        reduced_power -= tmp_amp2[:, None, :] * mode[:, None]
        reduced_power += tmp_amp3[:, None, None] * mode[:, None] * mode
        mode_list.append(mode)
    # Initialize the compensation matrix, which will be used to restore
    # thermal noise that gets subtracted out.  See Jan 29 and Feb 17th, 2012
    # of Kiyo's notes.
    compensation = sp.eye(n_chan, dtype=float)
    for mode1 in mode_list:
        compensation.flat[:: n_chan + 1] -= 2 * mode1 ** 2
        for mode2 in mode_list:
            mode_prod = mode1 * mode2
            compensation += mode_prod[:, None] * mode_prod[None, :]
    # Now that we've stripped the noisiest modes, measure the auto power
    # spectrum, averaged over channels.
    auto_spec_mean = reduced_power.view()
    auto_spec_mean.shape = (n_f, n_chan ** 2)
    auto_spec_mean = auto_spec_mean[:, :: n_chan + 1].real
    auto_spec_mean = sp.mean(auto_spec_mean, -1)
    diag_window = window_function.view()
    diag_window.shape = (n_time, n_chan ** 2)
    diag_window = diag_window[:, :: n_chan + 1]
    auto_spec_window = sp.mean(diag_window, -1)
    if sp.mean(auto_spec_window).real < no_data_thres:
        auto_cross_over = 0.0
        auto_index = 0.0
        auto_thermal = 0
    else:
        auto_spec_params = fit_overf_const(auto_spec_mean, auto_spec_window, frequency)
        auto_thermal = auto_spec_params[3]
        if auto_spec_params[0] <= 0 or auto_spec_params[3] <= 0 or auto_spec_params[1] > -0.599:
            auto_cross_over = 0.0
            auto_index = 0.0
        else:
            auto_index = auto_spec_params[1]
            auto_cross_over = auto_spec_params[2] * (auto_spec_params[0] / auto_spec_params[3]) ** (-1.0 / auto_index)
            # if auto_cross_over < d_f:
            #    auto_index = 0.
            #    auto_cross_over = 0.
    # Plot the mean auto spectrum if desired.
    if plots:
        h = plt.gcf()
        a = h.add_subplot(*h.current_subplot)
        norm = sp.mean(auto_spec_window).real
        auto_plot = auto_spec_mean / norm
        plotable = auto_plot > 0
        lines = a.loglog(frequency[plotable], auto_plot[plotable])
        c = lines[-1].get_color()
        # And plot the fit in a light color.
        if auto_cross_over > d_f / 4.0:
            spec = npow.overf_power_spectrum(auto_thermal, auto_index, auto_cross_over, dt, n_time)
        else:
            spec = sp.zeros(n_time, dtype=float)
        spec += auto_thermal
        spec[0] = 0
        spec = npow.convolve_power(spec, auto_spec_window)
        spec = npow.prune_power(spec)
        spec = spec[1:].real
        if norm > no_data_thres:
            spec /= norm
        plotable = spec > 0
        a.loglog(frequency[plotable], spec[plotable], c=c, alpha=0.4, linestyle=":")
    output_params["all_channel_index"] = auto_index
    output_params["all_channel_corner_f"] = auto_cross_over
    # Finally measure the thermal part of the noise in each channel.
    cross_over_ind = sp.digitize([auto_cross_over * 4], frequency)[0]
    cross_over_ind = max(cross_over_ind, n_f // 2)
    cross_over_ind = min(cross_over_ind, int(9.0 * n_f / 10.0))
    thermal = reduced_power[cross_over_ind:, :, :].real
    n_high_f = thermal.shape[0]
    thermal.shape = (n_high_f, n_chan ** 2)
    thermal = sp.mean(thermal[:, :: n_chan + 1], 0)
    thermal_norms = sp.mean(diag_window, 0).real
    bad_inds = thermal_norms < no_data_thres
    thermal_norms[bad_inds] = 1.0
    # Compensate for power lost in mode subtraction.
    compensation[:, bad_inds] = 0
    compensation[bad_inds, :] = 0
    for ii in xrange(n_chan):
        if bad_inds[ii]:
            compensation[ii, ii] = 1.0
    thermal = linalg.solve(compensation, thermal)
    # Normalize
    thermal /= thermal_norms
    thermal[bad_inds] = T_infinity ** 2 * dt
    # Occasionally the compensation fails horribly on a few channels.
    # When this happens, zero out the offending indices.
    thermal[thermal < 0] = 0
    output_params["thermal"] = thermal
    # Now that we know what thermal is, we can subtract it out of the modes we
    # already measured.
    for ii in range(n_modes):
        mode_params = output_params["over_f_mode_" + str(ii)]
        thermal_contribution = sp.sum(mode_params["mode"] ** 2 * thermal)
        # Subtract a maximum of 90% of the white noise to keep things
        # positive definite.
        new_white = max(mode_params["thermal"] - thermal_contribution, 0.1 * mode_params["thermal"])
        if mode_params["thermal"] < 0.5 * T_infinity ** 2 * dt:
            mode_params["thermal"] = new_white
    return output_params
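The three-term update in the mode-removal loop above is the expanded form of
projecting the mode out of the power matrix on both sides.  A minimal sketch
(made-up data, plain numpy in place of the scipy aliases) verifying the
identity (I - v v^T) C (I - v v^T):

import numpy as np

n_chan = 6
rng = np.random.default_rng(0)
a = rng.standard_normal((n_chan, n_chan))
cov = a @ a.T                   # symmetric stand-in for the power matrix
e, v = np.linalg.eigh(cov)      # eigenvalues ascending, as with linalg.eigh
mode = v[:, -1]                 # the most correlated mode

# Direct projection on both sides.
proj = np.eye(n_chan) - np.outer(mode, mode)
direct = proj @ cov @ proj
# Expanded form, mirroring the tmp_amp, tmp_amp2 and tmp_amp3 terms.
expanded = (cov - np.outer(cov @ mode, mode) - np.outer(mode, mode @ cov)
            + (mode @ cov @ mode) * np.outer(mode, mode))
assert np.allclose(direct, expanded)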
Ejemplo n.º 54
0
def make_masked_time_stream(Blocks, ntime=None, window=None, 
                            return_means=False, subtract_slope=False) :
    """Converts Data Blocks into a single uniformly sampled time stream.
    
    Also produces a mask giving whether each element is a valid entry or
    came from a zero pad.  This produces the required inputs for calculating
    a windowed power spectrum.

    Parameters
    ----------
    Blocks : tuple of DataBlock objects.
    ntime : int
        Total number of time bins in the output arrays.  If shorter than
        required, extra data is truncated.  If longer, extra data is masked.
        Default is to use exactly the number that fits all the data.  Set to
        a negative factor to zero pad to a power of 2 and by at least that
        factor.
    window : string or tuple
        Type of window to apply to each DataBlock.  Valid options are the valid
        arguments to scipy.signal.get_window().  By default, don't window.
    return_means : bool
        Whether to return an array of the channel means.
    subtract_slope : bool
        Whether to subtract a linear function of time from each channel.

    Returns
    -------
    time_stream : array
        All the data in `Blocks` concatenated along the time axis and padded
        with zeros such that the time axis is uniformly sampled and
        uninterrupted.
    mask : array same shape as `time_stream`
        1.0 if data in the corresponding `time_stream` element is filled
        and 0 if the data was missing.  This is like a window where
        time_stream = mask*real_data.
    dt : float
        The time step of the returned time stream.
    means : array (optional)
        The mean from each channel.
    """

    # Shape of all axes except the time axis.
    back_shape = Blocks[0].dims[1:]
    # Get the time sample spacing.
    Blocks[0].calc_time()
    dt = abs(sp.mean(sp.diff(Blocks[0].time)))
    # Find the beginning and the end of the time axis by looping through
    # blocks.
    # Also get the time axis and the mask
    # for calculating basis polynomials.
    unmask = sp.zeros((0,) + back_shape, dtype=bool)
    time = sp.zeros((0,), dtype=float)
    start_ind = []
    min_time = float('inf')
    max_time = 0.0
    #mean_time = 0.0
    #n_data_times = 0
    for Data in Blocks :
        Data.calc_time()
        start_ind.append(len(time))
        time = sp.concatenate((time, Data.time))
        this_unmask = sp.logical_not(ma.getmaskarray(Data.data))
        unmask = sp.concatenate((unmask, this_unmask), 0)
        # Often the start or the end of a scan is completely masked.  Make
        # sure we don't start until the first unmasked time and end at the
        # last unmasked time.
        time_unmask = sp.alltrue(ma.getmaskarray(Data.data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        min_time = min(min_time, min(Data.time[time_unmask]))
        max_time = max(max_time, max(Data.time[time_unmask]))
        #mean_time += sp.sum(Data.time[time_unmask])
        #n_data_times += len(Data.time[time_unmask])
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)
                and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,
                                rtol=0.001)) :
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape :
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
    # Now calculate basis polynomials for the mean mode and the slope mode.
    polys = misc.ortho_poly(time[:,None,None,None], 2, unmask, 0)
    #mean_time /= n_data_times
    #if n_data_times == 0:
    #    n_data_times = 1
    # Very important to subtract the mean out of the signal, otherwise the
    # window coupling to the mean (0) mode will dominate everything. Can also
    # optionally take out a slope.
    # Old algorithm.
    #total_sum = 0.0
    #total_counts = 0
    #total_slope = 0.0
    #time_norm = 0.0
    #for Data in Blocks:
    #    total_sum += sp.sum(Data.data.filled(0), 0)
    #    total_counts += ma.count(Data.data, 0)
    #    total_slope += sp.sum(Data.data.filled(0) 
    #                          * (Data.time[:,None,None,None] - mean_time), 0)
    #    time_norm += sp.sum(sp.logical_not(ma.getmaskarray(Data.data))
    #                        * (Data.time[:,None,None,None] - mean_time)**2, 0)
    #total_counts[total_counts == 0] = 1
    #time_norm[time_norm == 0.0] = 1
    #total_mean = total_sum / total_counts
    #total_slope /= time_norm
    # New algorithm.
    mean_amp = 0
    slope_amp = 0
    for ii, Data in enumerate(Blocks):
        si = start_ind[ii]
        this_nt = Data.dims[0]
        data = Data.data.filled(0)
        mean_amp += sp.sum(data * unmask[si:si + this_nt,...]
                           * polys[0,si:si + this_nt,...], 0)
        slope_amp += sp.sum(data * unmask[si:si + this_nt,...]
                            * polys[1,si:si + this_nt,...], 0)
    polys[0,...] *= mean_amp
    polys[1,...] *= slope_amp
    # Calculate the time axis.
    if min_time > max_time:
        min_time = 0
        max_time = 6 * dt
    if not ntime :
        ntime = (max_time - min_time) // dt + 1
    elif ntime < 0:
        # 0 pad by a factor of at least -ntime, but at most 10% more than this.
        time_min = -ntime * (max_time - min_time) / dt
        n_block = 1
        while n_block < time_min/20.0:
            n_block *= 2
        ntime = (time_min//n_block  + 1) * n_block

    time = sp.arange(ntime)*dt + min_time
    # Allocate memory for the outputs.
    time_stream = sp.zeros((ntime,) + back_shape, dtype=float)
    mask = sp.zeros((ntime,) + back_shape, dtype=sp.float32)
    # Loop over all times and fill in the arrays.
    for ii, Data in enumerate(Blocks):
        this_nt = Data.dims[0]
        si = start_ind[ii]
        # Subtract the mean calculated above.
        this_data = Data.data.copy()
        this_data -= polys[0,si:si + this_nt,...]
        # If desired, subtract off the linear function of time.
        if subtract_slope:
            #this_data -= (total_slope 
            #              * (Data.time[:,None,None,None] - mean_time))
            this_data -= polys[1,si:si + this_nt,...]
        # Find the first and last unmasked times.
        time_unmask = sp.alltrue(ma.getmaskarray(this_data), -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        time_unmask = sp.alltrue(time_unmask, -1)
        if sp.alltrue(time_unmask):
            continue
        time_unmask = sp.logical_not(time_unmask)
        unmasked_ind, = sp.where(time_unmask)
        first_ind = min(unmasked_ind)
        last_ind = max(unmasked_ind)
        # Ensure that the time sampling is uniform.
        if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)
                and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,
                                rtol=0.001)) :
            msg = ("Time sampling not uniformly spaced or Data Blocks don't "
                   "agree on sampling.")
            raise ce.DataError(msg)
        # Ensure the shapes are right.
        if Data.dims[1:] != back_shape :
            msg = ("All data blocks must have the same shape except the time "
                   "axis.")
            raise ce.DataError(msg)
        # Apply an offset to the time in case the start of the Data Block
        # doesn't line up with the time array perfectly.
        offset = (time[sp.argmin(abs(time - Data.time[first_ind]))]
                  - Data.time[first_ind])
        # Generate window function.
        if window:
            window_function = sig.get_window(window, last_ind - first_ind + 1)
        for ii in range(first_ind, last_ind + 1) :
            ind = sp.argmin(abs(time - (Data.time[ii] + offset)))
            if abs(time[ind] - (Data.time[ii])) < 0.5*dt :
                if sp.any(mask[ind, ...]) :
                    msg = "Overlapping times in Data Blocks."
                    raise ce.DataError(msg)
                if window:
                    window_value = window_function[ii - first_ind]
                else :
                    window_value = 1.0
                time_stream[ind, ...] = (window_value 
                                         * this_data[ii, ...].filled(0.0))
                mask[ind, ...] = window_value * sp.logical_not(ma.getmaskarray(
                                     this_data)[ii, ...])
    if return_means:
        return time_stream, mask, dt, polys[0,0,...]
    else :
        return time_stream, mask, dt
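As a usage illustration, the outputs above are exactly what a windowed power
spectrum estimate needs: the mask acts as the window, and its mean square
roughly normalizes the raw periodogram.  A simplified, self-contained sketch
(not the npow implementation; the normalization here is only approximate):

import numpy as np

n_time, dt = 1024, 0.5
t = np.arange(n_time) * dt
data = np.sin(2 * np.pi * 0.3 * t) + np.random.randn(n_time)
mask = np.ones(n_time)
mask[100:150] = 0.0              # a masked gap, as from a zero pad
time_stream = data * mask        # time_stream = mask * real_data

# Raw periodogram of the padded stream, crudely corrected for the window.
spectrum = np.abs(np.fft.rfft(time_stream)) ** 2 * dt / n_time
spectrum /= np.mean(mask ** 2)
frequency = np.fft.rfftfreq(n_time, dt)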
Ejemplo n.º 55
0
 def test_build_noise(self):
     map = self.map
     time_stream, ra, dec, az, el, time, mask_inds = \
                                            self.DM.get_all_trimmed()
     nt = len(time)
     Noise = dirty_map.Noise(time_stream, time)
     thermal_noise_levels = sp.zeros((nf_d)) + 0.04  # Kelvin**2
     Noise.add_thermal(thermal_noise_levels)
     Noise.add_mask(mask_inds)
     self.assertTrue(sp.alltrue(Noise.diagonal[mask_inds] > 10))
     Noise.deweight_time_mean()
     Noise.deweight_time_slope()
     Noise.add_correlated_over_f(0.01, -1.2, 0.1)
     Noise.finalize()
     #### Test the full inverse.
     # First get a full representation of the noise matrix
     #tmp_mat = sp.zeros((nf_d, nt, nf_d, nt))
     #tmp_mat.flat[::nt*nf_d + 1] += Noise.diagonal.flat
     #for jj in xrange(Noise.time_modes.shape[0]):
     #    tmp_mat += (Noise.time_mode_noise[jj,:,None,:,None]
     #                * Noise.time_modes[jj,None,:,None,None]
     #                * Noise.time_modes[jj,None,None,None,:])
     #for jj in xrange(Noise.freq_modes.shape[0]):
     #    tmp_mat +=  (Noise.freq_mode_noise[jj,None,:,None,:]
     #                 * Noise.freq_modes[jj,:,None,None,None]
     #                 * Noise.freq_modes[jj,None,None,:,None])
     tmp_mat = Noise.get_mat()
     tmp_mat.shape = (nt*nf_d, nt*nf_d)
     # Check that the matrix I built for testing is indeed symmetric.
     self.assertTrue(sp.allclose(tmp_mat, tmp_mat.transpose()))
     noise_inv = Noise.get_inverse()
     noise_inv.shape = (nt*nf_d, nt*nf_d)
     # Check that the production matrix is symmetric.
     self.assertTrue(sp.allclose(noise_inv, noise_inv.transpose()))
     tmp_eye = sp.dot(tmp_mat, noise_inv)
     #print tmp_eye
     noise_inv.shape = (nf_d, nt, nf_d, nt)
     self.assertTrue(sp.allclose(tmp_eye, sp.identity(nt*nf_d)))
     # Check that the calculation of the diagonal is correct.
     noise_inv_diag = Noise.get_inverse_diagonal()
     self.assertTrue(sp.allclose(noise_inv_diag.flat, 
                                 noise_inv.flat[::nf_d*nt + 1]))
     #### Test the noise weighting of the data.
     noise_weighted_data = Noise.weight_time_stream(time_stream)
     self.assertTrue(sp.allclose(noise_weighted_data, al.dot(noise_inv,
                                                             time_stream)))
     #### Test making noise in map space.
     # First make the noise matrix by brute force.
     P = dirty_map.Pointing(("ra", "dec"), (ra, dec), map, 'nearest')
     P_mat = P.get_matrix()
     tmp_map_noise_inv = al.partial_dot(noise_inv,
                                        P_mat)
     tmp_map_noise_inv = al.partial_dot(P_mat.mat_transpose(), 
                                        tmp_map_noise_inv)
     # I mess up the metadata by doing this, but rotate the axes so they
     # are in the desired order.
     tmp_map_noise_inv = sp.rollaxis(tmp_map_noise_inv, 2, 0)
     # Now use fast methods.
     map_noise_inv = sp.zeros((nf_d, nra_d, ndec_d, nf_d, nra_d, ndec_d),
                              dtype=float)
     map_noise_inv = al.make_mat(map_noise_inv, axis_names=('freq', 'ra', 
         'dec', 'freq', 'ra', 'dec'), row_axes=(0, 1, 2), 
         col_axes=(3, 4, 5))
     start = time_module.clock()
     for ii in xrange(nf_d):
         for jj in xrange(nra_d):
             P.noise_to_map_domain(Noise, ii, jj,
                                   map_noise_inv[ii,jj,:,:,:,:])
     stop = time_module.clock()
     #print "Constructing map noise took %5.2f seconds." % (stop - start)
     self.assertTrue(sp.allclose(map_noise_inv, tmp_map_noise_inv))
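The commented-out brute-force construction in the test above amounts to a
diagonal covariance plus rank-one terms for the deweighted modes.  A small
sketch of that structure (made-up sizes; not the dirty_map internals),
checked against a dense inverse:

import numpy as np

n = 32
diag = np.full(n, 0.04)           # thermal noise level, Kelvin**2
mode = np.ones(n) / np.sqrt(n)    # a mean mode to deweight
mode_noise = 1e4                  # a large variance deweights the mode
cov = np.diag(diag) + mode_noise * np.outer(mode, mode)
cov_inv = np.linalg.inv(cov)
assert np.allclose(cov @ cov_inv, np.eye(n), atol=1e-8)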
Ejemplo n.º 56
0
def stitch(blocks) :
    """Stitches to gether data blocks from different frequency windows.

    Accepts a tuple of Data Block objects to be stitched together.
    """
    
    # Sort data blocks by frequency.
    try :
        blocks = sorted(blocks, key=lambda Data : -1*Data.field['CRVAL1'])
    except KeyError :
        raise ce.DataError('All blocks must have frequency axis'
                           ' information.')
    # Stitched data starts life as a copy of one of the old data blocks.
    OutData = copy.deepcopy(blocks[0])
    
    # First make sure all the data is compatible.
    for Data in blocks :
        # Make sure the data we need is here.
        if not (Data.field.has_key('CRVAL1') 
                and Data.field.has_key('CDELT1')
                and Data.field.has_key('CRPIX1')) :
            raise ce.DataError('All blocks must have frequency axis'
                               ' information.')
        # Make sure all the data not involved in the stitching is the same.
        # For now enforce that CDELT1 be the same for all IFs.
        for key, field_data in OutData.field.iteritems() :
            if not key in ('CRVAL1', 'CRPIX1', 'OBSFREQ', 'RESTFREQ') :
                if not Data.field.has_key(key) :
                    raise ce.DataError('All blocks must have the same data '
                                       'fields.')
                # Treat strings differently.
                if OutData.field_formats[key][-1] != 'A' :
                    if not sp.allclose(field_data, Data.field[key]) :
                        raise ce.DataError('All blocks to be stitched must '
                                           'have matching data fields except '
                                           'for frequency axis information.')
                else :
                    if not sp.alltrue(field_data == Data.field[key]) :
                        raise ce.DataError('All blocks to be stitched must '
                                           'have matching data fields except '
                                           'for frequency axis information.')
    # For now assume that the frequencies are ordered in reverse.
    if OutData.field['CDELT1'] >= 0 :
        raise NotImplementedError('Expected frequency steps to be negative.')
    
    delt = abs(OutData.field['CDELT1']) 
    # Loop over data and stitch.
    for Data in blocks[1:] :
        # Get the frequency axes.
        OutData.calc_freq()
        Data.calc_freq()

        n_over = list(Data.freq >= OutData.freq[-1]).count(True)
        if n_over == 0 :
            raise ce.DataError('Frequency windows do not overlap.')
        # Use mean, not sum, to normalize in case of flagged data.
        factor = ma.mean(OutData.data[:,:,:,-2*n_over//3:-n_over//3], axis=3)
        factor /= ma.mean(Data.data[:,:,:,n_over//3:2*n_over//3], axis=3)
        Data.data *= factor[:,:,:,sp.newaxis]
        OutData.set_data(ma.concatenate((OutData.data[:,:,:,:-n_over//2], 
                                         Data.data[:,:,:,n_over//2:]), axis=3))
    OutData.calc_freq()
    OutData.set_field('BANDWID', abs(OutData.freq[0] - OutData.freq[-1]),
                      (), format='D')

    return OutData
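A toy numeric sketch of the overlap calibration above (made-up
one-dimensional bands; the real data carries three leading axes): the
incoming band is rescaled so the middle third of the overlap matches the
existing band, and the two are joined at the overlap midpoint.

import numpy as np

out_band = np.linspace(10.0, 8.0, 50)        # existing band, descending freq
new_band = 0.5 * np.linspace(8.8, 6.8, 50)   # next band, offset in gain
n_over = 15                                  # number of overlapping channels

factor = (np.mean(out_band[-2 * n_over // 3:-n_over // 3])
          / np.mean(new_band[n_over // 3:2 * n_over // 3]))
stitched = np.concatenate((out_band[:-n_over // 2],
                           factor * new_band[n_over // 2:]))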
Ejemplo n.º 57
0
def is_point_within_rectangle(point, Xmin, Xmax):
    return sp.alltrue(point >= Xmin) and sp.alltrue(point <= Xmax)
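Hypothetical usage (with modern NumPy, sp.alltrue is spelled np.all):

import numpy as np

point = np.array([1.0, 2.0])
Xmin = np.array([0.0, 0.0])
Xmax = np.array([3.0, 3.0])
# True: every coordinate lies within the corresponding bounds.
inside = np.all(point >= Xmin) and np.all(point <= Xmax)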
Ejemplo n.º 58
0
 def test_build_noise(self):
     map = self.map
     time_stream, ra, dec, az, el, time, mask_inds = \
                                            self.DM.get_all_trimmed()
     nt = len(time)
     Noise = dirty_map.Noise(time_stream, time)
     thermal_noise_levels = sp.zeros((nf_d)) + 0.04  # Kelvin**2
     Noise.add_thermal(thermal_noise_levels)
     Noise.add_mask(mask_inds)
     self.assertTrue(sp.alltrue(Noise.diagonal[mask_inds] > 10))
     Noise.deweight_time_mean()
     Noise.deweight_time_slope()
     Noise.add_correlated_over_f(0.01, -1.2, 0.1)
     Noise.finalize()
     #### Test the full inverse.
     # First get a full representation of the noise matrix
     #tmp_mat = sp.zeros((nf_d, nt, nf_d, nt))
     #tmp_mat.flat[::nt*nf_d + 1] += Noise.diagonal.flat
     #for jj in xrange(Noise.time_modes.shape[0]):
     #    tmp_mat += (Noise.time_mode_noise[jj,:,None,:,None]
     #                * Noise.time_modes[jj,None,:,None,None]
     #                * Noise.time_modes[jj,None,None,None,:])
     #for jj in xrange(Noise.freq_modes.shape[0]):
     #    tmp_mat +=  (Noise.freq_mode_noise[jj,None,:,None,:]
     #                 * Noise.freq_modes[jj,:,None,None,None]
     #                 * Noise.freq_modes[jj,None,None,:,None])
     tmp_mat = Noise.get_mat()
     tmp_mat.shape = (nt * nf_d, nt * nf_d)
     # Check that the matrix I built for testing is indeed symmetric.
     self.assertTrue(sp.allclose(tmp_mat, tmp_mat.transpose()))
     noise_inv = Noise.get_inverse()
     noise_inv.shape = (nt * nf_d, nt * nf_d)
     # Check that the production matrix is symmetric.
     self.assertTrue(sp.allclose(noise_inv, noise_inv.transpose()))
     tmp_eye = sp.dot(tmp_mat, noise_inv)
     #print tmp_eye
     noise_inv.shape = (nf_d, nt, nf_d, nt)
     self.assertTrue(sp.allclose(tmp_eye, sp.identity(nt * nf_d)))
     # Check that the calculation of the diagonal is correct.
     noise_inv_diag = Noise.get_inverse_diagonal()
     self.assertTrue(
         sp.allclose(noise_inv_diag.flat, noise_inv.flat[::nf_d * nt + 1]))
     #### Test the noise weighting of the data.
     noise_weighted_data = Noise.weight_time_stream(time_stream)
     self.assertTrue(
         sp.allclose(noise_weighted_data, al.dot(noise_inv, time_stream)))
     #### Test making noise in map space.
     # First make the noise matrix by brute force.
     P = dirty_map.Pointing(("ra", "dec"), (ra, dec), map, 'nearest')
     P_mat = P.get_matrix()
     tmp_map_noise_inv = al.partial_dot(noise_inv, P_mat)
     tmp_map_noise_inv = al.partial_dot(P_mat.mat_transpose(),
                                        tmp_map_noise_inv)
     # I mess up the metadata by doing this, but rotate the axes so they
     # are in the desired order.
     tmp_map_noise_inv = sp.rollaxis(tmp_map_noise_inv, 2, 0)
     # Now use fast methods.
     map_noise_inv = sp.zeros((nf_d, nra_d, ndec_d, nf_d, nra_d, ndec_d),
                              dtype=float)
     map_noise_inv = al.make_mat(map_noise_inv,
                                 axis_names=('freq', 'ra', 'dec', 'freq',
                                             'ra', 'dec'),
                                 row_axes=(0, 1, 2),
                                 col_axes=(3, 4, 5))
     start = time_module.clock()
     for ii in xrange(nf_d):
         for jj in xrange(nra_d):
             P.noise_to_map_domain(Noise, ii, jj,
                                   map_noise_inv[ii, jj, :, :, :, :])
     stop = time_module.clock()
     #print "Constructing map noise took %5.2f seconds." % (stop - start)
     self.assertTrue(sp.allclose(map_noise_inv, tmp_map_noise_inv))