Code example #1
File: fitsGBT.py    Project: adam-lewis/analysis_IM
    def _add_single_block(self, Block) :
        """Adds all the data in a DataBlock Object to the Writer such that it
        can be written to a fits file eventually."""
        
        Block.verify()
        # Merge the histories
        if self.first_block_added :
            self.history = db.History(Block.history)
        else :
            self.history = db.merge_histories(self.history, Block)
        # Some dimensioning and such
        dims = tuple(Block.dims)
        n_records = dims[0]*dims[1]*dims[2]
        block_shape = dims[0:-1]
        # For now automatically determine the format for the data field.
        data_format = str(dims[-1]) + 'E'
        if self.first_block_added :
            self.data_format = data_format
        elif self.data_format != data_format :
            raise ce.DataError('Data shape mismatch: freq axis must be same'
                               ' length for all DataBlocks added to Writer.')

        # Copy the reshaped data from the DataBlock
        data = sp.array(ma.filled(Block.data, float('nan')))
        if self.first_block_added :
            self.data = data.reshape((n_records, dims[3]))
        else :
            self.data = sp.concatenate((self.data, data.reshape((
                                        n_records, dims[3]))), axis=0)

        # Now get all stored fields for writing out.
        for field, axes in Block.field_axes.iteritems() :
            # Need to expand the field data to the full ntimes x npol x ncal
            # length (with lots of repetition).  We will use numpy broadcasting.
            broadcast_shape = [1,1,1]
            for axis in axes :
                axis_ind = list(Block.axes).index(axis)
                broadcast_shape[axis_ind] = dims[axis_ind]
            # Allocate memory for the new full field.
            data_type = Block.field[field].dtype
            field_data = sp.empty(block_shape, dtype=data_type)
            # Copy in the entries, letting numpy broadcasting expand the
            # dummy axes.
            field_data[:,:,:] = sp.reshape(Block.field[field],
                                                 broadcast_shape)
            if self.first_block_added :
                self.field[field] = field_data.reshape(n_records)
                self.formats[field] = Block.field_formats[field]
            else :
                self.field[field] = sp.concatenate((self.field[field],
                                        field_data.reshape(n_records)), axis=0)
                if self.formats[field] != Block.field_formats[field] :
                    raise ce.DataError('Format mismatch between added data'
                                       ' blocks for field: ' + field)
        self.first_block_added = False
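
The field-expansion loop above leans on numpy broadcasting: a field recorded
along only some of the axes (say, time) is reshaped so its dummy axes have
length 1, and assigning it into a full-size empty array repeats it across
those axes. A minimal standalone sketch of that pattern, with made-up
dimensions and a hypothetical single-axis field:

import numpy as np

# Made-up dimensions: 5 times, 2 polarisations, 2 cal states, 1024 channels.
dims = (5, 2, 2, 1024)
block_shape = dims[:-1]
all_axes = ('time', 'pol', 'cal', 'freq')

# A hypothetical field that varies only along the time axis.
field = np.arange(5, dtype=float)
axes = ('time',)

# The field's own axes keep their length; every other axis collapses to 1.
broadcast_shape = [1, 1, 1]
for axis in axes:
    axis_ind = list(all_axes).index(axis)
    broadcast_shape[axis_ind] = dims[axis_ind]

# Assigning the reshaped field into a full-size array repeats it along the
# dummy axes under numpy's broadcasting rules.
field_data = np.empty(block_shape, dtype=field.dtype)
field_data[:, :, :] = field.reshape(broadcast_shape)

n_records = dims[0] * dims[1] * dims[2]
print(field_data.reshape(n_records))  # each time value appears npol*ncal times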
Code example #2
File: fitsGBT.py    Project: wheeyeon/analysis_IM
(The _add_single_block implementation in this fork is identical to code
example #1 above.)
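
As an aside, the data_format string that examples #1 and #2 build
(str(dims[-1]) + 'E', e.g. '1024E') is a FITS binary-table TFORM code: a
1024-element vector of 32-bit floats per row. The Writer's actual output step
is not shown here, but as a rough illustration of where such records end up,
here is a hedged sketch using astropy.io.fits (the modern successor to the
pyfits-era interface this code predates); the column names and shapes are
invented:

import numpy as np
from astropy.io import fits

# Invented record layout: 20 records of 1024-channel float32 spectra.
n_records, nfreq = 20, 1024
data = np.zeros((n_records, nfreq), dtype=np.float32)
scan = np.repeat(np.arange(5, dtype=np.int32), 4)

cols = [
    # '1024E' = 1024 single-precision floats per row, matching data_format.
    fits.Column(name='DATA', format='%dE' % nfreq, array=data),
    # '1J' = one 32-bit integer per row, a typical per-record field.
    fits.Column(name='SCAN', format='1J', array=scan),
]
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto('example.fits', overwrite=True)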
Code example #3
 def test_merge_multiple_histories(self) :
     entry1 = 'Read from file.'
     entry2 = 'Saved to file.'
     DBs = ()
     n = 10
     for ii in range(n) :
         tempDB = db.DataBlock()
         tempDB.add_history(entry1, 'filename: ' + str(ii))
         tempDB.add_history(entry2, 'saved filename not iterated')
         DBs = DBs + (tempDB, )
     merged = db.merge_histories(*DBs)
     self.assertEqual(len(merged['000: '+entry1]), n)
     self.assertEqual(len(merged['001: '+entry2]), 1)
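
The assertions above pin down the merge semantics: history entries are keyed
in insertion order as '000: ...', '001: ...', details from every block are
collected, and identical details collapse to one. The following is a
from-scratch approximation of that behaviour inferred only from these tests;
it is NOT the analysis_IM implementation, and _FakeBlock merely stands in for
the history bookkeeping of db.DataBlock:

class _FakeBlock(object):
    # Hypothetical stand-in for db.DataBlock's history bookkeeping.
    def __init__(self):
        self.history = {}

    def add_history(self, entry, details):
        if isinstance(details, str):
            details = (details,)
        # Entries are keyed with a zero-padded index in insertion order.
        key = '%03d: %s' % (len(self.history), entry)
        self.history[key] = list(details)

def merge_histories_sketch(*blocks):
    # Collect details for each entry across all blocks, dropping duplicates.
    merged = {}
    for block in blocks:
        for key, details in block.history.items():
            merged.setdefault(key, [])
            for detail in details:
                if detail not in merged[key]:
                    merged[key].append(detail)
    return merged

blocks = []
for ii in range(10):
    b = _FakeBlock()
    b.add_history('Read from file.', 'filename: %d' % ii)
    b.add_history('Saved to file.', 'saved filename not iterated')
    blocks.append(b)

merged = merge_histories_sketch(*blocks)
print(len(merged['000: Read from file.']))  # 10 -- one distinct detail per block
print(len(merged['001: Saved to file.']))   # 1  -- identical details collapse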
Code example #4
(Identical to code example #3, apart from whitespace around colons and
operators.)
Code example #5
 def test_merge_histories(self) :
     # Basic tests
     self.TestDB.add_history(self.hist_str, self.hist_detail)
     SecondDB = db.DataBlock()
     second_details = ('second file name', )
     SecondDB.add_history(self.hist_str, second_details)
     merged = db.merge_histories(self.TestDB, SecondDB)
     self.assertEqual(len(merged.keys()), 1)
     self.assertTrue(second_details[0] in merged['000: '+self.hist_str])
     self.assertTrue(self.hist_detail[0] in merged['000: '+self.hist_str])
     self.assertEqual(len(merged['000: '+self.hist_str]), 3)
     # Enforce matching.
     ThirdDB = db.DataBlock()
     ThirdDB.add_history(self.hist_str, self.hist_detail)
     ThirdDB.add_history('Read from file.', self.hist_detail)
     self.assertRaises(ce.DataError, db.merge_histories, SecondDB, ThirdDB)
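
Example #5 adds an extra constraint: merging blocks whose entry sequences
disagree raises ce.DataError. Extending the sketch after example #3 with that
guard (again inferred only from the assertRaises call; whether the real code
requires exact equality is an assumption, and a plain ValueError stands in
for ce.DataError here):

def merge_histories_checked(*blocks):
    # Reject blocks whose sets of history entries do not all match.
    reference = sorted(blocks[0].history.keys())
    for block in blocks[1:]:
        if sorted(block.history.keys()) != reference:
            raise ValueError('history entries do not match across blocks')
    return merge_histories_sketch(*blocks)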
Code example #6
(Identical to code example #5, apart from whitespace.)