Example #1
import os

import numpy as np
from astropy.io import fits
from spectral_cube import SpectralCube


def writeplanes(save_name='/mnt/work/erosolow/GRS_13CO_all.fits'):
    spatial_template = fits.open('INTEG/COHRS_RELEASE1_FULL_INTEG.fit')
    spectral_template = SpectralCube.read('reprojected.fits')

    # Smoosh astrometry components together
    spatial_header = spatial_template[0].header
    spectral_header = spectral_template.header

    new_header = spatial_header.copy()
    new_header["NAXIS"] = 3
    for keyword in ['NAXIS3', 'CRVAL3', 'CDELT3', 'CRPIX3', 'CUNIT3']:
        new_header[keyword] = spectral_header[keyword]
    new_header['BMAJ'] = 14. / 3600  # 14 arcsec beam, in degrees
    new_header['BMIN'] = 14. / 3600
    new_header['BPA'] = 0.00
    
    if os.path.exists(save_name):
        raise Exception("The file name {} already "
                        "exists".format(save_name))

    # Open the output file and stream planes into it one at a time.
    output_fits = fits.StreamingHDU(save_name, new_header)
    # Set up the common velocity axis and write out one plane per channel.
    vel = np.linspace(-30, 160, 191)
    for v in vel:
        # planesdir is assumed to be defined at module level in the
        # original script.
        output_fits.write(fits.getdata(planesdir +
                                       'GRSPLANE_{0}.fits'.format(v)))

    output_fits.close()
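
The pattern behind this example is compact: build a header that fully describes the final data array up front, open a fits.StreamingHDU, and push chunks until the declared size is reached. A minimal self-contained sketch of that pattern, with illustrative file name and dimensions (not taken from the example above):

import numpy as np
from astropy.io import fits

# The header must describe the complete data array before streaming.
header = fits.Header()
header['SIMPLE'] = True
header['BITPIX'] = -32      # 32-bit IEEE floats
header['NAXIS'] = 3
header['NAXIS1'] = 64
header['NAXIS2'] = 64
header['NAXIS3'] = 10

shdu = fits.StreamingHDU('cube.fits', header)
for _ in range(10):
    # write() returns True once all the data promised by the header
    # have been written.
    complete = shdu.write(np.zeros((64, 64), dtype=np.float32))
shdu.close()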
Example #2
def _make_streaming_hdu(self, fileobj):
    hd = fits.Header()
    hd['SIMPLE'] = (True, 'conforms to FITS standard')
    hd['BITPIX'] = (32, 'array data type')
    hd['NAXIS'] = (2, 'number of array dimensions')
    hd['NAXIS1'] = 5
    hd['NAXIS2'] = 5
    hd['EXTEND'] = True
    return fits.StreamingHDU(fileobj, hd)
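
Outside the test class this helper comes from, the same header supports a complete write; a short sketch, with an illustrative file name and the dtype chosen to match BITPIX = 32 (int32):

import numpy as np
from astropy.io import fits

hd = fits.Header()
hd['SIMPLE'] = (True, 'conforms to FITS standard')
hd['BITPIX'] = (32, 'array data type')
hd['NAXIS'] = (2, 'number of array dimensions')
hd['NAXIS1'] = 5
hd['NAXIS2'] = 5
hd['EXTEND'] = True

shdu = fits.StreamingHDU('streamed.fits', hd)
shdu.write(np.arange(25, dtype=np.int32).reshape(5, 5))  # dtype must match BITPIX
shdu.close()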
Example #3
    def test_streaming_hdu_file_wrong_mode(self):
        """
        Test that streaming an HDU to a file opened in the wrong mode fails as
        expected.
        """

        # A write-only file object is the "wrong mode" here: constructing
        # the StreamingHDU is expected to raise ValueError.
        with pytest.raises(ValueError):
            with open(self.temp('new.fits'), 'wb') as f:
                header = fits.Header()
                fits.StreamingHDU(f, header)
Example #4
from sys import argv

import numpy as np
import pandas as pd
from astropy.io import fits
from astropy.table import Table

# hdr (a primary header), bPotential, NSIDE, N and hpb_type are assumed to
# be defined earlier in the original script.
hdr['EXTEND'] = True
primary_hdu = fits.PrimaryHDU(header=hdr)
primary_hdu.writeto('test.fits')

# Use BinTableHDU as a template
hdr = fits.BinTableHDU(Table(names=['SIGNAL'],
                             dtype=['=f4' if bPotential else '=i4']),
                       name='BINTABLE').header
hdr['ORDERING'] = ("RING", "Pixel ordering scheme, either RING or NESTED")
hdr['INDXSCHM'] = ("IMPLICIT", "Pixel indexing scheme (IMPLICIT or EXPLICIT)")
hdr['NSIDE'] = (NSIDE, "Resolution parameter for HEALPIX")
hdr['COORDSYS'] = ("C", "Pixelisation coordinate system")
hdr['PIXTYPE'] = ("HEALPIX", "HEALPIX Pixelisation")
hdr['NAXIS'] = 2
hdr['NAXIS2'] = N
hdr['NAXIS1'] = 1
hdr['BITPIX'] = -32 if bPotential else 32

hdu = fits.StreamingHDU('test.fits', hdr)
for fname in argv[1:]:
    with open(fname, 'rb') as hpb:
        data = np.fromfile(hpb, dtype=hpb_type)
        data = pd.DataFrame(data, columns=data.dtype.names)
        if bPotential:
            hdu.write(data['potential'].values)
        else:
            data = (data.grouped + data.ungrouped).to_frame()
            data.columns = ['signal']
            hdu.write(data['signal'].values)
hdu.close()
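
Because this snippet edits the BINTABLE template header by hand, there is no guarantee the streamed bytes parse cleanly as table rows; still, the headers can always be inspected afterwards with astropy:

from astropy.io import fits

with fits.open('test.fits') as hdul:
    hdul.info()  # primary HDU plus the streamed extension
    print(hdul[1].header['NSIDE'], hdul[1].header['ORDERING'])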
Example #5
def write_fits(filename, data, header, extension, extname, comm):
    """
    Collectively write local arrays into a single FITS file.

    Parameters
    ----------
    filename : str
        The FITS file name.
    data : ndarray
        The array to be written.
    header : pyfits.Header
        The data FITS header. May be None, in which case a minimal FITS
        header is inferred from the data.
    extension : boolean
        If True, the data will be written as an extension to an already
        existing FITS file.
    extname : str
        The FITS extension name. Use None to write the primary HDU.
    comm : mpi4py.Comm
        The MPI communicator of the local arrays. Use MPI.COMM_SELF if the data
        are not meant to be combined into a global array. Make sure that the
        MPI processes are not executing this routine with the same file name.

    """
    # check if the file name is the same for all MPI jobs
    files = comm.allgather(filename + str(extname))
    all_equal = all(f == files[0] for f in files)
    if comm.size > 1 and not all_equal:
        raise ValueError('The file name is not the same for all MPI jobs.')
    ndims = comm.allgather(data.ndim)
    if any(n != ndims[0] for n in ndims):
        raise ValueError("The arrays have an incompatible number of dimensions"
                         ": '{0}'.".format(', '.join(str(n) for n in ndims)))
    ndim = ndims[0]
    shapes = comm.allgather(data.shape)
    if any(s[1:] != shapes[0][1:] for s in shapes):
        raise ValueError("The arrays have incompatible shapes: '{0}'.".format(
            strshape(shapes)))

    # get header
    if header is None:
        header = create_fitsheader_for(data, extname=extname)
    else:
        header = header.copy()
    if extname is not None:
        header['extname'] = extname

    # we remove the file first to avoid an annoying pyfits informative message
    if not extension:
        if comm.rank == 0:
            try:
                os.remove(filename)
            except OSError:
                pass

    # case without MPI communication
    if comm.size == 1:
        if not extension:
            hdu = pyfits.PrimaryHDU(data, header)
            hdu.writeto(filename, overwrite=True)
        else:
            pyfits.append(filename, data, header)
        return

    # get global/local parameters
    nglobal = sum(s[0] for s in shapes)
    s = split(nglobal, comm.size, comm.rank)
    nlocal = s.stop - s.start
    if data.shape[0] != nlocal:
        raise ValueError(
            "On rank {}, the local array shape '{}' is invalid. The first "
            "dimension does not match the expected local number '{}' given "
            "the global number '{}'.{}".format(
                comm.rank, data.shape, nlocal, nglobal,
                '' if comm.rank > 0 else ' Shapes are: {}.'.format(shapes)))

    # write FITS header
    if comm.rank == 0:
        header['NAXIS' + str(ndim)] = nglobal
        shdu = pyfits.StreamingHDU(filename, header)
        # _datLoc is the private offset, in bytes, at which the HDU's data
        # section starts.
        data_loc = shdu._datLoc
        shdu.close()
    else:
        data_loc = None
    data_loc = comm.bcast(data_loc)

    # get a communicator excluding the processes which have no work to do
    # (Create_subarray does not allow 0-sized subarrays)
    chunk = product(data.shape[1:])  # module helper: product of a sequence
    rank_nowork = min(comm.size, nglobal)
    # Group.Incl returns a new group restricted to the working ranks.
    group = comm.Get_group().Incl(list(range(rank_nowork)))
    newcomm = comm.Create(group)

    # collectively write data
    if comm.rank < rank_nowork:
        # FITS data are big-endian: byteswap the raw bytes but keep a native
        # dtype, because mpi4py 1.2.2 cannot look up big-endian types ('>d').
        if (sys.byteorder == 'little' and data.dtype.byteorder == '=') or \
                data.dtype.byteorder == '<':
            data = data.byteswap()
        data = data.newbyteorder('=')
        mtype = DTYPE_MAP[data.dtype]
        ftype = mtype.Create_subarray([nglobal * chunk], [nlocal * chunk],
                                      [s.start * chunk])
        ftype.Commit()
        f = MPI.File.Open(newcomm,
                          filename,
                          amode=MPI.MODE_APPEND | MPI.MODE_WRONLY
                          | MPI.MODE_CREATE)
        f.Set_view(data_loc, mtype, ftype, 'native', MPI.INFO_NULL)
        f.Write_all(data)
        f.Close()
        ftype.Free()
    newcomm.Free()

    # pad the FITS data section with zeros up to the next 2880-byte block
    if comm.rank == 0:
        datasize = nglobal * chunk * data.dtype.itemsize
        BLOCK_SIZE = 2880
        padding = -datasize % BLOCK_SIZE  # 0 if already block-aligned
        with open(filename, 'ab') as f:
            if f.tell() - data_loc != datasize:
                raise RuntimeError('Unexpected file size.')
            f.write(padding * b'\0')

    comm.Barrier()
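
A minimal usage sketch, assuming write_fits is importable along with its module-level helpers (create_fitsheader_for, split, strshape, DTYPE_MAP) and that mpi4py is installed; each rank contributes a block of rows that is concatenated along the first axis (run with e.g. mpirun -n 4):

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
# Four rows per rank; the global array has 4 * comm.size rows.
local = np.full((4, 8), comm.rank, dtype=float)
write_fits('combined.fits', local, None, False, None, comm)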