Example #1
def save_compound_array(db, parent, name, data, overwrite=False):
    if overwrite and name in parent:
        logger.warn('Removing existing group %s' % name)
        parent._f_getChild(name)._f_remove(recursive=True)
    grp = db.createGroup(parent, name)
    # HACK: pytables does not support compound dtypes.
    for field in data.dtype.names:
        save_array(db, grp, field, data[field], overwrite)
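
A minimal usage sketch, not taken from the original source: it assumes the legacy PyTables camelCase API used above (tables.openFile), that save_array from Example #4 below is in scope, and a hypothetical file name.

import numpy as np
import tables

# Hypothetical structured array; save_compound_array stores one child array per field.
data = np.zeros(10, dtype=[('x', 'f8'), ('flag', 'i4')])

db = tables.openFile('example.h5', 'w')  # legacy API, matching createGroup/createCArray above
save_compound_array(db, db.root, 'points', data, overwrite=True)
db.close()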
Example #2
 def onParamUpdate(self, event):
     param = self.params[event.param]
     param.setValue(event.value)
     try:
         param.setLimits(event.llimit, event.ulimit)
     except Exception:
         logger.warn('Bad limits: %f, %f' % (event.llimit, event.ulimit))
     self.sendModelUpdateEvent()
Example #3
 def onSaveButton(self, event):
     self.originalModel = deepcopy(self.model)
     with open(self.modelFile, 'w') as f:
         logger.debug('Saving model file %s.' % self.modelFile)
         try:
             f.write(str(self.originalModel))
         except Exception:
             logger.warn('Could not write cache model file %s' % self.modelFile)
Example #4
def save_array(db, parent, name, data, overwrite=False):
    if overwrite and name in parent:
        logger.warn('Removing existing array %s' % name)
        parent._f_getChild(name)._f_remove()
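    # Create a chunked CArray with light Blosc compression (complevel=1).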
    ca = db.createCArray(parent, name, Atom.from_dtype(data.dtype), data.shape,
                         filters=Filters(1, 'blosc'))
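    # Masked arrays are filled (masked entries replaced by the fill value) before writing.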
    if isinstance(data, np.ma.MaskedArray):
        data = data.filled()
    ca[...] = data
Example #5
 def writeHDF5(self, db, parent, name, overwrite=False):
     self.updateIntegratedSpec()
     
     if overwrite and name in parent:
         logger.warn('Removing existing array %s' % name)
         parent._f_getChild(name)._f_remove(recursive=True)
     grp = db.createGroup(parent, name)
     
     save_array(db, grp, 'wl', self.wl, overwrite)
     save_array(db, grp, 'f_obs', self.f_obs, overwrite)
     save_array(db, grp, 'f_err', self.f_err, overwrite)
     save_array(db, grp, 'f_flag', self.f_flag, overwrite)
     save_array(db, grp, 'mask', self.mask, overwrite)
     save_array(db, grp, 'i_f_obs', self.i_f_obs, overwrite)
     save_array(db, grp, 'i_f_err', self.i_f_err, overwrite)
     save_array(db, grp, 'i_f_flag', self.i_f_flag, overwrite)
Example #6
 def loadModel(self, model_file):
     if not path.exists(model_file):
         logger.warn('Initial model file not found (%s), guessing one. ' % model_file)
         x0 = (self.flux.shape[1] / 2.0) + 1.0
         y0 = (self.flux.shape[0] / 2.0) + 1.0
         pa, ell = ellipse_params(self.flux, x0, y0)
         r = distance(self.flux.shape, x0, y0, pa, ell)
         r = np.ma.array(r, mask=self.flux.mask)
         hlr = r50(self.flux, r)
         I_e = self.flux.max() * 0.1
         r_e = 0.5 * hlr
         n = 2.0
         I_0 = self.flux.max() * 0.1
         h = 1.0 * hlr
         model = BDModel()
         model.wl = 5635.0
         model.x0.setValue(x0)
         model.x0.setLimitsRel(10, 10)
         model.y0.setValue(y0)
         model.y0.setLimitsRel(10, 10)
         
         model.disk.I_0.setValue(I_0)
         model.disk.I_0.setLimits(0.0, 10.0 * I_0)
         model.disk.h.setValue(h)
         model.disk.h.setLimits(0.0, 5.0 * hlr)
         model.disk.PA.setValue(pa)
         model.disk.PA.setLimits(0.0, 180.0)
         model.disk.ell.setValue(ell)
         model.disk.ell.setLimits(0.0, 1.0)
     
         model.bulge.I_e.setValue(I_e)
         model.bulge.I_e.setLimits(1e-33, 3.0 * I_e)
         model.bulge.r_e.setValue(r_e)
         model.bulge.r_e.setLimits(1e-33, 2.5 * r_e)
         model.bulge.n.setValue(n, vmin=1.0, vmax=5.0)
         model.bulge.PA.setValue(pa)
         model.bulge.PA.setLimits(0.0, 180.0)
         model.bulge.ell.setValue(ell)
         model.bulge.ell.setLimits(0.0, 1.0)
         
         return model
     else:
         return BDModel.load(model_file)
Example #7
########## Decomposition 
##########
################################################################################

logger.info('Beginning decomposition.')
decomp = IFSDecomposer()
logger.info('Model using PSF FWHM = %.2f ", beta = %.2f.' % (args.modelPsfFWHM, args.modelPsfBeta))
decomp.setSynthPSF(FWHM=args.modelPsfFWHM, beta=args.modelPsfBeta, size=15)
decomp.loadData(wl, full_ifs / flux_unit, full_ifs_noise / flux_unit, np.zeros_like(full_ifs, dtype='bool'))

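# Build a single "gray" image from a narrow window around 5635 Å to fit the initial model.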
swll, swlu = 5590.0, 5680.0
sl1 = find_nearest_index(decomp.wl, swll)
sl2 = find_nearest_index(decomp.wl, swlu)
qSignal, qNoise, qWl = decomp.getSpectraSlice(sl1, sl2)

logger.warn('Computing initial model (takes a LOT of time).')
t1 = time.time()
initial_model = bd_initial_model(qSignal, qNoise, decomp.PSF, quiet=False, cache_model_file=args.cacheModel)
bulge_image, disk_image = create_model_images(initial_model, qSignal.shape, decomp.PSF)
logger.warn('Initial model time: %.2f\n' % (time.time() - t1))

logger.debug('Plotting guessed initial model.')
vmin = np.log10(qSignal.min())
vmax = np.log10(qSignal.max())
fig = plt.figure(figsize=(8, 6))
gs = plt.GridSpec(2, 3, height_ratios=[2.0, 3.0])
ax = plt.subplot(gs[0,0])
ax.imshow(np.log10(qSignal), vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(r'Total')
Example #8
def decomp(cube, sampleId, args):
    galaxyId = califa_id_from_cube(cube)
    c = DecompContainer()
    if not args.overwrite:
        logger.info('Checking if the decomposition is already done for %s ...' % galaxyId)
        try:
            c.loadHDF5(args.db, sampleId, galaxyId)
            logger.warn('Previous data found, skipping decomposition.')
            return c
        except Exception as e:
            print(e)
            logger.info('No previous data found, continuing decomposition.')
            
    logger.info('Starting fit for %s...' % galaxyId)
    dec = CALIFADecomposer(cube, grating=args.grating, nproc=args.nproc)
    npix = dec.K.qMask.sum()
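    # Require at least half of the unmasked spaxels for a fit to be attempted.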
    dec.minNPix = npix / 2
    logger.info('Minimum number of pixels for fitting: %d' % dec.minNPix)
    dec.useEstimatedVariance = args.estVar
    dec.setSynthPSF(FWHM=args.psfFWHM, beta=args.psfBeta, size=args.psfSize)
    
    logger.warn('Computing initial model using DE algorithm (takes a LOT of time).')
    t1 = time.time()
    if not path.exists(args.maskFile):
        logger.error('Mask file %s not found.' % args.maskFile)
        exit(1)
    logger.info('Using mask file %s.' % args.maskFile)
    masked_wl = load_line_mask(args.maskFile, dec.wl)
    
    l1 = find_nearest_index(dec.wl, 4500.0)
    l2 = dec.Nl_obs
    cache_file = cube + '.initmodel'
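    # Build the gray image only when there is no cached initial model for this cube.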
    if not path.exists(cache_file):
        logger.info('Creating gray image for initial model.')
        gray_image, gray_noise, _ = dec.getSpectraSlice(l1, l2, masked_wl)
    else:
        gray_image = None
        gray_noise = None
    initial_model = bd_initial_model(gray_image, gray_noise, dec.PSF, quiet=False, nproc=args.nproc,
                                            cache_model_file=cache_file)
    logger.debug('Refined initial model:\n%s\n' % initial_model)
    logger.warn('Initial model time: %.2f\n' % (time.time() - t1))
    
    t1 = time.time()
    c.zones = np.ma.array(dec.K.qZones, mask=dec.K.qZones < 0)
    c.initialParams = initial_model.getParams()
    c.attrs = dict(PSF_FWHM=args.psfFWHM,
                   PSF_beta=args.psfBeta,
                   PSF_size=args.psfSize,
                   box_step=args.boxStep,
                   box_radius=args.boxRadius,
                   orig_file=cube,
                   mask_file=args.maskFile, 
                   object_name=dec.K.galaxyName,
                   flux_unit=dec.flux_unit,
                   distance_Mpc=dec.K.distance_Mpc,
                   x0=dec.K.x0,
                   y0=dec.K.y0,
                   target_vd=dec.targetVd,
                   wl_FWHM=dec.wlFWHM)
    
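    # First pass: coarse fit over widely spaced wavelength boxes; the parameters are smoothed and refined in a finer second pass below.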
    models = dec.fitSpectra(step=50*args.boxStep, box_radius=25*args.boxStep,
                            initial_model=initial_model, mode=args.fitAlgorithm, masked_wl=masked_wl)
    c.firstPassParams = np.array([m.getParams() for m in models], dtype=models[0].dtype)
    logger.info('Done first pass modeling, time: %.2f' % (time.time() - t1))
    
    t1 = time.time()
    logger.info('Smoothing parameters.')
    models = smooth_models(models, dec.wl, degree=1)
    
    logger.info('Starting second pass modeling...')
    models = dec.fitSpectra(step=args.boxStep, box_radius=args.boxRadius,
                            initial_model=models, mode=args.fitAlgorithm, insist=False, masked_wl=masked_wl)
    logger.info('Done second pass modeling, time: %.2f' % (time.time() - t1))
    
    t1 = time.time()
    logger.info('Computing model spectra...')
    c.total.f_obs = dec.flux[::args.boxStep]
    c.total.f_err = dec.error[::args.boxStep]
    c.total.f_flag = dec.flags[::args.boxStep]
    c.total.mask = dec.K.qMask
    c.total.wl = dec.wl[::args.boxStep]
    
    c.bulge.f_obs, c.disk.f_obs = dec.getModelSpectra(models, args.nproc)
    c.bulge.mask = dec.K.qMask
    c.bulge.wl = dec.wl[::args.boxStep]
    c.disk.mask = dec.K.qMask
    c.disk.wl = dec.wl[::args.boxStep]

    # TODO: better array and dtype handling.
    c.fitParams = np.array([m.getParams() for m in models], dtype=models[0].dtype)
    
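    # Broadcast the per-box fit flag over the two spatial axes so entire bad-fit planes are flagged.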
    flag_bad_fit = c.fitParams['flag'][:, np.newaxis, np.newaxis] > 0.0
    c.updateErrorsFlags(flag_bad_fit)
    c.updateIntegratedSpec()
    
    logger.info('Saving qbick planes...')
    fname = path.join(args.zoneFileDir, '%s_%s-planes.fits' % (galaxyId, sampleId))
    save_qbick_images(c.total, dec, fname, overwrite=args.overwrite)
    fname = path.join(args.zoneFileDir, '%s_%s-bulge-planes.fits' % (galaxyId, sampleId))
    save_qbick_images(c.bulge, dec, fname, overwrite=args.overwrite)
    fname = path.join(args.zoneFileDir, '%s_%s-disk-planes.fits' % (galaxyId, sampleId))
    save_qbick_images(c.disk, dec, fname, overwrite=args.overwrite)
    
    logger.info('Saving to storage...')
    c.writeHDF5(args.db, sampleId, galaxyId, args.overwrite)
    logger.info('Storage complete, time: %.2f' % (time.time() - t1))
    
    return c
Example #9
                        help='PSF beta parameter for Moffat profile. If not set, use Gaussian.')
    parser.add_argument('--psf-size', dest='psfSize', type=int, default=15,
                        help='PSF size, in pixels. Must be an odd number.')
    parser.add_argument('--overwrite', dest='overwrite', action='store_true',
                        help='Overwrite data.')
    parser.add_argument('--nproc', dest='nproc', type=int, default=None,
                        help='Number of processors to use.')
    
    return parser.parse_args()
################################################################################


################################################################################
if __name__ == '__main__':
    args = parse_args()
    if args.verbose:
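        # A level below DEBUG lets every log record through.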
        logger.setLevel(-1)
        logger.debug('Verbose output enabled.')

    sample = load_sample(args.sample)
    sampleId = path.basename(args.sample)

    for gal in sample:
        cube = gal['cube']
        try:
            c = decomp(cube, sampleId, args)
        except Exception as e:
            logger.error('Error decomposing cube %s' % cube)
            logger.error('Exception: %s.' % str(e))
            logger.warn('Skipping to next galaxy.')
Example #10
########## Decomposition
##########
################################################################################

logger.info("Beginning decomposition.")
decomp = IFSDecomposer()
logger.info('Model using PSF FWHM = %.2f ".' % args.modelPsfFWHM)
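# No beta parameter is passed here, so a Gaussian PSF is used (cf. the --psf-beta help text in Example #9).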
decomp.setSynthPSF(FWHM=args.modelPsfFWHM, size=9)
decomp.loadData(l_ssp, full_ifs, full_ifs_noise, np.zeros_like(full_ifs, dtype="bool"))

swll, swlu = 5590.0, 5680.0
sl1 = find_nearest_index(decomp.wl, swll)
sl2 = find_nearest_index(decomp.wl, swlu)
qSignal, qNoise, qWl = decomp.getSpectraSlice(sl1, sl2)

logger.warn("Computing initial model (takes a LOT of time).")
t1 = time.time()
initial_model = bd_initial_model(qSignal, qNoise, decomp.PSF, quiet=False)
bulge_image, disk_image = create_model_images(initial_model, qSignal.shape, decomp.PSF)
logger.warn("Initial model time: %.2f\n" % (time.time() - t1))

logger.debug("Plotting guessed initial model.")
vmin = np.log10(qSignal.min())
vmax = np.log10(qSignal.max())
fig = plt.figure(figsize=(8, 6))
gs = plt.GridSpec(2, 3, height_ratios=[2.0, 3.0])
ax = plt.subplot(gs[0, 0])
ax.imshow(np.log10(qSignal), vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(r"Total")