Example #1
0
            reserr[key] = [[] for i in range(NM0,nm)]
        #use only valid inputs
        delkeys=[]
        for k in bss[0].iterkeys():
            # NOTE(review): `bss[0][k][1]==float('nan')` is always False — NaN never
            # compares equal to anything, including itself. If the intent is to drop
            # entries whose second element is NaN, this needs np.isnan(...) instead.
            if k=='flag' or k=='galnum' or bss[0][k][1]==float('nan') :#or bss[1][k][1]==bss[0][k][1]:
                delkeys.append(k)
        # remove the flagged keys from both bss dictionaries
        for k in delkeys:
            del bss[0][k]
            del bss[1][k]


        import metscales as ms
        # worker count: at least 1, capped by MAXPROCESSES
        nps = min (mpc.cpu_count()-1 or 1, MAXPROCESSES)

        if multiproc and nps>1:
            # one diagnostics object per measurement (indices 0..nm-1)
            scales=[ms.diagnostics(newnsample, logf,nps) for i in range(nm)]

            print >>logf, "\n\n\nrunning on %d threads\n\n\n"%nps
            second_args=[sample,flux,err,nm,bss,mds,VERBOSE, dust_corr,VERBOSE,res,scales,nps, logf]
            pool = mpc.Pool(processes=nps) # depends on available cores
            rr = pool.map(calc, itertools.izip(range(NM0,nm), itertools.repeat(second_args))) # for i in range(nm): result[i] = f(i, second_args)
            for ri,r  in enumerate(rr):
                for kk in r.iterkeys(): res[kk][ri]=r[kk][ri]

            # NOTE(review): this loop is an exact duplicate of the one above —
            # harmless (same assignments repeated) but almost certainly unintended.
            for ri,r  in enumerate(rr):
                for kk in r.iterkeys(): res[kk][ri]=r[kk][ri]
            pool.close() # not optimal! but easy
            pool.join()
            # NOTE(review): `i` here is the variable leaked by the list comprehension
            # above (Python 2 leaks comprehension variables), i.e. i == nm-1 — so this
            # always reads the *last* diagnostics object. Verify that is intended.
            for key in scales[i].mds.iterkeys():
                if key in Zs:
                    res[key] = np.array(res[key]).T
Example #2
0
        for key in Zserr:
            reserr[key] = [[] for i in range(NM0, nm)]
        #use only valid inputs
        delkeys = []
        for k in bss[0].iterkeys():
            # NOTE(review): `bss[0][k][1] == float('nan')` is always False — NaN never
            # compares equal to itself. Use np.isnan(...) if NaN filtering is intended.
            if k == 'flag' or k == 'galnum' or bss[0][k][1] == float('nan'):  # or bss[1][k][1]==bss[0][k][1]:
                delkeys.append(k)
        # remove the flagged keys from both bss dictionaries
        for k in delkeys:
            del bss[0][k]
            del bss[1][k]

        import metscales as ms
        # worker count: at least 1, capped by MAXPROCESSES
        nps = min(mpc.cpu_count() - 1 or 1, MAXPROCESSES)

        if multiproc and nps > 1:
            # one diagnostics object per measurement (indices 0..nm-1)
            scales = [ms.diagnostics(newnsample, logf, nps) for i in range(nm)]

            logf.write( "\n\n\nrunning on %d threads\n\n\n" % nps)
            #print >> logf, "\n\n\nrunning on %d threads\n\n\n" % nps
            second_args = [sample, flux, err, nm, bss, mds, VERBOSE, dust_corr, VERBOSE, res, scales, nps, logf]
            pool = mpc.Pool(processes=nps)  # depends on available cores
            rr = pool.map(calc, itertools.izip(range(NM0, nm), itertools.repeat(second_args)))  # for i in range(nm): result[i] = f(i, second_args)


            # copy each worker's partial results back into the shared res dict
            for ri, r  in enumerate(rr):
                for kk in r.iterkeys():
                    res[kk][ri] = r[kk][ri]

            pool.close()  # not optimal! but easy
            pool.join()
            # NOTE(review): `i` here is leaked from the list comprehension above
            # (Python 2), i.e. i == nm-1 — this always reads the last scales entry.
            for key in scales[i].mds.iterkeys():
Example #3
0
File: mcz.py Project: zpace/pyMCZ
    def sample(self, nsample=1000, from_estimate=False, test=False):
        """Monte-Carlo sample the line fluxes and compute metallicity diagnostics.

        For each of the ``self.nm`` measurements, draw ``nsample`` Gaussian
        realizations of every measured (non-NaN) line flux (mean = flux,
        sigma = error), run the metallicity diagnostics on the realizations,
        and store the results in one astropy Table per ``galnum``.

        Parameters
        ----------
        nsample : int
            Number of Monte-Carlo realizations per measurement. Must be 1
            (only via .estimate()) or >= 100, unless ``test`` is True.
        from_estimate : bool
            Set by the .estimate() wrapper; permits nsample == 1.
        test : bool
            Permit 1 < nsample < 100 for quick test runs (with a warning).

        Raises
        ------
        ValueError
            If the sample size is invalid, or if the minimum set of emission
            lines required by the diagnostics is not available.

        Side effects: sets ``self.tfcnames``, ``self.res_d``,
        ``self.nsample`` and ``self.Zdiags``.
        """
        # guard against sample sizes too small for meaningful statistics
        if (nsample == 1) and not from_estimate:
            raise ValueError("for nsample = 1, use .estimate() method!")
        elif (1 < nsample < 100) and not test:
            raise ValueError("need at least 100 samples!")
        elif (1 < nsample < 100) and test:
            print("not enough samples, remember to only " + "use this as a test-bed!")

        nsample = int(nsample)

        # one results table per spaxel/galaxy, keyed by galnum
        res_d = {}

        # keep only columns that have at least one non-NaN entry
        tfcnames = [k for k in self.flux.colnames if len(self.flux[k]) > np.isnan(self.flux[k]).sum()]
        self.tfcnames = tfcnames

        # looping over nm measurements
        pbar = ProgressBar(widgets=[Percentage(), Bar(), ETA()], maxval=self.nm).start()

        for i in range(self.NM0, self.nm):
            blockPrint()  # silence the diagnostics' stdout chatter
            galnum = self.flux["galnum"][i]
            fr = self.flux[i]
            er = self.err[i]

            # Gaussian realizations of each line flux measured in THIS row
            fluxi = {
                k: np.random.normal(fr[k], er[k], nsample)
                for k in fr.colnames
                if ((k != "galnum") and (~np.isnan(fr[k])))
            }

            # set up a table for a given galnum
            res_d[galnum] = t.Table()
            # Add a column for each sampled line flux. Guard with `n in fluxi`:
            # a column can be non-NaN somewhere (so it is in tfcnames) yet NaN
            # for this particular row, in which case it is absent from fluxi —
            # unguarded indexing raised KeyError here. This also subsumes the
            # old all-NaN re-check, which merely repeated the tfcnames filter.
            for n in tfcnames:
                if n != "galnum" and n in fluxi:
                    res_d[galnum][n] = fluxi[n]
                    res_d[galnum][n].unit = u.Unit("1e-17 erg cm^-2 s^-1")

            scales = ms.diagnostics(nsample, None, self.nps)

            # the diagnostics emit copious runtime warnings on NaN-heavy input
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                success = metallicity.calculation(
                    scales,
                    fluxi,
                    self.nm,
                    "all",
                    1,
                    self.logf,
                    disp=self.verbose,
                    dust_corr=self.dust_corr,
                    verbose=self.verbose,
                )
            if success == -1:
                raise ValueError(
                    "MINIMUM REQUIRED LINES:  [OII]3727 "
                    + "& [OIII] + 5007, or [NII]6584, and Ha & Hb if "
                    + "you want dereddening"
                )

            # keep every diagnostic that produced at least one finite value
            for k, v in scales.mds.iteritems():
                if isinstance(v, np.ndarray):
                    if np.isnan(v).sum() != len(v):
                        res_d[galnum][k] = v

            enablePrint()
            pbar.update(i)

        pbar.finish()

        self.res_d = res_d
        self.nsample = nsample
        # NOTE: uses `galnum` from the final loop iteration; assumes NM0 < nm
        self.Zdiags = res_d[galnum].colnames
Example #4
0
File: mcz.py Project: zpace/pyMCZ
    def sample(self, nsample=1000, from_estimate=False, test=False):
        """Monte-Carlo sample the line fluxes and compute metallicity diagnostics.

        For each of the ``self.nm`` measurements, draw ``nsample`` Gaussian
        realizations of every measured (non-NaN) line flux (mean = flux,
        sigma = error), run the metallicity diagnostics on them, and store
        the results in one astropy Table per ``galnum``.

        Parameters
        ----------
        nsample : int
            Number of Monte-Carlo realizations per measurement. Must be 1
            (only via .estimate()) or >= 100, unless ``test`` is True.
        from_estimate : bool
            Set by the .estimate() wrapper; permits nsample == 1.
        test : bool
            Permit 1 < nsample < 100 for quick test runs (with a warning).

        Raises
        ------
        ValueError
            If the sample size is invalid, or if the minimum set of emission
            lines required by the diagnostics is not available.

        Side effects: sets ``self.tfcnames``, ``self.res_d``,
        ``self.nsample`` and ``self.Zdiags``.
        """
        # guard against sample sizes too small for meaningful statistics
        if (nsample == 1) and not from_estimate:
            raise ValueError('for nsample = 1, use .estimate() method!')
        elif (1 < nsample < 100) and not test:
            raise ValueError('need at least 100 samples!')
        elif (1 < nsample < 100) and test:
            print('not enough samples, remember to only ' +
                  'use this as a test-bed!')

        nsample = int(nsample)

        # one results table per spaxel/galaxy, keyed by galnum
        res_d = {}

        # keep only columns that have at least one non-NaN entry
        tfcnames = [
            k for k in self.flux.colnames
            if len(self.flux[k]) > np.isnan(self.flux[k]).sum()
        ]
        self.tfcnames = tfcnames

        #looping over nm measurements
        pbar = ProgressBar(widgets=[Percentage(), Bar(),
                                    ETA()],
                           maxval=self.nm).start()

        for i in range(self.NM0, self.nm):
            blockPrint()  # silence the diagnostics' stdout chatter
            galnum = self.flux['galnum'][i]
            fr = self.flux[i]
            er = self.err[i]

            # Gaussian realizations of each line flux measured in THIS row
            fluxi = {
                k: np.random.normal(fr[k], er[k], nsample)
                for k in fr.colnames
                if ((k != 'galnum') and (~np.isnan(fr[k])))
            }

            # set up a table for a given galnum
            res_d[galnum] = t.Table()
            # Add a column for each sampled line flux. Guard with `n in fluxi`:
            # a column can be non-NaN somewhere (so it is in tfcnames) yet NaN
            # for this particular row, in which case it is absent from fluxi —
            # unguarded indexing raised KeyError here. This also subsumes the
            # old all-NaN re-check, which merely repeated the tfcnames filter.
            for n in tfcnames:
                if (n != 'galnum') and (n in fluxi):
                    res_d[galnum][n] = fluxi[n]
                    res_d[galnum][n].unit = u.Unit('1e-17 erg cm^-2 s^-1')

            scales = ms.diagnostics(nsample, None, self.nps)

            # the diagnostics emit copious runtime warnings on NaN-heavy input
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                success = metallicity.calculation(scales,
                                                  fluxi,
                                                  self.nm,
                                                  'all',
                                                  1,
                                                  self.logf,
                                                  disp=self.verbose,
                                                  dust_corr=self.dust_corr,
                                                  verbose=self.verbose)
            if success == -1:
                raise ValueError('MINIMUM REQUIRED LINES:  [OII]3727 ' + \
                    '& [OIII] + 5007, or [NII]6584, and Ha & Hb if ' + \
                    'you want dereddening')

            # keep every diagnostic that produced at least one finite value
            for k, v in scales.mds.iteritems():
                if isinstance(v, np.ndarray):
                    if np.isnan(v).sum() != len(v):
                        res_d[galnum][k] = v

            enablePrint()
            pbar.update(i)

        pbar.finish()

        self.res_d = res_d
        self.nsample = nsample
        # NOTE: uses `galnum` from the final loop iteration; assumes NM0 < nm
        self.Zdiags = res_d[galnum].colnames
Example #5
0
File: mcz.py Project: fcullen/pyMCZ
        for key in Zs:
            res[key]=[[] for i in range(NM0,nm)]

        #use only valid inputs
        delkeys=[]
        for k in bss[0].iterkeys():
            # drop bookkeeping keys and entries whose second element is zero
            if k=='flag' or k=='galnum' or bss[0][k][1]==0 :#or bss[1][k][1]==bss[0][k][1]:
                delkeys.append(k)
        # remove the flagged keys from both bss dictionaries
        for k in delkeys:
            del bss[0][k]
            del bss[1][k]


        import metscales as ms
        # worker count: at least 1, capped by MAXPROCESSES
        nps = min (mpc.cpu_count()-1 or 1, MAXPROCESSES)
        scales=ms.diagnostics(newnsample, logf,nps)

        if multiproc and nps>1:
            print >>logf, "\n\n\nrunning on %d threads\n\n\n"%nps
            second_args=[sample,flux,err,nm,bss,mds,VERBOSE, dust_corr,VERBOSE,res,scales,nps, logf]
            pool = mpc.Pool(processes=nps) # depends on available cores
            rr = pool.map(calc, itertools.izip(range(NM0,nm), itertools.repeat(second_args))) # for i in range(nm): result[i] = f(i, second_args)
            for ri,r  in enumerate(rr):
                for kk in r.iterkeys(): res[kk][ri]=r[kk][ri]

            # NOTE(review): this loop is an exact duplicate of the one above —
            # harmless (same assignments repeated) but almost certainly unintended.
            for ri,r  in enumerate(rr):
                for kk in r.iterkeys(): res[kk][ri]=r[kk][ri]
            pool.close() # not optimal! but easy
            pool.join()
        else: 
            #looping over nm spectra