Example #1
	def get_5pops_mass_redshift_bins(self, znodes, mnodes, linear_mass=1):
		# Population suffixes mapped to their sfg codes.
		pops = {'sf': 1, 'qt': 0, 'agn': 2, 'loc': 3, 'sb': 4}
		self.id_z_ms_5pop = {}
		for iz in range(len(znodes) - 1):
			for jm in range(len(mnodes) - 1):
				# Redshift selection for this bin.
				ind_z = ( (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) )
				# Mass selection, in linear or logarithmic units.
				if linear_mass == 1:
					ind_m = ( (10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )
				else:
					ind_m = ( (self.table[self.mkey] >= np.min(mnodes[jm:jm+2])) & (self.table[self.mkey] < np.max(mnodes[jm:jm+2])) )

				key = ('z_' + clean_args('{:.2f}'.format(znodes[iz])) + '_' + clean_args('{:.2f}'.format(znodes[iz+1])) +
					'__m_' + clean_args('{:.2f}'.format(mnodes[jm])) + '_' + clean_args('{:.2f}'.format(mnodes[jm+1])))
				for suf, sfg_code in pops.items():
					self.id_z_ms_5pop[key + '_' + suf] = self.table.ID[(self.table.sfg == sfg_code) & ind_z & ind_m].values
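The dictionary keys built above are plain strings sanitized by clean_args so they can later serve as fit-parameter names. A minimal, self-contained sketch of the naming scheme; this clean_args is an illustrative stand-in, not the package's actual helper:

def clean_args(dirty_args):
    # Hypothetical stand-in: strip characters ('.', '-') that are illegal in parameter names.
    return dirty_args.replace('.', 'p').replace('-', '_')

znodes, mnodes = [0.5, 1.0], [10.0, 11.0]
key = ('z_' + clean_args('{:.2f}'.format(znodes[0])) + '_' + clean_args('{:.2f}'.format(znodes[1])) +
       '__m_' + clean_args('{:.2f}'.format(mnodes[0])) + '_' + clean_args('{:.2f}'.format(mnodes[1])) + '_sf')
print(key)  # z_0p50_1p00__m_10p00_11p00_sf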
Example #2
def propagate_predict(modelname, dissimname, predictionsname, outdir=None):
    if outdir is None:
        outdir = tempfile.mkdtemp(dir=os.curdir, prefix='out')
    elif not os.path.exists(outdir):
        tsh.makedirs(outdir)
    args = {}
    predictions_meta, predictions = read_listfile(predictionsname)
    args.update(predictions_meta)
    dissim_meta, sample_ids, dissim = read_weightsfile(dissimname)
    assert (predictions['id'] == np.array(sample_ids)).all()
    assert predictions_meta['input_name'] == dissim_meta['input_name'], \
            'Expecting same input names (%s x %s)' % (predictions_meta['input_name'], dissim_meta['input_name'])
    inputname = predictions_meta['input_name']
    args.update(dissim_meta)
    model = read_propagatorfile(modelname)
    args.update(model['meta'])
    method_name = model['propagator']['method_name']
    del model['propagator']['method_name']
    args.update(model['propagator'])
    args, prop = propagate(method_name,
                           args,
                           predictions,
                           dissim,
                           output_dir=outdir)
    clean_args(args)
    del args['cv_results']
    write_listfile(os.path.join(outdir, inputname + '-propagated.csv.gz'),
                   prop, **args)
Example #3
def classifier_predict(listname, modelname, outdir=None, n_jobs=None):
    if outdir is None:
        outdir = tempfile.mkdtemp(dir=os.curdir, prefix='out')
    elif not os.path.exists(outdir):
        tsh.makedirs(outdir)
    inputname = os.path.splitext(os.path.basename(listname))[0]
    if listname.endswith('.gz'):
        inputname = os.path.splitext(inputname)[0]
    meta, data = read_listfile(listname)
    classifier = read_classifierfile(modelname)
    feature_method = classifier['features']['meta']['feature_method']
    feature_args = meta.copy()
    # Training input_name would shadow the current one.
    del classifier['features']['meta']['input_name']
    featurename = os.path.join(outdir, inputname + '-feats.csv.gz')
    if os.path.exists(featurename):
        _, features = read_listfile(featurename)
    else:
        feature_args.update(classifier['features']['meta'])
        args, features = compute_features(feature_method, feature_args, data,
                input_name=inputname, n_jobs=n_jobs, output_dir=outdir)
        assert (data['id'] == features['id']).all()
        clean_args(args)
        write_listfile(featurename, features, input_name=inputname, **args)
    labels_name = classifier['meta']['truth'] + '_labels'
    labels = classifier['meta'][labels_name]
    pred = predict(classifier['classifier'], sorted(labels.keys()), features,
            output_dir=outdir)
    write_listfile(os.path.join(outdir, inputname + '-predictions.csv.gz'), pred,
            classifier_name=modelname, truth=classifier['meta']['truth'],
            labels_name=labels, input_name=inputname)
Example #4
def dissimilarities(methodname, listname, argsname=None, n_jobs=None, outdir=None):
    if outdir is None:
        outdir = tempfile.mkdtemp(dir=os.curdir, prefix='out')
    elif not os.path.exists(outdir):
        tsh.makedirs(outdir)
    inputname = os.path.splitext(os.path.basename(listname))[0]
    if listname.endswith('.gz'):
        inputname = os.path.splitext(inputname)[0]
    meta, data = read_listfile(listname)
    args = meta
    if argsname is not None:
        args.update(read_argsfile(argsname))
    args, w = compute_dissimilarity(methodname,
                                    args,
                                    data,
                                    n_jobs=n_jobs,
                                    output_dir=outdir,
                                    input_name=inputname)
    if 'threshold' in args and args['threshold'] != 'False':
        args, w = threshold_dissimilarity(args['threshold'], args, w)
    dissim = prepare_weights_data(data['id'], data.dtype['id'], w)
    clean_args(args)
    write_listfile(os.path.join(outdir, inputname + '-dissim.csv.gz'),
                   dissim,
                   input_name=inputname,
                   **args)
Example #5
	def get_criteria_specific_redshift_bins(self, znodes, mnodes, sfg=1, criteria='', crange=[1.0], initialize_pop=False):
		pop = ['qt', 'sf']
		nc = len(crange)
		if initialize_pop: self.id_crit = {}

		for iz in range(len(znodes) - 1):
			for jm in range(len(mnodes) - 1):
				# Redshift/mass selection shared by every criteria cut in this bin.
				ind_mz = ( (self.table.sfg == sfg) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
					(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )
				key = 'z_'+'{:.2f}'.format(znodes[iz])+'_'+'{:.2f}'.format(znodes[iz+1])+'__m_'+'{:.2f}'.format(mnodes[jm])+'_'+'{:.2f}'.format(mnodes[jm+1])+'__'+criteria
				if nc > 1:
					# Bin the criteria column into the ranges defined by crange.
					for kc in range(nc - 1):
						ind_crit = ind_mz & (clean_nans(self.table[criteria]) >= crange[kc]) & (clean_nans(self.table[criteria]) < crange[kc+1])
						arg = key+'_'+'{:.2f}'.format(crange[kc])+'_'+'{:.2f}'.format(crange[kc+1])+'_'+pop[sfg]
						self.id_crit[clean_args(arg)] = self.table.ID[ind_crit].values
				else:
					# A single node: split the sample above and below crange[0].
					ind_above = ind_mz & (clean_nans(self.table[criteria]) >= crange[0])
					ind_below = ind_mz & (clean_nans(self.table[criteria]) < crange[0])
					self.id_crit[clean_args(key+'_ge_'+'{:.2f}'.format(crange[0])+'_'+pop[sfg])] = self.table.ID[ind_above].values
					self.id_crit[clean_args(key+'_lt_'+'{:.2f}'.format(crange[0])+'_'+pop[sfg])] = self.table.ID[ind_below].values
Example #6
	def get_general_redshift_bins(self, znodes, mnodes, sfg=1, suffx='', Fcut=25, Ahat=1.0, initialize_pop=False):
		if initialize_pop: self.id_z_ms = {}
		for iz in range(len(znodes) - 1):
			for jm in range(len(mnodes) - 1):
				# Select the requested population in this redshift/mass bin.
				ind_mz = ( (self.table.sfg == sfg) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
					(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )

				key = 'z_'+clean_args('{:.2f}'.format(znodes[iz]))+'_'+clean_args('{:.2f}'.format(znodes[iz+1]))+'__m_'+clean_args('{:.2f}'.format(mnodes[jm]))+'_'+clean_args('{:.2f}'.format(mnodes[jm+1]))
				self.id_z_ms[key + suffx] = self.table.ID[ind_mz].values
Example #7
	def get_mass_redshift_bins(self, znodes, mnodes, sfg=1, pop_suffix='', initialize_pop=False):
		if initialize_pop: self.id_z_ms_pop = {}
		for iz in range(len(znodes) - 1):
			for jm in range(len(mnodes) - 1):
				ind_mz = ( (self.table.sfg == sfg) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
					(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )

				key = 'z_'+clean_args('{:.2f}'.format(znodes[iz]))+'_'+clean_args('{:.2f}'.format(znodes[iz+1]))+'__m_'+clean_args('{:.2f}'.format(mnodes[jm]))+'_'+clean_args('{:.2f}'.format(mnodes[jm+1]))
				self.id_z_ms_pop[key + pop_suffix] = self.table.ID[ind_mz].values
Example #8
	def get_sf_qt_mass_redshift_bins(self, znodes, mnodes):
		self.id_z_ms = {}
		for iz in range(len(znodes) - 1):
			for jm in range(len(mnodes) - 1):
				ind_mz_sf = ( (self.table.sfg == 1) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
					(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )
				ind_mz_qt = ( (self.table.sfg == 0) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
					(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )

				key = 'z_'+clean_args('{:.2f}'.format(znodes[iz]))+'_'+clean_args('{:.2f}'.format(znodes[iz+1]))+'__m_'+clean_args('{:.2f}'.format(mnodes[jm]))+'_'+clean_args('{:.2f}'.format(mnodes[jm+1]))
				self.id_z_ms[key + '_sf'] = self.table.ID[ind_mz_sf].values
				self.id_z_ms[key + '_qt'] = self.table.ID[ind_mz_qt].values
Example #9
	def get_sf_qt_mass_lookback_time_bins(self, tnodes, mnodes):
		self.id_lookt_mass = {}
		# tnodes are lookback times (Gyr); convert them to redshift nodes.
		age_universe = cosmo.age(0).value  # 13.797617455819209 Gyr
		znodes = np.array([z_at_value(cosmo.age, (age_universe - i) * u.Gyr) for i in tnodes])

		for iz in range(len(znodes) - 1):
			for jm in range(len(mnodes) - 1):
				ind_mt_sf = ( (self.table.sfg == 1) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
					(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )
				ind_mt_qt = ( (self.table.sfg == 0) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
					(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )

				key = 'lookt_'+clean_args('{:.2f}'.format(tnodes[iz]))+'_'+clean_args('{:.2f}'.format(tnodes[iz+1]))+'__m_'+clean_args('{:.2f}'.format(mnodes[jm]))+'_'+clean_args('{:.2f}'.format(mnodes[jm+1]))
				self.id_lookt_mass[key + '_sf'] = self.table.ID[ind_mt_sf].values
				self.id_lookt_mass[key + '_qt'] = self.table.ID[ind_mt_qt].values
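This example converts lookback-time nodes to redshift nodes with astropy's z_at_value. A standalone sketch of just that conversion (Planck15 stands in here for however cosmo is configured in the package):

import numpy as np
import astropy.units as u
from astropy.cosmology import Planck15 as cosmo, z_at_value

tnodes = np.array([1.0, 4.0, 8.0])  # lookback-time nodes in Gyr
age_universe = cosmo.age(0).value   # ~13.8 Gyr
# Redshift at which the universe was (age_universe - t) old:
znodes = np.array([float(z_at_value(cosmo.age, (age_universe - t) * u.Gyr)) for t in tnodes])
print(znodes)  # increasing redshift edges, one per lookback-time node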
Example #10
	def get_mass_redshift_uvj_bins(self, znodes, mnodes, cnodes, linear_mass=1):
		# Populations: sfg codes below len(cnodes)-1 are star-forming ('sf0', 'sf1', ...); the last is quiescent ('qt').
		self.id_z_ms_pop = {}
		for iz in range(len(znodes) - 1):
			for jm in range(len(mnodes) - 1):
				for kc in range(len(cnodes)):
					if linear_mass == 1:
						ind_mz = ( (self.table.sfg == kc) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
							(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )
					else:
						ind_mz = ( (self.table.sfg == kc) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
							(self.table[self.mkey] >= np.min(mnodes[jm:jm+2])) & (self.table[self.mkey] < np.max(mnodes[jm:jm+2])) )

					csuf = 'sf' + str(kc) if kc < len(cnodes) - 1 else 'qt'
					key = 'z_'+clean_args('{:.2f}'.format(znodes[iz]))+'_'+clean_args('{:.2f}'.format(znodes[iz+1]))+'__m_'+clean_args('{:.2f}'.format(mnodes[jm]))+'_'+clean_args('{:.2f}'.format(mnodes[jm+1]))
					self.id_z_ms_pop[key + '_' + csuf] = self.table.ID[ind_mz].values
Example #11
    def __init__(self, action=None, delta=None, when=None, recurring=None, name=None):
        if not when and not delta:
            raise ValueError('Need a time or time span to schedule at')

        if not name:
            name = action.__name__

        if type(when) is time:
            # A bare time means today at that time, or tomorrow if it has already passed.
            today = date.today()
            now = datetime.now(tzutc()).timetz()
            when = datetime.combine(today if now < when else today + timedelta(days=1), when)
        elif type(when) is date:
            when = datetime.combine(when, time(0, 0, tzinfo=tzutc()))

        if delta and not when:  # provide a period but no start
            self.delta = delta
            self.next = datetime.now(tzutc()) + delta
        elif when and not delta:  # provide a start but no period
            self.delta = when - datetime.now(tzutc())
            self.next = when
        else:  # provide a start and period, this should always be recurring
            self.delta = delta
            self.next = when

        self.recurring = recurring
        self.done = False
        self.name = name

        Hook.__init__(self, events.TASK, clean_args(action if action else identity))
        logger.debug(self)
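The trickiest branch above turns a bare time into the next concrete datetime. A standalone sketch of that arithmetic, runnable without the Hook/events machinery (next_occurrence is an illustrative name; it needs python-dateutil, as the original does):

from datetime import date, time, datetime, timedelta
from dateutil.tz import tzutc

def next_occurrence(when):
    # Return the next datetime at which a daily `when` (a tz-aware time) occurs.
    today = date.today()
    now = datetime.now(tzutc()).timetz()
    # If `when` is still ahead of us today, schedule today; otherwise tomorrow.
    return datetime.combine(today if now < when else today + timedelta(days=1), when)

print(next_occurrence(time(23, 59, tzinfo=tzutc())))
print(next_occurrence(time(0, 1, tzinfo=tzutc())))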
Example #12
	def get_subpop_ids(self, znodes, mnodes, pop_dict, linear_mass=1, lookback_time=False):
		self.subpop_ids = {}
		if lookback_time:
			# znodes are lookback times (Gyr); convert them to redshift nodes.
			age_universe = cosmo.age(0).value  # 13.797617455819209 Gyr
			znodes = np.array([z_at_value(cosmo.age, (age_universe - i) * u.Gyr) for i in znodes])

		for iz in range(len(znodes) - 1):
			for jm in range(len(mnodes) - 1):
				for k in pop_dict:
					if linear_mass == 1:
						ind_mz = ( (self.table.sfg.values == pop_dict[k][0]) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
							(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )
					else:
						ind_mz = ( (self.table.sfg == pop_dict[k][0]) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
							(self.table[self.mkey] >= np.min(mnodes[jm:jm+2])) & (self.table[self.mkey] < np.max(mnodes[jm:jm+2])) )

					key = 'z_'+clean_args('{:.2f}'.format(znodes[iz]))+'_'+clean_args('{:.2f}'.format(znodes[iz+1]))+'__m_'+clean_args('{:.2f}'.format(mnodes[jm]))+'_'+clean_args('{:.2f}'.format(mnodes[jm+1]))
					self.subpop_ids[key + '_' + k] = self.table.ID[ind_mz].values
Example #15
def build_training_set(db, stacked_flux_densities, features_list, znodes, mnodes, knodes, Y_dict=None, k_init=0, k_final=None, cc=0.5, ngal_cut=10):

    nz = len(znodes)-1
    nm = len(mnodes)-1
    nk = len(knodes)
    if k_final is None:
        k_final = nk
    training_set = {}
    if Y_dict is not None:
        # Pick the prediction key; 'Y_err' holds its uncertainties.
        key = [i for i in Y_dict.keys() if i != 'Y_err'][0]
        training_set[key] = []
        training_set['Y_err'] = []
    for ft in features_list:
        if ft in ['LMASS','lmass']:
            training_set['stellar_mass'] = []
        else:
            training_set[ft] = []

    for k in range(nk)[k_init:k_final]:
        for im in range(nm):
            mn = mnodes[im:im+2]
            m_suf = '{:.2f}'.format(mn[0])+'-'+'{:.2f}'.format(mn[1])
            for iz in range(nz):
                zn = znodes[iz:iz+2]
                z_suf = '{:.2f}'.format(zn[0])+'-'+'{:.2f}'.format(zn[1])

                arg = clean_args('z_'+z_suf+'__m_'+m_suf+'_'+knodes[k])
                ind = stacked_flux_densities.bin_ids[arg]
                ngal = len(ind)
                completeness_flag = completeness_flag_neural_net([np.mean(zn)], [np.mean(mn)], sfg=k, completeness_cut=cc)

                if ngal > ngal_cut and completeness_flag:
                    if Y_dict is not None:
                        try:
                            training_set[key].append(Y_dict[key][arg][0])
                        except IndexError:
                            training_set[key].append(Y_dict[key][arg])
                        training_set['Y_err'].append(Y_dict['Y_err'][arg])
                    for ft in features_list:
                        if ft in ['ltau', 'lage', 'a_hat_AGN', 'la2t']:
                            training_set[ft].append(10**subset_averages_from_ids(db.table, ind, ft))
                        elif ft in ['LMASS', 'lmass']:
                            training_set['stellar_mass'].append(10**subset_averages_from_ids(db.table, ind, ft))
                        else:
                            training_set[ft].append(subset_averages_from_ids(db.table, ind, ft))

    return training_set
Example #16
def measure_cib(stacked_object, area_deg=1.62, tcib=False):
	'''
	Sums the contribution from sources (in each bin) to the CIB at each wavelength.
	If tcib == True, output is the sum over all bins at each wavelength.
	'''
	if area_deg == 1.62:
		print('defaulting to uVista/COSMOS area of 1.62 deg2')
	area_sr = area_deg * (np.pi / 180.)**2
	cib = np.zeros(np.shape(stacked_object.simstack_nuInu_array))
	for iwv in range(stacked_object.nw):
		for i in range(stacked_object.nz):
			zn = stacked_object.z_nodes[i:i+2]
			z_suf = '{:.2f}'.format(zn[0]) + '-' + '{:.2f}'.format(zn[1])
			for j in range(stacked_object.nm):
				mn = stacked_object.m_nodes[j:j+2]
				m_suf = '{:.2f}'.format(mn[0]) + '-' + '{:.2f}'.format(mn[1])
				for p in range(stacked_object.npops):
					arg = clean_args('z_' + z_suf + '__m_' + m_suf + '_' + stacked_object.pops[p])
					ng = len(stacked_object.bin_ids[arg])
					# Weight the bin's stacked intensity by its number of sources per steradian.
					cib[iwv, i, j, p] += 1e-9 * float(ng) / area_sr * stacked_object.simstack_nuInu_array[iwv, i, j, p]
	if tcib:
		return np.sum(np.sum(np.sum(cib, axis=1), axis=1), axis=1)
	else:
		return cib
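For a feel of the bookkeeping, a toy, self-contained version of the per-bin contribution computed in measure_cib; all numbers below are made up:

import numpy as np

area_sr = 1.62 * (np.pi / 180.)**2   # survey area in steradians
ng = 5000                            # sources in the bin
nuInu = 0.01                         # per-bin stacked intensity, native units
contribution = 1e-9 * ng / area_sr * nuInu
print(contribution)                  # this bin's share of the CIB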
Example #17
    parser = argparse.ArgumentParser(description='Computes features for all the input data.')
    parser.add_argument('-c', '--config', dest='config', required=False, action='store', default=None, help='Path to the config file')
    parser.add_argument('-m', '--method', dest='method', required=True, action='store', choices=method_table.keys(), default=None, help='Method name.')
    parser.add_argument('-a', '--args', dest='args', required=False, action='store', default=None, help='Method arguments file.')
    parser.add_argument('-l', '--list', dest='list', required=True, action='store', default=None, help='List file.')
    parser.add_argument('-j', '--jobs', dest='jobs', required=False, action='store', default=None, type=int, help='Number of parallel processes.')
    parser.add_argument('-o', '--output', dest='output', required=False, action='store', default=None, help='Output directory.')
    opts = parser.parse_args()
    if opts.output is None:
        outdir = tempfile.mkdtemp(dir=os.curdir, prefix='out')
        logger.info('Output directory %s', outdir)
    else:
        outdir = opts.output
        if not os.path.exists(outdir):
            tsh.makedirs(outdir)
    inputname = os.path.splitext(os.path.basename(opts.list))[0]
    if opts.list.endswith('.gz'):
        inputname = os.path.splitext(inputname)[0]
    outputname = os.path.join(outdir, inputname + '-feats.csv.gz')
    if os.path.exists(outputname):
        logger.info('Skipping file %s, already exists.', outputname)
    else:
        config = tsh.read_config(opts, __file__)
        meta, data = read_listfile(opts.list)
        args = meta
        if opts.args is not None:
            args.update(read_argsfile(opts.args))
        args, features = compute_features(opts.method, args, data, input_name=inputname, n_jobs=opts.jobs, output_dir=outdir)
        clean_args(args)
        write_listfile(outputname, features, input_name=inputname, **args)
Example #18
    parser.add_argument('-a',
                        '--args',
                        dest='args',
                        required=True,
                        action='store',
                        default=None,
                        help='Method arguments file.')
    parser.add_argument('--random-seed',
                        dest='seed',
                        required=False,
                        action='store',
                        type=int,
                        default=-1,
                        help='Random seed, by default use time.')
    parser.add_argument('output',
                        action='store',
                        default=None,
                        help='Output file.')
    opts = parser.parse_args()
    config = tsh.read_config(opts, __file__)
    if opts.seed == -1:
        seed = int(time.time() * 1024 * 1024 + time.time())
    else:
        seed = opts.seed
    np.random.seed(seed)
    args = read_argsfile(opts.args)
    args, data = generate(**args)
    args['random_seed'] = seed
    clean_args(args)
    write_listfile(opts.output, data, **args)
Example #19
	def read_pickles(self):

		if self.params['bootstrap']:
			print('creating bootstrap array w/ size ' + str(self.nw) + ' bands; ' + str(self.nz) + ' redshifts; ' + str(self.nm) + ' masses; ' + str(self.npops) + ' populations; ' + str(self.nboots) + ' bootstraps')
			bootstrap_fluxes = np.zeros([self.nw,self.nz,self.nm,self.npops,self.nboots])
			bootstrap_errors = np.zeros([self.nw,self.nz,self.nm,self.npops,self.nboots])
			bootstrap_intensities = np.zeros([self.nw,self.nz,self.nm,self.npops,self.nboots])
		else:
			print('creating simstack array w/ size ' + str(self.nw) + ' bands; ' + str(self.nz) + ' redshifts; ' + str(self.nm) + ' masses; ' + str(self.npops) + ' populations')
			stacked_fluxes = np.zeros([self.nw,self.nz,self.nm,self.npops])
			stacked_errors = np.zeros([self.nw,self.nz,self.nm,self.npops])
			stacked_intensities = np.zeros([self.nw,self.nz,self.nm,self.npops])
		# Lookback-time bins carry two decimals in their keys; redshift bins carry one.
		if self.params['bins']['bin_in_lookback_time']:
			ndec = 2
		else:
			ndec = 1
		slice_keys = self.slice_key_builder(ndecimal=ndec)

		for i in range(self.nz):
			z_slice = slice_keys[i]
			z_suf = 'z_'+ self.z_keys[i]
			if self.params['bootstrap']:
				for k in np.arange(self.nboots) + int(self.params['boot0']):
					if self.params['bins']['stack_all_z_at_once']:
						filename_boots = 'simstack_flux_densities_' + self.params['io']['shortname'] + '_all_z' + '_boot_' + str(k) + '.p'
					else:
						filename_boots = 'simstack_flux_densities_' + self.params['io']['shortname'] + '_' + z_slice + '_boot_' + str(k) + '.p'

					if os.path.exists(self.path + filename_boots):
						bootstack = pickle.load(open(self.path + filename_boots, "rb"))
						if self.params['save_bin_ids']:
							for bbk in bootstack[0].keys():
								self.bin_ids[bbk + '_' + str(k)] = bootstack[0][bbk]
						for wv in range(self.nw):
							# Pickles are keyed either by z slice then map name, or directly by map name.
							if self.params['save_bin_ids']:
								try:
									single_wv_stacks = bootstack[1][z_slice][self.maps[wv]]
								except KeyError:
									single_wv_stacks = bootstack[1][self.maps[wv]]
							else:
								try:
									single_wv_stacks = bootstack[z_slice][self.maps[wv]]
								except KeyError:
									single_wv_stacks = bootstack[self.maps[wv]]
							for j in range(self.nm):
								m_suf = 'm_' + self.m_keys[j]
								for p in range(self.npops):
									p_suf = self.pops[p]
									key = clean_args(z_suf+'__'+ m_suf+ '_' + p_suf)
									try:
										bootstrap_fluxes[wv,i,j,p,k] = single_wv_stacks[key].value
										bootstrap_errors[wv,i,j,p,k] = single_wv_stacks[key].stderr
										bootstrap_intensities[wv,i,j,p,k] = single_wv_stacks[key].value * (self.fqs[wv]) * 1e-26 * 1e9
									except AttributeError:
										# Older pickles store plain dicts rather than fit-parameter objects.
										bootstrap_fluxes[wv,i,j,p,k] = single_wv_stacks[key]['value']
										bootstrap_errors[wv,i,j,p,k] = single_wv_stacks[key]['stderr']
										bootstrap_intensities[wv,i,j,p,k] = single_wv_stacks[key]['value'] * (self.fqs[wv]) * 1e-26 * 1e9

				self.bootstrap_flux_array = bootstrap_fluxes
				self.bootstrap_error_array = bootstrap_errors
				self.bootstrap_nuInu_array = bootstrap_intensities
			else:
				if self.params['bins']['stack_all_z_at_once']:
					filename_stacks = 'simstack_flux_densities_' + self.params['io']['shortname'] + '_all_z' + '.p'
				else:
					filename_stacks = 'simstack_flux_densities_' + self.params['io']['shortname'] + '_' + z_slice + '.p'
				if os.path.exists(self.path + filename_stacks):
					simstack = pickle.load(open(self.path + filename_stacks, "rb"))
					for ssk in simstack[0]:
						self.bin_ids[ssk] = simstack[0][ssk]
					for wv in range(self.nw):
						try:
							single_wv_stacks = simstack[1][z_slice][self.maps[wv]]
						except KeyError:
							single_wv_stacks = simstack[1][self.maps[wv]]
						for j in range(self.nm):
							m_suf = 'm_' + self.m_keys[j]
							for p in range(self.npops):
								p_suf = self.pops[p]
								key = clean_args(z_suf+'__'+ m_suf+ '_' + p_suf)
								try:
									stacked_fluxes[wv,i,j,p] = single_wv_stacks[key].value
									try:
										stacked_errors[wv,i,j,p] = single_wv_stacks[key].psnerr
									except AttributeError:
										stacked_errors[wv,i,j,p] = single_wv_stacks[key].stderr
									stacked_intensities[wv,i,j,p] = single_wv_stacks[key].value * (self.fqs[wv]*1e9) * 1e-26 * 1e9
								except AttributeError:
									# Older pickles store plain dicts rather than fit-parameter objects.
									stacked_fluxes[wv,i,j,p] = single_wv_stacks[key]['value']
									try:
										stacked_errors[wv,i,j,p] = single_wv_stacks[key]['psnerr']
									except KeyError:
										stacked_errors[wv,i,j,p] = single_wv_stacks[key]['stderr']
									stacked_intensities[wv,i,j,p] = single_wv_stacks[key]['value'] * (self.fqs[wv]*1e9) * 1e-26 * 1e9

				self.simstack_flux_array = stacked_fluxes
				self.simstack_error_array = stacked_errors
				self.simstack_nuInu_array = stacked_intensities
Example #20
def stack_libraries_in_layers_w_background(map_library, subcatalog_library, quiet=None):

  print('stacking with floating background')
  map_names = list(map_library.keys())
  # All wavelengths in cwavelengths
  cwavelengths = [map_library[i].wavelength for i in map_names]
  # Unique wavelengths in uwavelengths
  uwavelengths = np.sort(np.unique(cwavelengths))
  # nwv the number of unique wavelengths
  nwv = len(uwavelengths)

  lists = list(subcatalog_library.keys())
  nlists = len(lists)
  stacked_layers = {}

  cwavelengths = []
  radius = 1.1
  for iwv in range(nwv):
    cname = map_names[iwv]
    print('stacking ' + cname)
    # READ MAPS
    cmap = map_library[cname].map
    cnoise = map_library[cname].noise
    cwv = map_library[cname].wavelength
    cwavelengths.append(cwv)
    chd = map_library[cname].header
    pixsize = map_library[cname].pixel_size
    kern = map_library[cname].psf
    fwhm = map_library[cname].fwhm
    cw = WCS(chd)
    cms = np.shape(cmap)
    # Mask pixels where the map is zero (or NaN after clean_nans).
    zeromask = np.ones(np.shape(cmap))
    ind_map_zero = np.where(clean_nans(cmap) == 0.0)
    zeromask[ind_map_zero] = 0.0

    # STEP 1  - Make Layers Cube at each wavelength
    layers=np.zeros([nlists+1,cms[0],cms[1]])
    ngals_layer = {}

    for k in range(nlists):
      s = lists[k]
      if len(subcatalog_library[s][0]) > 0:
        ra = subcatalog_library[s][0]
        dec = subcatalog_library[s][1]
        ty,tx = cw.wcs_world2pix(ra, dec, 0)
        # CHECK FOR SOURCES THAT FALL OUTSIDE MAP
        ind_keep = np.where((np.round(tx) >= 0) & (np.round(tx) < cms[0]) & (np.round(ty) >= 0) & (np.round(ty) < cms[1]))
        real_x=np.round(tx[ind_keep]).astype(int)
        real_y=np.round(ty[ind_keep]).astype(int)
        # CHECK FOR SOURCES THAT FALL ON ZEROS
        ind_nz=np.where(cmap[real_x,real_y] != 0 )
        nt = np.shape(ind_nz)[1]
        ngals_layer[s] = nt
        if nt > 0:
          real_x = real_x[ind_nz]
          real_y = real_y[ind_nz]

          for ni in range(nt):
            layers[k, real_x[ni],real_y[ni]]+=1.0
      else: ngals_layer[s] = 1

    # STEP 2  - Convolve Layers and put in pixels
    flattened_pixmap = np.sum(layers,axis=0)
    total_circles_mask = circle_mask(flattened_pixmap, radius * fwhm, pixsize)
    ind_fit = np.where((total_circles_mask >= 1) & (zeromask != 0))
    del total_circles_mask
    nhits = np.shape(ind_fit)[1]
    # First column of the design matrix: a flat background layer, one entry per fitted pixel.
    cfits_flat = np.asarray([])
    cfits_flat = np.append(cfits_flat, np.ndarray.flatten(np.ones(nhits)))
    for u in range(nlists):
      layer = layers[u,:,:]
      tmap = smooth_psf(layer, kern)
      cfits_flat = np.append(cfits_flat,np.ndarray.flatten(tmap[ind_fit]))

    cmap[ind_fit] -= np.mean(cmap[ind_fit], dtype=np.float32)
    imap = np.ndarray.flatten(cmap[ind_fit])
    ierr = np.ndarray.flatten(cnoise[ind_fit])

    fit_params = Parameters()
    fit_params.add('cib_background', value=1e-5 * np.random.randn())
    for iarg in range(nlists):
      arg = clean_args(lists[iarg])
      fit_params.add(arg, value=1e-3 * np.random.randn())

    if len(ierr) == 0: pdb.set_trace()
    cov_ss_1d = minimize(simultaneous_stack_array_oned, fit_params,
      args=(cfits_flat,), kws={'data1d': imap, 'err1d': ierr}, nan_policy='propagate')
    del cfits_flat, imap, ierr

    # Dictionary keys decided here.  Was originally wavelengths; changed back to map names.
    packed_fluxes = pack_fluxes(cov_ss_1d.params)
    # NOTE: crms (the map rms used for the simple Poisson errors) must be defined in the enclosing scope.
    packed_stn = pack_simple_poisson_errors(cov_ss_1d.params, ngals_layer, crms)
    stacked_layers[cname] = packed_stn  # packed_fluxes

  gc.collect()
  return stacked_layers
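The core of the routine above is a simultaneous linear fit: every catalog list becomes a PSF-smoothed layer, and the map is modeled as a weighted sum of layers plus a flat background. A toy numpy version of that fit, with np.linalg.lstsq standing in for the lmfit minimize call and purely synthetic data:

import numpy as np

rng = np.random.default_rng(0)
npix, nlists = 10000, 3
true_fluxes = np.array([2.0, 0.5, 1.2])
true_background = 0.3

# Synthetic hit maps, standing in for the already-smoothed layers.
layers = rng.poisson(0.05, size=(nlists, npix)).astype(float)
sky = true_background + true_fluxes @ layers + 0.01 * rng.standard_normal(npix)

# Design matrix: a column of ones (background) plus one column per layer.
A = np.column_stack([np.ones(npix), layers.T])
coeffs, *_ = np.linalg.lstsq(A, sky, rcond=None)
print(coeffs)  # ~ [0.3, 2.0, 0.5, 1.2]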
Example #21
	def get_parent_child_redshift_bins(self, znodes):
		self.id_z_sed = {}
		for ch in self.table.parent.unique():
			for iz in range(len(znodes) - 1):
				ind = ( (self.table.parent == ch) & (self.table[self.zkey] >= znodes[iz]) & (self.table[self.zkey] < znodes[iz+1]) )
				self.id_z_sed['z_' + clean_args(str(znodes[iz])) + '_' + clean_args(str(znodes[iz+1])) + '__sed' + str(ch)] = self.table.ID[ind].values
Example #22
 def decorated(function):
     function = Hook(events.TEARDOWN, clean_args(function))
     # Priority hooks go to the front; others are appended (NOTE: the fallback index is taken from the SETUP list).
     self._hooks[events.TEARDOWN].insert(
         0 if priority else len(self._hooks[events.SETUP]), function)
     return function