def stack(self, log_age_min=None, log_age_max=None):
    """
    Creates a stack of HR diagrams within a range of ages

    Parameters
    ----------
    log_age_min : int or float, optional
        Minimum log(age) to stack
    log_age_max : int or float, optional
        Maximum log(age) to stack

    Returns
    -------
    None
        This method stores the stacked values in the class attributes
        self.high_H_stacked, self.medium_H_stacked, self.low_H_stacked
        and self.all_stacked.
    """
    self.reset_stack()

    # If neither limit is given, stack the full BPASS time range.
    if log_age_min is None and log_age_max is None:
        log_age_min, log_age_max = self.t[0], self.t[-1]

    if log_age_min is None and log_age_max is not None:
        log_age_min = self.t[0]
        if log_age_max > self.t[-1]:
            raise HokiFatalError("log_age_max is too large. Make sure you provided the log of the age.")

    if log_age_max is None and log_age_min is not None:
        log_age_max = self.t[-1]
        if log_age_min < self.t[0]:
            raise HokiFatalError("log_age_min is too low.")

    if log_age_min is not None and log_age_max is not None:
        if log_age_min > log_age_max:
            raise HokiFatalError("log_age_max should be greater than log_age_min")
        if log_age_min < self.t[0] or log_age_max > self.t[-1]:
            raise HokiFatalError("The age range requested is outside the valid range ("
                                 + str(self.t[0]) + " to " + str(self.t[-1]) + " inclusive): "
                                 + str(log_age_min) + " " + str(log_age_max))

    # Now that we have time limits we calculate what bins they correspond to.
    bin_min = int(np.round(10 * (log_age_min - 6)))
    bin_max = int(np.round(10 * (log_age_max - 6)))

    # And now we slice! (+1 so the bin corresponding to log_age_max is included)
    for hrd1, hrd2, hrd3 in zip(self.high_H[bin_min:bin_max + 1],
                                self.medium_H[bin_min:bin_max + 1],
                                self.low_H[bin_min:bin_max + 1]):
        self.high_H_stacked += hrd1
        self.medium_H_stacked += hrd2
        self.low_H_stacked += hrd3

    self.all_stacked = self.high_H_stacked + self.medium_H_stacked + self.low_H_stacked

    print("The following attributes were updated: .all_stacked, .high_H_stacked, "
          ".medium_H_stacked, .low_H_stacked.")
    return
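# Example usage (a minimal sketch, not part of the class). It assumes an HRDiagram has
# been built with hoki.load.model_output from a BPASS 'hrs' output file; the file name
# below is illustrative.
from hoki import load

myhrd = load.model_output("hrs-bin-imf135_300.z020.dat", hr_type="TL")
myhrd.stack(log_age_min=6.8, log_age_max=7.5)   # sum all time bins between log(age) 6.8 and 7.5
summed_grid = myhrd.all_stacked                 # 100x100 stacked HR diagram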
def at_log_age(self, log_age):
    """
    Returns the HR diagrams at a specific age.

    Parameters
    ----------
    log_age : int or float
        The log(age) of choice.

    Returns
    -------
    Tuple of 4 np.ndarrays (100x100):
        - [0] : Stack of all the abundances
        - [1] : High hydrogen abundance (n1 > 0.4)
        - [2] : Medium hydrogen abundance (E-3 < n1 < 0.4)
        - [3] : Low hydrogen abundance (n1 < E-3)
    """
    if log_age < 6.0 or log_age > 11.0:
        raise HokiFatalError(
            "Valid values of log age should be between 6.0 and 11.0 (inclusive)")

    bin_i = int(np.round(10 * (log_age - 6)))

    return (self.high_H[bin_i] + self.medium_H[bin_i] + self.low_H[bin_i],
            self.high_H[bin_i],
            self.medium_H[bin_i],
            self.low_H[bin_i])
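# Example usage (a minimal sketch): pulling out the HR diagrams at a single BPASS time
# bin, assuming `myhrd` is the HRDiagram instance loaded in the previous example.
all_hr, high_h, medium_h, low_h = myhrd.at_log_age(log_age=6.8)
print(all_hr.shape)   # (100, 100)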
def at_log_age(self, log_age):
    """
    Returns the CMD grid at a specific age.

    Parameters
    ----------
    log_age : int or float
        The log(age) of choice.

    Returns
    -------
    The CMD grid : np.ndarray (240x100)
    """
    if log_age < 6.0 or log_age > 11.0:
        raise HokiFatalError(
            "Valid values of log age should be between 6.0 and 11.0 (inclusive)")

    bin_i = int(np.round(10 * (log_age - 6)))
    return self.grid[bin_i]
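# Example usage (a minimal sketch). It assumes a CMD object was previously pickled with
# hoki and is reloaded with hoki.load.unpickle (as done elsewhere in this code base);
# the pickle file name is hypothetical.
from hoki import load

mycmd = load.unpickle(path="my_cmd.pkl")
grid_68 = mycmd.at_log_age(6.8)   # 240x100 magnitude-colour grid at log(age) = 6.8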
def plot(self, log_age=6.8, loc=111, cmap='Greys', **kwargs):
    """
    Plots the CMD grid at a particular age

    Parameters
    ----------
    log_age : float
        Must be a valid BPASS time bin
    loc : 3 integers, optional
        Location of the subplot. Default is 111.
    cmap : str, optional
        Colour map for the contours. Default is 'Greys'
    **kwargs : matplotlib keyword arguments, optional

    Returns
    -------
    matplotlib.axes._subplots.AxesSubplot :
        The plot created is returned, so you can add stuff to it, like text or extra data.
    """
    # THIS IS VERY SIMILAR TO THE PLOTTING FUNCTION IN HOKI.HRDIAGRAMS.
    if log_age < 6.0 or log_age > 11.0:
        raise HokiFatalError(
            "Valid values of log age should be between 6.0 and 11.0 (inclusive)")

    cm_diagram = plt.subplot(loc)

    index = np.where(np.round(BPASS_TIME_BINS, 1) == log_age)[0]
    single_cmd_grid = self.grid[int(index)]

    # This step has been approved by JJ :o)
    # Replace infinities and NaNs by zero so the contour levels stay finite.
    single_cmd_grid[np.isinf(single_cmd_grid)] = 0.0
    np.nan_to_num(single_cmd_grid, copy=False)

    # Empty cells are set just below the smallest non-zero value so that log10 is
    # defined and the colour map's "under" colour picks them up.
    min_nonzero = single_cmd_grid[single_cmd_grid != 0].min()
    single_cmd_grid[single_cmd_grid == 0] = 0.9 * min_nonzero

    top_level = single_cmd_grid.max()
    min_level = single_cmd_grid.min()

    # We want our levels to be powers of 10 below our maximum value.
    possible_levels = [top_level * 10.0 ** (-i) for i in range(13, -1, -1)]

    # To make sure the colourmap is sensible we want to ensure the minimum level == minimum value
    levels = [min_level] + [level for level in possible_levels if level > min_level]

    colMap = cm.get_cmap(cmap)
    colMap.set_under(color='white')

    cm_diagram.contourf(self.col_range, self.mag_range, np.log10(single_cmd_grid),
                        np.log10(levels).tolist(), cmap=colMap, **kwargs)

    cm_diagram.invert_yaxis()
    cm_diagram.set_ylabel(self.mag_filter)
    cm_diagram.set_xlabel(self.col_filter1 + "-" + self.col_filter2)

    return cm_diagram
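# Example usage (a minimal sketch), reusing the `mycmd` object reloaded in the previous
# example.
import matplotlib.pyplot as plt

ax = mycmd.plot(log_age=6.8, cmap="Greys")
ax.set_title("CMD at log(age) = 6.8")
plt.show()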
def at_time(self, SFH, ZEH, event_type_list, t0, sample_rate=1000):
    """
    Calculates the event rates at lookback time `t0` for the given input functions.

    Parameters
    ----------
    SFH : `python callable`, `hoki.csp.sfh.SFH`, `list(hoki.csp.sfh.SFH, )`, `list(callable, )`
        SFH can be the following things:
        - A python callable (function) which takes the lookback time and returns the
          star formation rate in units M_\\odot per yr at the given time.
        - A list of python callables with the above requirement.
        - A `hoki.csp.sfh.SFH` object.
        - A list of `hoki.csp.sfh.SFH` objects.

    ZEH : callable, `list(callable, )`
        ZEH can be the following things:
        - A python callable (function) which takes the lookback time and returns the
          metallicity at the given time.
        - A list of python callables with the above requirement.

    event_type_list : `list(str, )`
        A list of BPASS event types. The available types are:
        - Ia
        - IIP
        - II
        - Ib
        - Ic
        - LGRB
        - PISNe
        - low_mass

    t0 : `float`
        The moment in lookback time at which to calculate the event rate.

    sample_rate : `int`
        The number of samples to take from the SFH and metallicity evolutions.
        Default = 1000. If a negative value is given, the BPASS binning is used to
        calculate the event rates.

    Returns
    -------
    `numpy.ndarray` (N, M) [nr_sfh, nr_event_types]
        Returns a `numpy.ndarray` containing the event rates for each sfh and
        metallicity pair (N) and event type (M) at the requested lookback time.
        Usage: event_rates[1]["Ia"]
        (Gives the Ia event rates for the second sfh and metallicity history pair)
    """
    SFH, ZEH = self._type_check_histories(SFH, ZEH)

    if not isinstance(event_type_list, list):
        raise HokiFatalError(
            "event_type_list is not a list. Only a list is taken as input.")

    nr_events = len(event_type_list)
    nr_sfh = len(SFH)

    output_dtype = np.dtype([(i, np.float64) for i in event_type_list])
    event_rates = np.empty(nr_sfh, dtype=output_dtype)

    # Define time edges
    time_edges = BPASS_LINEAR_TIME_EDGES if sample_rate < 0 else np.linspace(
        0, self.now, sample_rate + 1)

    bpass_rates = self._numpy_bpass_rates[[
        BPASS_EVENT_TYPES.index(i) for i in event_type_list]]

    mass_per_bin_list = np.array(
        [utils.mass_per_bin(i, t0 + time_edges) for i in SFH])
    metallicity_per_bin_list = np.array(
        [utils.metallicity_per_bin(i, t0 + time_edges) for i in ZEH])

    for counter, (mass_per_bin, Z_per_bin) in enumerate(
            zip(mass_per_bin_list, metallicity_per_bin_list)):
        for count in range(nr_events):
            event_rates[counter][count] = utils._at_time(
                Z_per_bin, mass_per_bin, time_edges, bpass_rates[count])

    return event_rates
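# Example usage (a minimal sketch). `cspev` stands in for an already-constructed
# instance of the class this method belongs to (construction not shown here); the toy
# star formation and metallicity histories below are illustrative, and lookback times
# are assumed to be in years, consistent with the M_sun/yr star formation rate.
import numpy as np

sfh = lambda t: np.full(np.shape(t), 1.0)     # constant SFR of 1 Msun/yr
zeh = lambda t: np.full(np.shape(t), 0.020)   # constant metallicity Z = 0.020

rates = cspev.at_time(sfh, zeh, ["Ia", "II"], t0=1e9)
print(rates[0]["Ia"], rates[0]["II"])         # event rates at a lookback time of 1 Gyr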
def over_time(self, SFH, ZEH, event_type_list, nr_time_bins, return_time_edges=False):
    """
    Calculates the event rates over lookback time for the given input functions.

    Parameters
    ----------
    SFH : `python callable`, `hoki.csp.sfh.SFH`, `list(hoki.csp.sfh.SFH, )`, `list(callable, )`
        SFH can be the following things:
        - A python callable (function) which takes the lookback time and returns the
          star formation rate in units M_\\odot per yr at the given time.
        - A list of python callables with the above requirement.
        - A `hoki.csp.sfh.SFH` object.
        - A list of `hoki.csp.sfh.SFH` objects.

    ZEH : callable, `list(callable, )`
        ZEH can be the following things:
        - A python callable (function) which takes the lookback time and returns the
          metallicity at the given time.
        - A list of python callables with the above requirement.

    event_type_list : `list(str, )`
        A list of BPASS event types. The available types are:
        - Ia
        - IIP
        - II
        - Ib
        - Ic
        - LGRB
        - PISNe
        - low_mass

    nr_time_bins : `int`
        The number of bins to split the lookback time into.

    return_time_edges : `bool`
        If `True`, also returns the edges of the lookback time bins.
        Default=False

    Returns
    -------
    `numpy.ndarray` (nr_sfh, nr_event_types, nr_time_bins),
    ((nr_sfh, nr_event_types, nr_time_bins), nr_time_bins)
        The event rates in a 3D matrix whose dimensions are the number of SFH-Z pairs,
        the number of events selected, and the number of time bins chosen.
        If `return_time_edges=False`, returns a `numpy.ndarray` containing the event rates.
        Usage: event_rates[1]["Ia"][10]
        (Gives the Ia event rates in bin number 11 for the second sfh and metallicity
        history pair)
        If `return_time_edges=True`, returns a numpy array containing the event rates
        and the edges, eg. `out[0]=event_rates` `out[1]=time_edges`.
    """
    # Check and transform the input to the right type
    SFH, ZEH = self._type_check_histories(SFH, ZEH)

    if not isinstance(event_type_list, list):
        raise HokiFatalError(
            "event_type_list is not a list. Only a list is taken as input.")

    nr_events = len(event_type_list)
    nr_sfh = len(SFH)
    output_dtype = np.dtype([(i, np.float64, nr_time_bins) for i in event_type_list])
    event_rates = np.empty(nr_sfh, dtype=output_dtype)

    time_edges = np.linspace(0, self.now, nr_time_bins + 1)

    bpass_rates = self._numpy_bpass_rates[[
        BPASS_EVENT_TYPES.index(i) for i in event_type_list]]

    mass_per_bin_list = np.array(
        [utils.mass_per_bin(i, time_edges) for i in SFH])
    metallicity_per_bin_list = np.array(
        [utils.metallicity_per_bin(i, time_edges) for i in ZEH])

    for counter, (mass_per_bin, Z_per_bin) in enumerate(
            zip(mass_per_bin_list, metallicity_per_bin_list)):
        for count in range(nr_events):
            event_rate = utils._over_time(Z_per_bin, mass_per_bin,
                                          time_edges, bpass_rates[count])
            event_rates[counter][count] = event_rate / np.diff(time_edges)

    if return_time_edges:
        return np.array([event_rates, time_edges], dtype=object)
    else:
        return event_rates
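# Example usage (a minimal sketch), reusing the toy `sfh`, `zeh` and the `cspev`
# stand-in from the previous example: Type Ia rates in 100 lookback-time bins.
rates, time_edges = cspev.over_time(sfh, zeh, ["Ia"], nr_time_bins=100,
                                    return_time_edges=True)
ia_rates = rates[0]["Ia"]                            # shape (100,): events per year per bin
bin_centres = 0.5 * (time_edges[:-1] + time_edges[1:])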
def __init__(self, obs_df, model):
    """
    Initialisation of the AgeWizard object

    Parameters
    ----------
    obs_df : pandas.DataFrame
        Observational data. MUST contain a logT and logL column (for HRD comparison)
        or a col and mag column (for CMD comparison)

    model : str, hoki.hrdiagrams.HRDiagram or hoki.cmd.CMD
        Location of the modelled HRD or CMD. This can be an already instantiated
        HRDiagram or CMD object, or a path to an HR Diagram file or a pickled CMD.
    """
    # Making sure the observational properties are given in a format we can use.
    if not isinstance(obs_df, pd.DataFrame):
        raise HokiFormatError("Observations should be stored in a Data Frame")

    if 'name' not in obs_df.columns:
        warnings.warn("We expect the name of sources to be given in the 'name' column. "
                      "If I can't find names I'll make my own ;)", HokiFormatWarning)

    # Checking what format the model is given in:
    if isinstance(model, hoki.hrdiagrams.HRDiagram):
        self.model = model
    elif isinstance(model, hoki.cmd.CMD):
        self.model = model
    elif isinstance(model, str) and 'hrs' in model:
        self.model = load.model_output(model, hr_type='TL')
    elif isinstance(model, str):
        try:
            self.model = load.unpickle(path=model)
        except AssertionError:
            print('-----------------')
            print('HOKI DEBUGGER:\nThe model param should be a path to\n'
                  'a BPASS HRDiagram output file or pickled CMD, or\n'
                  'a hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
            print('-----------------')
            raise HokiFatalError('model is ' + str(type(model)))
    else:
        print('-----------------')
        print('HOKI DEBUGGER:\nThe model param should be a path to\n'
              'a BPASS HRDiagram output file or pickled CMD, or\n'
              'a hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
        print('-----------------')
        raise HokiFatalError('model is ' + str(type(model)))

    self.obs_df = obs_df
    self.coordinates = find_coordinates(self.obs_df, self.model)

    # This line is obsolete but might need revival if we ever want to add the
    # not normalised distributions again
    # self._distributions = calculate_distributions_normalised(self.obs_df, self.model)

    self.pdfs = calculate_individual_pdfs(self.obs_df, self.model).fillna(0)
    self.sources = self.pdfs.columns.to_list()
    self.sample_pdf = None
    self._most_likely_age = None
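# Example usage (a minimal sketch). It assumes `AgeWizard` (the class this constructor
# belongs to) has been imported; the BPASS file name is illustrative and must contain
# 'hrs' so it is read as an HR diagram.
import pandas as pd

obs = pd.DataFrame({"name": ["star_1", "star_2"],
                    "logT": [4.10, 3.95],
                    "logL": [4.50, 3.20]})
wizard = AgeWizard(obs_df=obs, model="hrs-bin-imf135_300.z020.dat")
print(wizard.pdfs.head())   # one age probability distribution per source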
def plot(self, log_age=None, age_range=None, abundances=(1, 1, 1), **kwargs):
    """
    Plots the HR Diagram - calls hoki.hrdiagrams.plot_hrdiagram()

    Parameters
    ----------
    log_age : int or float, optional
        Log(age) at which to plot the HR diagram.

    age_range : tuple or list of 2 ints or floats, optional
        Age range within which you want to plot the HR diagram.

    abundances : tuple or list of 3 ints, zeros or ones, optional
        This turns on or off the inclusion of the abundances. The corresponding
        abundances are: (n1 > 0.4, E-3 < n1 < 0.4, n1 < E-3). A 1 means a particular
        abundance should be included, a 0 means it will be ignored.
        Default is (1, 1, 1), meaning all abundances are plotted.
        Note that (0, 0, 0) is not valid and will raise an error.

    **kwargs : matplotlib keyword arguments, optional

    Notes
    -----
    If you give both an age and an age range, the age range will take precedent
    and be plotted. You will get a warning if that happens though.

    Returns
    -------
    matplotlib.axes._subplots.AxesSubplot :
        The plot created is returned, so you can add stuff to it, like text or extra data.
    """
    if abundances == (0, 0, 0):
        raise HokiFatalError(
            "Abundances cannot be (0, 0, 0) - You're plotting nothing.")

    if not isinstance(abundances, tuple):
        raise HokiFormatError(
            "abundances should be a tuple of 3 integers - consult the docstrings for further details")

    # As documented in the Notes, the age range takes precedent over a single age.
    if log_age is not None and age_range is not None:
        warnings.warn("You provided an age range as well as an age. The former takes "
                      "precedent. If you wanted to plot a single age, this will be WRONG.",
                      HokiUserWarning)

    if age_range is not None:
        if not isinstance(age_range, (list, tuple)):
            raise HokiFormatError("Age range should be a list or a tuple")
        self.stack(age_range[0], age_range[1])
        all_hr, high_hr, medium_hr, low_hr = (self.all_stacked, self.high_H_stacked,
                                              self.medium_H_stacked, self.low_H_stacked)

    elif log_age is not None:
        if not isinstance(log_age, (int, float)):
            raise HokiFormatError("Age should be an int or float")
        all_hr, high_hr, medium_hr, low_hr = self.at_log_age(log_age)

    else:
        # No age or age range given: stack and plot the full BPASS time range.
        self.stack(BPASS_TIME_BINS[0], BPASS_TIME_BINS[-1])
        all_hr, high_hr, medium_hr, low_hr = (self.all_stacked, self.high_H_stacked,
                                              self.medium_H_stacked, self.low_H_stacked)

    # Combine the hydrogen abundance bins that were switched on.
    if abundances == (1, 1, 1):
        hr_data = all_hr
    elif abundances == (1, 1, 0):
        hr_data = high_hr + medium_hr
    elif abundances == (1, 0, 0):
        hr_data = high_hr
    elif abundances == (0, 1, 1):
        hr_data = medium_hr + low_hr
    elif abundances == (0, 1, 0):
        hr_data = medium_hr
    elif abundances == (0, 0, 1):
        hr_data = low_hr
    elif abundances == (1, 0, 1):
        hr_data = high_hr + low_hr
    else:
        raise HokiFormatError(
            "abundances should be a tuple of 3 integers that are either 0 or 1")

    hr_plot = plot_hrdiagram(hr_data, kind=self.type, **kwargs)
    hr_plot.set_xlabel("log" + self.type[0])
    hr_plot.set_ylabel("log" + self.type[1:])

    return hr_plot
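# Example usage (a minimal sketch), assuming `myhrd` is an HRDiagram instance as in the
# earlier examples: plot only the high and medium hydrogen abundance stars at one age.
import matplotlib.pyplot as plt

ax = myhrd.plot(log_age=6.8, abundances=(1, 1, 0))
ax.set_title("log(age) = 6.8, high + medium H")
plt.show()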
def __init__(self, obs_df, model, nsamples=100):
    """
    Initialisation of the AgeWizard object

    Parameters
    ----------
    obs_df : pandas.DataFrame
        Observational data. MUST contain a logT and logL column (for HRD comparison)
        or a col and mag column (for CMD comparison)

    model : str, hoki.hrdiagrams.HRDiagram or hoki.cmd.CMD
        Location of the modelled HRD or CMD. This can be an already instantiated
        HRDiagram or CMD object, or a path to an HR Diagram file or a pickled CMD.

    nsamples : int, optional
        Number of times each data point should be sampled from its error distribution.
        Default is 100. This only matters if you are taking errors into account.
    """
    print(f"{Dialogue.info()} AgeWizard Starting")
    print(f"{Dialogue.running()} Initial Checks")

    # Making sure the observational properties are given in a format we can use.
    if not isinstance(obs_df, pd.DataFrame):
        raise HokiFormatError("Observations should be stored in a Data Frame")

    if 'name' not in obs_df.columns:
        warnings.warn("We expect the name of sources to be given in the 'name' column. "
                      "If I can't find names I'll make my own ;)", HokiFormatWarning)

    # Checking what format the model is given in:
    if isinstance(model, hoki.hrdiagrams.HRDiagram):
        self.model = model
    elif isinstance(model, hoki.cmd.CMD):
        self.model = model
    elif isinstance(model, str) and 'hrs' in model:
        self.model = load.model_output(model, hr_type='TL')
    elif isinstance(model, str):
        try:
            self.model = load.unpickle(path=model)
        except AssertionError:
            print(f'{Dialogue.ORANGE}-----------------{Dialogue.ENDC}')
            print(f'{Dialogue.debugger()}\nThe model param should be a path to\n'
                  'a BPASS HRDiagram output file or pickled CMD, or\n'
                  'a hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
            print(f'{Dialogue.ORANGE}-----------------{Dialogue.ENDC}')
            raise HokiFatalError('model is ' + str(type(model)))
    else:
        print(f'{Dialogue.ORANGE}-----------------{Dialogue.ENDC}')
        print(f'{Dialogue.debugger()}\nThe model param should be a path to\n'
              'a BPASS HRDiagram output file or pickled CMD, or\n'
              'a hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
        print(f'{Dialogue.ORANGE}-----------------{Dialogue.ENDC}')
        raise HokiFatalError('model is ' + str(type(model)))

    print(f"{Dialogue.complete()} Initial Checks")

    self.obs_df = obs_df.copy()

    # not needed?
    # self.coordinates = find_coordinates(self.obs_df, self.model)

    # This line is obsolete but might need revival if we ever want to add the
    # not normalised distributions again
    # self._distributions = calculate_distributions_normalised(self.obs_df, self.model)

    self.pdfs = au.calculate_individual_pdfs(self.obs_df, self.model,
                                             nsamples=nsamples).fillna(0)
    self.sources = self.pdfs.columns.to_list()
    self.sample_pdf = None
    self._most_likely_age = None
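# Example usage (a minimal sketch), as in the earlier AgeWizard example but with an
# explicit sampling count. The `nsamples` value is illustrative, and the uncertainty
# columns that au.calculate_individual_pdfs would sample from are not shown here.
import pandas as pd

obs = pd.DataFrame({"name": ["star_1", "star_2"],
                    "logT": [4.10, 3.95],
                    "logL": [4.50, 3.20]})
wizard = AgeWizard(obs_df=obs, model="hrs-bin-imf135_300.z020.dat", nsamples=500)
print(wizard.pdfs.head())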