def get_parameter_names(self, paramname_file, params_in_header=False):
    """Return the list of parameter names for this chain.

    Parameters
    ----------
    paramname_file : str or None
        Path to a CosmoMC-style .paramnames file. May only be None when
        params_in_header is True.
    params_in_header : bool, optional
        If True and paramname_file is None, read the names from the
        commented header line of the first chain file instead.

    Returns
    -------
    list of str
        Parameter names, with any trailing '*' (the derived-parameter
        marker) removed.
    """
    if (paramname_file is None) and params_in_header:
        # Names live on the first line of the chain file, e.g. "# a b c".
        reader = utils.open_if_exists(self.chain_files[0], "r")
        try:
            parameters = reader.readline().strip("#").split()
        finally:
            # close even if the read raises (the original leaked the handle)
            reader.close()
    else:
        paramname_reader = utils.open_if_exists(paramname_file, "r")
        try:
            lines = paramname_reader.readlines()
        finally:
            paramname_reader.close()
        # First whitespace-separated token on each line is the name;
        # a trailing '*' denotes a derived parameter and is dropped.
        parameters = [line.strip().split()[0].split("*")[0]
                      for line in lines]
    return parameters
def get_parameter_names(self, paramname_file, params_in_header=False):
    """Read parameter names from a .paramnames file or a chain header.

    With paramname_file None and params_in_header True, the names are
    taken from the '#'-prefixed header line of the first chain file;
    otherwise they come from the given .paramnames file, one name per
    line, with any trailing '*' (derived-parameter marker) stripped.
    """
    header_mode = (paramname_file is None) and params_in_header
    if header_mode:
        source = utils.open_if_exists(self.chain_files[0], 'r')
        names = source.readline().strip('#').split()
        source.close()
    else:
        source = utils.open_if_exists(paramname_file, 'r')
        raw_lines = source.readlines()
        source.close()
        names = []
        for raw in raw_lines:
            # first token on the line; drop the '*' derived marker
            token = raw.strip().split()[0]
            names.append(token.split('*')[0])
    return names
def load_contour_data(self, parameters, n_samples, grid_size, smoothing,
                      contour_pct):
    """Search saved contour-data files for one matching the requested
    settings and, if found, load its contour levels and (X, Y, Z) grid.

    Each candidate file stores, one per line (with optional trailing '#'
    comments): parameter names, number of samples, grid size, smoothing
    scale, contour percentiles, contour levels, then the grid columns.

    Returns
    -------
    tuple or None
        (contour_levels, X, Y, Z) with arrays reshaped to grid_size, or
        None if no file matches. If several files match, the last wins.
    """
    contour_data = None
    # NOTE(review): looks like leftover debug output — confirm before removing.
    # (print(...) with a single argument behaves identically on Python 2 and 3.)
    print(self.settings['contour_data_files'])
    for f in self.settings['contour_data_files']:
        reader = utils.open_if_exists(f, 'r')
        # Header lines: strip trailing '#' comments before parsing.
        test_parameters = reader.readline().split('#')[0].split()
        test_n_samples = int(float(reader.readline().split('#')[0]))
        test_grid_size = [int(float(x)) for x in
                          reader.readline().split('#')[0].split()]
        test_smoothing = float(reader.readline().split('#')[0])
        test_contour_pct = [float(x) for x in
                            reader.readline().split('#')[0].split()]
        # NOTE(review): zip() ignores length mismatches, so a file with a
        # shorter grid_size/contour_pct list can still "match" — confirm
        # whether an exact length check is wanted.
        match = (test_parameters == parameters) and \
            (test_n_samples == n_samples) and \
            np.all([x == y for x, y in zip(test_grid_size, grid_size)]) and \
            test_smoothing == smoothing and \
            np.all([x == y for x, y in zip(test_contour_pct, contour_pct)])
        if match:
            print('Plotting contours from ' + f)
            contour_levels = [float(x) for x in
                              reader.readline().split('#')[0].split()]
            X, Y, Z = np.loadtxt(reader, skiprows=1, unpack=True)
            contour_data = (contour_levels, X.reshape(grid_size),
                            Y.reshape(grid_size), Z.reshape(grid_size))
        reader.close()
    return contour_data
def __init__(
    self,
    name,
    chain_files,
    burn_in=None,
    mult_column=0,
    lnlike_column=1,
    first_par_column=2,
    paramname_file=None,
    params_in_header=False,
):
    """Load one or more MCMC chain files into a single sample array.

    Parameters
    ----------
    name : str
        Name registered via self.rename().
    chain_files : sequence of str
        Chain text files; rows are stacked in the given order.
    burn_in : int or float, optional
        If given, burn-in samples are removed after loading.
    mult_column : int or None, optional
        Column index of the multiplicity (weight) column, or None if the
        files have no multiplicity column (unit weights are prepended).
    lnlike_column : int or None, optional
        Column index of -ln(L), or None if absent.
    first_par_column : int, optional
        Column index of the first parameter.
    paramname_file : str or None, optional
        .paramnames file; defaults to the CosmoMC convention derived
        from the first chain file's name.
    params_in_header : bool, optional
        If True, parameter names are read from the chain file header.
    """
    self.rename(name)
    self.chain_files = chain_files
    first_file = True
    for chain_file in chain_files:
        # open_if_exists is expected to fail loudly on a missing file
        reader = utils.open_if_exists(chain_file, "r")
        new_samples = np.loadtxt(reader)
        reader.close()
        if first_file:
            self.samples = np.copy(new_samples)
            first_file = False
        else:
            # TODO: check that the number of columns matches across files
            self.samples = np.vstack((self.samples, new_samples))
    if mult_column is None:
        # No multiplicity column in the files: prepend unit weights,
        # which shifts every other column right by one.
        self.mult_column = 0
        self.multiplicity = np.ones(len(self.samples))
        self.samples = np.vstack((self.multiplicity, self.samples.T)).T
        if lnlike_column is not None:
            self.lnlike_column = lnlike_column + 1
        else:
            self.lnlike_column = None
        self.first_par_column = first_par_column + 1
    else:
        self.mult_column = mult_column
        self.multiplicity = self.samples[:, mult_column]
        self.lnlike_column = lnlike_column
        self.first_par_column = first_par_column
    if (paramname_file is None) and (not params_in_header):
        # CosmoMC convention: chains "root_1.txt" -> "root.paramnames"
        paramname_file = "_".join(chain_files[0].split("_")[:-1]) + ".paramnames"
    self.parameters = self.get_parameter_names(paramname_file, params_in_header)
    self.column_names = list(self.parameters)
    # BUG FIX: use the resolved self.* indices. The raw arguments are
    # wrong when mult_column is None: list.insert(None, ...) raises
    # TypeError, and lnlike_column is off by one after the prepended
    # multiplicity column.
    self.column_names.insert(self.mult_column, "mult")
    if self.lnlike_column is not None:
        self.column_names.insert(self.lnlike_column, "-ln(L)")
    self.burn_in = burn_in
    if burn_in is not None:
        self.remove_burn_in_samples()
def __init__(self, name, chain_files, burn_in=None, mult_column=0,
             lnlike_column=1, first_par_column=2, paramname_file=None,
             params_in_header=False):
    """Load one or more MCMC chain files into a single sample array.

    mult_column / lnlike_column / first_par_column give the layout of
    the chain files; mult_column=None means the files carry no
    multiplicity column, in which case a unit-weight column is
    prepended (shifting the other columns right by one). Parameter
    names come from paramname_file (defaulting to the CosmoMC
    "root.paramnames" convention) or from the chain header when
    params_in_header is True. Burn-in samples are removed if burn_in
    is given.
    """
    self.rename(name)
    self.chain_files = chain_files
    first_file = True
    for chain_file in chain_files:
        # open_if_exists is expected to fail loudly on a missing file
        reader = utils.open_if_exists(chain_file, 'r')
        new_samples = np.loadtxt(reader)
        reader.close()
        if first_file:
            self.samples = np.copy(new_samples)
            first_file = False
        else:
            # TODO: check that the number of columns matches across files
            self.samples = np.vstack((self.samples, new_samples))
    if mult_column is None:
        # prepend unit weights; all other columns shift right by one
        self.mult_column = 0
        self.multiplicity = np.ones(len(self.samples))
        self.samples = np.vstack((self.multiplicity, self.samples.T)).T
        if lnlike_column is not None:
            self.lnlike_column = lnlike_column + 1
        else:
            self.lnlike_column = None
        self.first_par_column = first_par_column + 1
    else:
        self.mult_column = mult_column
        self.multiplicity = self.samples[:, mult_column]
        self.lnlike_column = lnlike_column
        self.first_par_column = first_par_column
    if (paramname_file is None) and (not params_in_header):
        paramname_file = '_'.join(chain_files[0].split('_')[:-1]) + \
            '.paramnames'
    self.parameters = self.get_parameter_names(paramname_file,
                                               params_in_header)
    self.column_names = list(self.parameters)
    # BUG FIX: insert at the resolved self.* indices — the raw arguments
    # are wrong when mult_column is None (insert(None, ...) raises
    # TypeError, and lnlike_column is off by one after the prepended
    # weight column).
    self.column_names.insert(self.mult_column, 'mult')
    if self.lnlike_column is not None:
        self.column_names.insert(self.lnlike_column, '-ln(L)')
    self.burn_in = burn_in
    if burn_in is not None:
        self.remove_burn_in_samples()
def __init__(self, name, path=None, save=True):
    """Set up a named session, restoring settings from an existing log.

    Parameters
    ----------
    name : str
        Session name; also the base name of the log file.
    path : str, optional
        Directory in which the named log must exist. If omitted, the
        dated directories under 'Logs' are searched newest-first for a
        log with this name.
    save : bool, optional
        If True, session state is written to a dated log file.
    """
    self.settings = {}
    self.history_file = '.session_history'
    self.name = name
    self.save = save
    if save:
        self.log_file = os.path.join('Logs', time.strftime('%Y-%m-%d'), name)
    self.plot = None
    self.pdfs = []
    self.settings['pdfs'] = {}
    # check for file with inputs from all previous sessions
    # (e.g. chains, likelihoods, joint pdfs) and load it
    self.load_history()
    log_reader = None
    # if path is given, search it for the named log (error if not found)
    if path:
        old_log_file = os.path.join(path, name)
        log_reader = utils.open_if_exists(old_log_file, 'rb')
    # if no path given, search multiple paths for named log file
    # (sort date directories in reverse order so newest
    # log files are found first)
    else:
        log_paths = sorted(os.listdir('Logs'), reverse=True)
        for x in log_paths:
            p = os.path.join('Logs', x)
            old_log_file = os.path.join(p, name)
            if os.path.isdir(p) and os.path.isfile(old_log_file):
                log_reader = open(old_log_file, 'rb')
                break
    # if existing log found, set up environment using old settings
    # (single-argument print(...) is identical on Python 2 and 3)
    if log_reader:
        print('Using settings from ' + old_log_file)
        self.log_file = old_log_file
        self.load_log(log_reader)
        # NOTE(review): log_reader is never closed here — confirm that
        # load_log closes it, or close it explicitly.
    # if not found, notify that new log file is assumed
    elif self.save:
        print('No log file found. Creating new file:\n ' + self.log_file)
    for attr in ['name', 'log_file']:
        # BUG FIX: with save=False and no existing log, self.log_file is
        # never set and getattr would raise AttributeError; skip unset
        # attributes instead of crashing.
        if hasattr(self, attr):
            self.settings[attr] = getattr(self, attr)