def mkScatter2D(s1):
    """Make a Scatter2D from a Scatter1D by treating the points as y values
    and adding dummy unit-width x bins.

    The n-th 1D point becomes a 2D point at x = n + 0.5 with x errors
    (0.5, 0.5); the 1D point's value becomes y and its (min, max) range
    becomes the asymmetric y errors.

    Parameters:
        s1: a yoda.Scatter1D whose points carry x, xMin, xMax.

    Returns:
        A new yoda.Scatter2D with all annotations copied from ``s1`` and
        the "Type" annotation set to "Scatter2D".
    """
    rtn = yoda.Scatter2D()
    # Copy every annotation, then overwrite the type tag.
    for a in s1.annotations:
        rtn.setAnnotation(a, s1.annotation(a))
    rtn.setAnnotation("Type", "Scatter2D")
    xval = 0.5
    for point in s1.points:
        y = point.x
        ey_m = point.x - point.xMin  # downward y error
        ey_p = point.xMax - point.x  # upward y error
        # YODA error pairs are ordered (minus, plus). The original code
        # passed (ey_p, ey_m), silently swapping asymmetric errors — fixed.
        pt = yoda.Point2D(xval, y, (0.5, 0.5), (ey_m, ey_p))
        rtn.addPoint(pt)
        xval = xval + 1.0
    return rtn
def toScatter2D(self, manpath=None):
    """Convert this binned object into a yoda.Scatter2D.

    Each bin becomes one point: x is the bin midpoint, y its value,
    the x errors run from the midpoint to the bin edges, and the bin's
    own ``errs`` are used as the y errors.

    Parameters:
        manpath: optional path override for the output scatter; defaults
            to ``self.path``.

    Returns:
        A yoda.Scatter2D built from ``self.bins``.
    """
    path = manpath if manpath is not None else self.path
    # BUG FIX: the original used xrange(), which no longer exists in
    # Python 3; iterate the bins directly instead of by index.
    points = [(b.xmid, b.val, [b.xmid - b.xmin, b.xmax - b.xmid], b.errs)
              for b in self.bins]
    import yoda
    return yoda.Scatter2D(points, path, path)
def envelope2YODA(fvals, fout_up="envelope_up.yoda", fout_dn="envelope_dn.yoda", wfile=None):
    """Write the per-bin (vmin, vmax) envelope of an approximation set as
    two YODA files: one scatter per observable for the upper edge and one
    for the lower edge, both with zero y errors.

    Parameters:
        fvals: JSON approximation-set file; must also carry the bin edges
            under the "__xmin"/"__xmax" keys.
        fout_up: output path for the upper-envelope scatters.
        fout_dn: output path for the lower-envelope scatters.
        wfile: optional observable file restricting which histograms are
            written (only names also present in fvals are kept).
    """
    import json
    import yoda
    import apprentice as app

    aset = app.AppSet(fvals)
    upper = np.array([r.vmax for r in aset._RA])
    lower = np.array([r.vmin for r in aset._RA])
    zero_err = np.zeros_like(upper)

    # A bin id looks like "<histoname>#<binnumber>"; strip the bin suffix.
    hids = np.array([b.split("#")[0] for b in aset._binids])
    hnames = sorted(set(hids))
    if wfile is not None:
        observables = sorted([x for x in set(app.io.readObs(wfile)) if x in hnames])
    else:
        observables = hnames

    # Bin centres and half-widths come from the edges stored in the JSON.
    with open(fvals) as f:
        rd = json.load(f)
    xmin = np.array(rd["__xmin"])
    xmax = np.array(rd["__xmax"])
    DX = 0.5 * (xmax - xmin)
    X = xmin + DX

    scatters_up = []
    scatters_dn = []
    for obs in observables:
        sel = np.where(hids == obs)
        pts_up = [yoda.Point2D(x, y, dx, dy)
                  for x, y, dx, dy in zip(X[sel], upper[sel], DX[sel], zero_err[sel])]
        scatters_up.append(yoda.Scatter2D(pts_up, obs, obs))
        pts_dn = [yoda.Point2D(x, y, dx, dy)
                  for x, y, dx, dy in zip(X[sel], lower[sel], DX[sel], zero_err[sel])]
        scatters_dn.append(yoda.Scatter2D(pts_dn, obs, obs))

    yoda.write(scatters_up, fout_up)
    yoda.write(scatters_dn, fout_dn)
def prediction2YODA(fvals, Peval, fout="predictions.yoda", ferrs=None, wfile=None):
    """Evaluate an approximation set at one parameter point and write the
    per-observable predictions as YODA Scatter2D objects.

    Parameters:
        fvals: JSON approximation-set file; must also carry the bin edges
            under the "__xmin"/"__xmax" keys.
        Peval: parameter point — either a dict keyed by parameter name or
            an already-ordered sequence of values.
        fout: output YODA file path.
        ferrs: optional approximation set for the y errors; zero errors
            are used when absent.
        wfile: optional observable file restricting which histograms are
            written (only names also present in fvals are kept).
    """
    import json
    import yoda
    import apprentice as app

    aset = app.AppSet(fvals)
    eset = app.AppSet(ferrs) if ferrs is not None else None

    # A dict is reordered to the approximation's own parameter ordering.
    if type(Peval) == dict:
        P = [Peval[x] for x in aset._SCLR.pnames]
    else:
        P = Peval
    Y = aset.vals(P)
    dY = eset.vals(P) if eset is not None else np.zeros_like(Y)

    # A bin id looks like "<histoname>#<binnumber>"; strip the bin suffix.
    hids = np.array([b.split("#")[0] for b in aset._binids])
    hnames = sorted(set(hids))
    if wfile is not None:
        observables = sorted([x for x in set(app.io.readObs(wfile)) if x in hnames])
    else:
        observables = hnames

    # Bin centres and half-widths come from the edges stored in the JSON.
    with open(fvals) as f:
        rd = json.load(f)
    xmin = np.array(rd["__xmin"])
    xmax = np.array(rd["__xmax"])
    DX = 0.5 * (xmax - xmin)
    X = xmin + DX

    scatters = []
    for obs in observables:
        sel = np.where(hids == obs)
        pts = [yoda.Point2D(x, y, dx, dy)
               for x, y, dx, dy in zip(X[sel], Y[sel], DX[sel], dY[sel])]
        scatters.append(yoda.Scatter2D(pts, obs, obs))

    yoda.write(scatters, fout)
import sys, re, yoda, os

# Build one Scatter2D per (measure, observable) showing how the separation
# measure depends on the jet radius R, then write them all to one file.
if len(sys.argv) != 3:
    sys.exit("Usage: produce-Rdependence-data.py separation_table output_file")

table = sys.argv[1]

measures = ["grej20", "grej50", "qrej20", "qrej50", "srej", "I", "I2"]
Rvalues = [2, 4, 6, 8, 10]
observables = ["GA_00_00", "GA_20_00", "GA_10_05", "GA_10_10", "GA_10_20"]

scatters = []
for measure in measures:
    for observable in observables:
        scatter = yoda.Scatter2D(
            title=measure,
            path="/Rdependence/{}_{}".format(measure, observable))
        for Rval in Rvalues:
            # NOTE(review): the command string is fed to a shell via
            # os.popen; argv[1] should be trusted input only.
            command = "./get-separation.sh {} {}_R{} {}".format(
                table, observable, Rval, measure)
            value = float(os.popen(command).read().rstrip())
            # R is stored in tenths (R2 -> 0.2, ...), half-bin width 0.1.
            scatter.addPoint(0.1 * Rval, value, xerrs=0.1)
        scatters.append(scatter)

yoda.write(scatters, sys.argv[2])
import sys, re, yoda, os

# Build one Scatter2D per (measure, R) with one point per observable
# (x = observable index), then write them all to one file.
if len(sys.argv) != 3:
    sys.exit("Usage: produce-separation-data.py separation_table output_file.yoda")

table = sys.argv[1]

measures = ["grej20", "grej50", "qrej20", "qrej50", "srej", "I", "I2"]
#Rvalues=["R2","R4","R6","R8", "R10"]
Rvalues = ["R6"]
observables = ["GA_00_00", "GA_20_00", "GA_10_05", "GA_10_10", "GA_10_20"]

scatters = []
for measure in measures:
    for Rval in Rvalues:
        scatter = yoda.Scatter2D(
            title=measure,
            path="/separation/{}_{}".format(measure, Rval))
        for index, observable in enumerate(observables):
            # NOTE(review): the command string is fed to a shell via
            # os.popen; argv[1] should be trusted input only.
            command = "./get-separation.sh {} {}_{} {}".format(
                table, observable, Rval, measure)
            value = float(os.popen(command).read().rstrip())
            scatter.addPoint(index, value, xerrs=0.5)
        scatters.append(scatter)

yoda.write(scatters, sys.argv[2])
def resolve_data_object(
        filename_or_data_object,
        name,
        divide_by=None,
        multiply_by=None,
        subtract_by=None,
        assume_correlated=False,
        use_correlated_division=None,  # this is only for backwards-compatibility
        rebin_count=1,
        rebin_begin=0):
    """Take passed data object or loads a data object from a YODA file, and
    return it after dividing (or multiplying) by divide_by (multiply_by).

    Pipeline, in order: (1) load/clone the object, (2) optionally rebin it,
    (3) optionally subtract ``subtract_by``, (4) optionally divide by
    ``divide_by`` or multiply by ``multiply_by``.  The operands may each be a
    filename (resolved recursively via this function), a data object, or —
    for divide/multiply — a plain float.

    ``assume_correlated`` switches the error treatment of steps (3) and (4)
    from independent-error propagation to a fully-correlated prescription.
    Steps (3) and (4) convert the object to a yoda Scatter2D.
    """
    if use_correlated_division is not None:
        # Old keyword kept alive for callers that predate assume_correlated.
        assume_correlated = use_correlated_division
        print(
            "Heppyplotlib deprecation warning: Use assume_correlated instead of use_correlated_division"
        )
    # Accept either a path into a YODA file or an already-loaded object;
    # clone the latter so the caller's object is never mutated.
    if isinstance(filename_or_data_object, str):
        data_object = yoda.readYODA(filename_or_data_object)[name]
    else:
        data_object = filename_or_data_object.clone()
    if not rebin_count == 1:
        if data_object.type == "Histo1D":
            # Histograms know how to rebin themselves.
            data_object.rebin(rebin_count, begin=rebin_begin)
        else:
            # Scatter plots carry no fill statistics, so rebinning has to
            # re-derive a histogram interpretation from the point errors.
            print(
                "WARNING: Will assume statistical errors for rebinning a scatter plot"
            )
            x_coords = [point.x for point in data_object.points]
            y_coords = get_scatter2d_y_coords(data_object)
            x_errs = []
            x_errs.append([point.xErrs[0] for point in data_object.points])
            x_errs.append([point.xErrs[1] for point in data_object.points])
            # The x ranges must tile the axis without gaps, otherwise the
            # points cannot be read as histogram bins.
            if not are_points_with_errors_adjacent(x_coords, x_errs):
                raise Exception(
                    "Points must be adjacent for interpreting the scatter plots as a histogram"
                )
            # Points before rebin_begin are carried over unmerged.
            new_points = data_object.points[0:rebin_begin]
            i = 0
            while rebin_begin + i * rebin_count < len(data_object.points) - 1:
                first_index = rebin_begin + i * rebin_count
                last_index = min(first_index + rebin_count, len(data_object.points))
                points = data_object.points[first_index:last_index]
                # New bin spans from the left edge of the first merged point
                # to the right edge of the last one.
                left_edge = points[0].x - points[0].xErrs[0]
                right_edge = points[-1].x + points[-1].xErrs[1]
                length = right_edge - left_edge
                new_x = left_edge + length / 2.0
                new_xerrs = length / 2.0
                new_y = 0.0
                new_yerrs = np.array([0.0, 0.0])
                for point in points:
                    left_edge = point.x - point.xErrs[0]
                    right_edge = point.x + point.xErrs[1]
                    # Width-weighted average of y; errors added in
                    # quadrature with the same width weights (statistical
                    # assumption announced above).
                    new_y += (right_edge - left_edge) * point.y
                    new_yerrs += ((right_edge - left_edge) * np.array(point.yErrs))**2
                new_y /= length
                new_yerrs = np.sqrt(new_yerrs) / length
                new_points.append(
                    yoda.Point2D(x=new_x, y=new_y, xerrs=new_xerrs, yerrs=new_yerrs))
                i = i + 1
            # Replace the object with a fresh scatter holding the merged points.
            data_object = yoda.Scatter2D(path=data_object.path, title=data_object.title)
            for point in new_points:
                data_object.addPoint(point)
    if subtract_by is not None:
        data_object = yoda.mkScatter(data_object)
        # The operand may itself be a filename or data object; resolve it
        # the same way (recursively) and compare point by point.
        operand = resolve_data_object(subtract_by, name).mkScatter()
        for point, operand_point in zip(data_object.points, operand.points):
            new_y = point.y - operand_point.y
            if assume_correlated:
                # NOTE(review): this subtracts the operand's central value
                # from each error, not the operand's error — looks
                # suspicious (operand_point.yErrs expected?); confirm intent.
                new_y_errs = [y_err - operand_point.y for y_err in point.yErrs]
            if not assume_correlated:
                # assume that we subtract an independent data set, use error propagation
                new_y_errs = []
                for y_err, operand_y_err in zip(point.yErrs, operand_point.yErrs):
                    err2 = 0.0
                    if point.y != 0.0:
                        err2 += (y_err)**2
                    err2 += (operand_y_err)**2
                    new_y_errs.append(np.sqrt(err2))
            point.y = new_y
            point.yErrs = new_y_errs
    if divide_by is not None or multiply_by is not None:
        data_object = yoda.mkScatter(data_object)
        # A float operand scales every point uniformly; anything else is
        # resolved to a scatter and applied point by point.
        if isinstance(divide_by, float) or isinstance(multiply_by, float):
            for point in data_object.points:
                if divide_by is not None:
                    new_y = point.y / divide_by
                    new_y_errs = [y_err / divide_by for y_err in point.yErrs]
                else:
                    new_y = point.y * multiply_by
                    new_y_errs = [y_err * multiply_by for y_err in point.yErrs]
                point.y = new_y
                point.yErrs = new_y_errs
        else:
            if divide_by is not None:
                operand = resolve_data_object(divide_by, name).mkScatter()
            else:
                operand = resolve_data_object(multiply_by, name).mkScatter()
            for point, operand_point in zip(data_object.points, operand.points):
                if operand_point.y == 0.0:
                    # Avoid division by zero: a zero denominator yields the
                    # trivial ratio 1.0 (or product 0.0) with no error.
                    if divide_by is not None:
                        new_y = 1.0
                    else:
                        new_y = 0.0
                    new_y_errs = [0.0, 0.0]
                else:
                    if divide_by is not None:
                        new_y = point.y / operand_point.y
                        if assume_correlated:
                            # Correlated ratio: scale the numerator errors
                            # by the denominator's central value only.
                            new_y_errs = [
                                y_err / operand_point.y for y_err in point.yErrs
                            ]
                    else:
                        new_y = point.y * operand_point.y
                        if assume_correlated:
                            new_y_errs = [
                                y_err * operand_point.y for y_err in point.yErrs
                            ]
                    if not assume_correlated:
                        # assume that we divide/multiply through an independent data set, use error propagation
                        rel_y_errs = []
                        for y_err, operand_y_err in zip(
                                point.yErrs, operand_point.yErrs):
                            err2 = 0.0
                            if point.y != 0.0:
                                err2 += (y_err / point.y)**2
                            err2 += (operand_y_err / operand_point.y)**2
                            rel_y_errs.append(np.sqrt(err2))
                        new_y_errs = [
                            rel_y_err * new_y for rel_y_err in rel_y_errs
                        ]
                point.y = new_y
                point.yErrs = new_y_errs
    return data_object