def load_obliquity_file(filename):
    """Load an obliquity-of-the-ecliptic data file into a dictionary.

    Lines beginning with "Obliquity of the ecliptic" (a header line) are
    skipped, as are comment lines starting with "#". Each remaining line
    contributes one entry: the first field is the key and the second field
    is interpreted as an angle in arcseconds.

    Parameters
    ----------
    filename : str
        Name of the obliquity data file to read.

    Returns
    -------
    dict
        Maps the first field of each data line to an astropy Quantity
        in arcseconds.
    """
    records = (
        entry.split()
        for entry in interesting_lines(lines_of(filename), comments="#")
        if not entry.startswith("Obliquity of the ecliptic")
    )
    return {fields[0]: float(fields[1]) * u.arcsecond for fields in records}
def parse_parfile(parfile):
    """Group the lines of a .par file (or .par-style StringIO) by parameter.

    Parameters
    ----------
    parfile : str or file-like object
        Input .par file name or string contents.

    Returns
    -------
    dict
        Maps each upper-cased parameter name to the list of
        whitespace-normalized remainder strings of every line that
        started with that parameter.
    """
    grouped = defaultdict(list)
    for raw in interesting_lines(lines_of(parfile), comments=("#", "C ")):
        key, *rest = raw.split()
        grouped[key.upper()].append(" ".join(rest))
    return grouped
def choose_model(parfile, category_order=None, name=None,
                 check_for_missing_parameters=False):
    """Determine which model components are appropriate for parfile.

    Reads the par file once, recording every parameter line, then asks one
    instance of each registered Component subclass whether it recognizes
    the file. The accepted components (at most one per category) are
    assembled, in ``category_order``, into a TimingModel; any prefixed or
    masked parameters left over in the par file (e.g. JUMP2, EFAC3) are
    instantiated from their prefix parameter and added to the model.

    Parameters
    ----------
    parfile : str or file-like
        Passed to ``lines_of``; the par file to inspect.
    category_order : list, optional
        Order in which component categories are placed in the model;
        defaults to ``DEFAULT_ORDER``. Categories not listed are appended
        in sorted order.
    name : str, optional
        Name for the resulting TimingModel; defaults to the par file's
        basename when ``parfile`` is a string, otherwise "".
    check_for_missing_parameters : bool, optional
        Accepted but not currently used in this function body.

    Returns
    -------
    TimingModel
        The assembled model.

    Raises
    ------
    ValueError
        If more than one model in a category matches the par file, the
        BINARY line is malformed, two prefix/mask parameters share an
        alias, a prefixed parameter cannot be attributed to any known
        prefix, or a duplicate parameter would be added.
    UnknownBinaryModel
        If a BINARY line is present but no pulsar_system component
        accepted the par file.
    """
    if name is None:
        if isinstance(parfile, str):
            name = os.path.basename(parfile)
        else:
            name = ""
    if category_order is None:
        category_order = DEFAULT_ORDER
    # Index every registered component class by its category.
    models_by_category = defaultdict(list)
    for k, c_type in Component.component_types.items():
        models_by_category[c_type.category].append(c_type)
    par_dict = {}
    par_lines = []
    # Parameters that may legitimately appear multiple times; repeats get
    # a numeric suffix (JUMP -> JUMP1, JUMP2, ...) so they don't collide
    # in par_dict.
    multi_tags = set([
        "JUMP", "ECORR", "T2EFAC", "T2EQUAD", "EQUAD", "EFAC", "DMJUMP",
        "DMEFAC", "DMEQUAD",
    ])
    multi_line = Counter()
    for l in interesting_lines(lines_of(parfile), comments=("#", "C ")):
        ll = l.split()
        k = ll[0]
        if k in multi_tags:
            multi_line[k] += 1
            k = k + str(multi_line[k])
        if k in par_dict:
            # FIXME: what happens with JUMPs?
            # NOTE(review): later occurrences overwrite earlier ones below;
            # only an info message records the collision.
            log.info("Lines with duplicate keys in par file: {} and {}".format(
                [k] + par_dict[k], ll))
        par_dict[k] = ll[1:]
        par_lines.append(l)
    # Within each category, instantiate every candidate and keep the one
    # (if any) that claims to be present in the par file.
    models_to_use = {}
    for category, models in models_by_category.items():
        acceptable = []
        for m_type in models:
            m = m_type()
            if m.is_in_parfile(par_dict):
                acceptable.append(m)
        if len(acceptable) > 1:
            raise ValueError(
                "Multiple models are compatible with this par file: {}".format(
                    acceptable))
        if acceptable:
            models_to_use[category] = acceptable[0]
    # A BINARY line must correspond to exactly one recognized binary model.
    if "BINARY" in par_dict:
        vals = par_dict["BINARY"]
        if len(vals) != 1:
            raise ValueError("Mal-formed binary model selection: {}".format(
                repr(" ".join(["BINARY"] + vals))))
        (bm, ) = vals
        if "pulsar_system" not in models_to_use:
            # Either we're missing parameters or the model is bogus
            # FIXME: distinguish
            raise UnknownBinaryModel(
                "Unknown binary model requested in par file: {}".format(bm))
        # FIXME: consistency check - the componens actually chosen should know the name bm
    # Emit components in the requested category order; anything not in
    # category_order is appended afterwards in sorted-category order.
    models_in_order = []
    for category in category_order:
        try:
            models_in_order.append(models_to_use.pop(category))
        except KeyError:
            pass
    models_in_order.extend(v for k, v in sorted(models_to_use.items()))
    tm = TimingModel(name, models_in_order)
    # FIXME: this should go in TimingModel for when you try to
    # add conflicting components
    # Map every prefix (and prefix alias) of the model's prefix/mask
    # parameters back to the parameter that owns it.
    alias_map = {}
    for prefix_type in ["prefixParameter", "maskParameter"]:
        for pn in tm.get_params_of_type_top(prefix_type):
            par = getattr(tm, pn)
            for a in [par.prefix] + par.prefix_aliases:
                if a in alias_map:
                    raise ValueError(
                        "Two prefix/mask parameters have the same "
                        "alias {}: {} and {}".format(a, alias_map[a], par))
                alias_map[a] = par
    # Remove everything the chosen components already handle (including
    # parameter aliases); what remains must be numbered prefix parameters
    # or mysteries.
    leftover_params = par_dict.copy()
    for k in tm.get_params_mapping():
        leftover_params.pop(k, None)
        for a in getattr(tm, k).aliases:
            leftover_params.pop(a, None)
    for p in leftover_params:
        try:
            pre, idxstr, idxV = split_prefixed_name(p)
            try:
                par = alias_map[pre]
            except KeyError:
                if pre in ignore_prefix:
                    # log.warning("Ignoring unhandled prefix {}".format(pre))
                    continue
                else:
                    raise ValueError(
                        "Mystery parameter {}, prefix {} with number {}".
                        format(p, pre, idxV))
            component = tm.get_params_mapping()[par.name]
            new_parameter = par.new_param(idxV)
            if hasattr(tm, new_parameter.name):
                raise ValueError("Received duplicate parameter {}".format(
                    new_parameter.name))
            tm.add_param_from_top(new_parameter, component)
            # print("added", new_parameter)
        except PrefixError:
            # Not a prefixed name at all; it stays unhandled here.
            pass
    return tm
def read_parfile(self, file):
    """Read values from the specified parfile into the model parameters.

    Parameters
    ----------
    file : str or list or file-like
        The parfile to read from. May be specified as a filename,
        a list of lines, or a readable file-like object.

    Raises
    ------
    ValueError
        If UNITS appears more than once, or if the par file requests
        units other than TDB (e.g. TCB), which PINT does not support.

    Notes
    -----
    Lines that no component recognizes are collected and reported as
    warnings rather than errors. Repeated parameters (other than UNITS)
    get a numeric suffix appended (e.g. JUMP -> JUMP2) before being
    offered to the components again. ``self.setup()`` is called at the
    end to run the components' post-read consistency checks.
    """
    repeat_param = defaultdict(int)
    param_map = self.get_params_mapping()
    comps = self.components.copy()
    comps['timing_model'] = self
    # None: no UNITS/EPHVER seen yet; False: TDB explicitly requested;
    # otherwise holds the offending line for the error message below.
    wants_tcb = None
    stray_lines = []
    for li in interesting_lines(lines_of(file), comments=("#", "C ")):
        k = li.split()
        name = k[0].upper()
        if name == 'UNITS':
            if name in repeat_param:
                raise ValueError("UNITS is repeated in par file")
            else:
                repeat_param[name] += 1
            if len(k) > 1 and k[1] == 'TDB':
                wants_tcb = False
            else:
                # Anything other than "UNITS TDB" implies unsupported units.
                wants_tcb = li
            continue
        if name == 'EPHVER':
            # EPHVER != 2 nominally implies TCB, but only if UNITS hasn't
            # already settled the question.
            if len(k) > 1 and k[1] != '2' and wants_tcb is None:
                wants_tcb = li
            log.warning("EPHVER %s does nothing in PINT" % k[1])
            #actually people expect EPHVER 5 to work
            #even though it's supposed to imply TCB which doesn't
            continue
        # Disambiguate repeated parameters by appending a count suffix.
        repeat_param[name] += 1
        if repeat_param[name] > 1:
            k[0] = k[0] + str(repeat_param[name])
            li = ' '.join(k)
        # Offer the line to every parameter of every component; record
        # which ones consumed it.
        used = []
        for p, c in param_map.items():
            if getattr(comps[c], p).from_parfile_line(li):
                used.append((c, p))
        if len(used) > 1:
            log.warning("More than one component made use of par file "
                        "line {!r}: {}".format(li, used))
        if used:
            continue
        if name in ignore_params:
            log.debug("Ignoring parfile line '%s'" % (li, ))
            continue
        try:
            prefix, f, v = utils.split_prefixed_name(name)
            if prefix in ignore_prefix:
                log.debug("Ignoring prefix parfile line '%s'" % (li, ))
                continue
        except utils.PrefixError:
            pass
        stray_lines.append(li)
    if wants_tcb:
        raise ValueError(
            "Only UNITS TDB supported by PINT but parfile has {}".format(
                wants_tcb))
    if stray_lines:
        for l in stray_lines:
            log.warning("Unrecognized parfile line {!r}".format(l))
        # Components dropped during model construction may explain why
        # some lines went unrecognized.
        for name, param in getattr(self, "discarded_components", []):
            log.warning("Model component {} was rejected because we "
                        "didn't find parameter {}".format(name, param))
        log.warning("Final object: {}".format(self))
    # The "setup" functions contain tests for required parameters or
    # combinations of parameters, etc, that can only be done
    # after the entire parfile is read
    self.setup()
def test_interesting_lines_input_validation():
    """A comment marker with surrounding whitespace must be rejected."""
    with pytest.raises(ValueError):
        # Consuming the generator forces the validation to run.
        list(interesting_lines([""], comments=" C "))
def test_interesting_lines(lines, goodlines, comments):
    """Check various patterns of text and comments."""
    extracted = interesting_lines(lines, comments=comments)
    assert list(extracted) == goodlines
def compare_t2_observatories_dat(t2dir=None):
    """Read a tempo2 observatories.dat file and compare with PINT.

    Produces a report including lines that can be added to PINT's
    observatories.py to add any observatories unknown to PINT.

    Parameters
    ==========
    t2dir : str, optional
        Path to the TEMPO2 runtime dir; if not provided, look in the
        TEMPO2 environment variable.

    Returns
    =======
    dict
        The dictionary has two entries, under the keys "different" and
        "missing"; each is a list of observatories found in the TEMPO2
        files that disagree with what PINT expects. Each entry in these
        lists is again a dict, with various properties of the observatory,
        including a line that might be suitable for starting an entry in
        the PINT observatory list.

    Raises
    ======
    ValueError
        If no TEMPO2 directory is provided and the TEMPO2 environment
        variable is unset, or if a line of observatories.dat does not
        contain the expected five whitespace-separated fields.
    """
    if t2dir is None:
        t2dir = os.getenv("TEMPO2")
        if t2dir is None:
            raise ValueError(
                "TEMPO2 directory not provided and TEMPO2 environment variable not set"
            )
    filename = os.path.join(t2dir, "observatory", "observatories.dat")
    report = defaultdict(list)
    with open(filename) as f:
        for line in interesting_lines(f, comments="#"):
            try:
                x, y, z, full_name, short_name = line.split()
            except ValueError:
                raise ValueError(f"unrecognized line '{line}'")
            x, y, z = float(x), float(y), float(z)
            full_name, short_name = full_name.lower(), short_name.lower()
            # A ready-to-paste entry for PINT's observatory list, used in
            # both the "missing" and "different" reports.
            topo_obs_entry = textwrap.dedent(f"""
                TopoObs(
                    name='{full_name}',
                    aliases=['{short_name}'],
                    itrf_xyz=[{x}, {y}, {z}],
                )
                """)
            try:
                obs = get_observatory(full_name)
            except KeyError:
                try:
                    obs = get_observatory(short_name)
                except KeyError:
                    # Unknown under either name: report it as missing and
                    # move on; no position comparison is possible.
                    report["missing"].append(
                        dict(name=full_name, topo_obs_entry=topo_obs_entry))
                    continue
            loc = EarthLocation.from_geocentric(x * u.m, y * u.m, z * u.m)
            oloc = obs.earth_location_itrf()
            d = earth_location_distance(loc, oloc)
            if d > 1 * u.m:
                # Known to PINT but more than a meter away from the TEMPO2
                # position: record both locations for inspection.
                report["different"].append(
                    dict(
                        name=full_name,
                        t2_short_name=short_name,
                        t2=loc.to_geodetic(),
                        pint=oloc.to_geodetic(),
                        topo_obs_entry=topo_obs_entry,
                        pint_name=obs.name,
                        # getattr with a default replaces the old
                        # hasattr-then-access double lookup.
                        pint_tempo_code=getattr(obs, "tempo_code", ""),
                        pint_aliases=obs.aliases,
                        position_difference=d,
                        pint_origin=obs.origin,
                    ))
    # Check whether TEMPO alias - first two letters - works and is distinct from others?
    # Check all t2 aliases also work for PINT?
    # Check ITOA code?
    # Check time corrections?
    return report