def save_params(file: str, dictionary: dict):
    file = u.sanitise_file_ext(filename=file, ext=".yaml")
    u.debug_print(1, 'Saving parameter file to ' + str(file))
    u.debug_print(2, "params.save_params: dictionary ==", dictionary)
    with open(file, 'w') as f:
        yaml.dump(dictionary, f)
def sensitivity_config(tmpdirec, observation_config):
    with open(path.join(example_configs, "sensitivity_hera.yml")) as fl:
        # explicit Loader is required by PyYAML >= 6
        sensitivity = yaml.load(fl, Loader=yaml.FullLoader)

    sensitivity["observation"] = observation_config

    with open(path.join(tmpdirec, "sensitivity.yml"), "w") as fl:
        yaml.dump(sensitivity, fl)

    return path.join(tmpdirec, "sensitivity.yml")
def observation_config(tmpdirec, observatory_config):
    with open(path.join(example_configs, "observation_hera.yml")) as fl:
        # explicit Loader is required by PyYAML >= 6
        observation = yaml.load(fl, Loader=yaml.FullLoader)

    observation["observatory"] = observatory_config

    with open(path.join(tmpdirec, "observation.yml"), "w") as fl:
        yaml.dump(observation, fl)

    return path.join(tmpdirec, "observation.yml")
def calc_sense(
    configfile,
    array_file,
    direc,
    fname,
    thermal,
    samplevar,
    write_significance,
    plot,
    plot_title,
    prefix,
):
    """Calculate the sensitivity of an array.

    This is the primary command of 21cmSense, and can be run independently
    for a complete sensitivity calculation.
    """
    # If given an array-file, overwrite the "observation" parameter
    # in the config with the pickled array file, which has already
    # calculated the uv_coverage, hopefully.
    if array_file is not None:
        with open(configfile) as fl:
            cfg = yaml.load(fl, Loader=yaml.FullLoader)
        cfg["observation"] = path.abspath(array_file)
        configfile = tempfile.mktemp()
        with open(configfile, "w") as fl:
            yaml.dump(cfg, fl)

    sensitivity = sense.PowerSpectrum.from_yaml(configfile)

    logger.info(
        f"Used {len(sensitivity.k1d)} bins between "
        f"{sensitivity.k1d.min()} and {sensitivity.k1d.max()}"
    )

    sensitivity.write(filename=fname, thermal=thermal, sample=samplevar, prefix=prefix)

    if write_significance:
        sig = sensitivity.calculate_significance(thermal=thermal, sample=samplevar)
        logger.info(f"Significance of detection: {sig}")

    if plot and HAVE_MPL:
        fig = sensitivity.plot_sense_1d(thermal=thermal, sample=samplevar)
        if plot_title:
            plt.title(plot_title)
        # fix: the result of this expression was previously discarded
        prefix = prefix + "_" if prefix else ""
        fig.savefig(
            f"{prefix}{sensitivity.foreground_model}_"
            f"{sensitivity.observation.frequency:.3f}.png"
        )
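# A minimal, self-contained sketch of the config-override step inside
# calc_sense above: load a YAML config, swap in the "observation" entry,
# and write the result to a temporary file. Only PyYAML is assumed; the
# array-file path is a hypothetical placeholder.
import tempfile

import yaml

cfg = yaml.safe_load("observation: observation.yml\nfname: sense.h5")
cfg["observation"] = "/abs/path/to/array_file.pkl"  # hypothetical path

# NamedTemporaryFile avoids the race that makes tempfile.mktemp deprecated
with tempfile.NamedTemporaryFile("w", suffix=".yml", delete=False) as fl:
    yaml.safe_dump(cfg, fl)
    configfile = fl.name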
def test_skycoord(frame):
    c = SkyCoord([[1, 2], [3, 4]], [[5, 6], [7, 8]], unit='deg', frame=frame,
                 obstime=Time('2016-01-02'),
                 location=EarthLocation(1000, 2000, 3000, unit=u.km))
    cy = load(dump(c))
    compare_coord(c, cy)
def test_load_all():
    t = _get_time()
    unit = u.m / u.s
    c = SkyCoord([[1, 2], [3, 4]], [[5, 6], [7, 8]], unit='deg', frame='fk4',
                 obstime=Time('2016-01-02'),
                 location=EarthLocation(1000, 2000, 3000, unit=u.km))

    # Make a multi-document stream
    out = ('---\n' + dump(t)
           + '---\n' + dump(unit)
           + '---\n' + dump(c))

    ty, unity, cy = list(load_all(out))

    compare_time(t, ty)
    compare_coord(c, cy)
    assert unity == unit
def test_timedelta():
    t = _get_time()
    dt = t - t + 0.1234556 * u.s
    dty = load(dump(dt))

    assert type(dt) is type(dty)
    for attr in ('shape', 'jd1', 'jd2', 'format', 'scale'):
        assert np.all(getattr(dt, attr) == getattr(dty, attr))
def test_yaml_representer():
    """Test :func:`~astropy.cosmology.io.yaml.yaml_representer`."""
    # test function `representer`
    representer = yaml_representer("!astropy.cosmology.flrw.LambdaCDM")
    assert callable(representer)

    # test the normal method of dumping to YAML
    yml = dump(Planck18)
    assert isinstance(yml, str)
    assert yml.startswith("!astropy.cosmology.flrw.FlatLambdaCDM")
def test_custom_unit(c):
    s = dump(c)
    with pytest.warns(u.UnitsWarning, match=f"'{c!s}' did not parse") as w:
        cy = load(s)
    assert len(w) == 1
    assert isinstance(cy, u.UnrecognizedUnit)
    assert str(cy) == str(c)

    with u.add_enabled_units(c):
        cy2 = load(s)
        assert cy2 is c
def change_yaml_param(file: str = 'project', param: str = None, value=None,
                      update_json=False, quiet: bool = False):
    if not quiet:
        print(f'Setting {param} in file {file} to {value}.')
    if not file.endswith('.yaml'):
        file = file + '.yaml'
    with open(file) as f:
        p = yaml.safe_load(f)
    if param is not None:
        p[param] = value
        with open(file, 'w') as f:
            yaml.dump(p, f)
    if update_json:
        yaml_to_json(file)
        # fix: 'as f' was missing, so json.dump wrote to a closed handle
        with open(file.replace('.yaml', '.json'), 'w') as f:
            json.dump(p, f)
    return p
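# Hedged example for change_yaml_param (file names hypothetical): set
# 'threshold' in project.yaml and mirror the result to project.json.
#
#     p = change_yaml_param(file='project', param='threshold', value=0.5,
#                           update_json=True)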
def test_yaml_constructor():
    """Test :func:`~astropy.cosmology.io.yaml.yaml_constructor`."""
    # test function `constructor`
    constructor = yaml_constructor(FlatLambdaCDM)
    assert callable(constructor)

    # it's too hard to manually construct a node, so we only test dump/load
    # this is also a good round-trip test
    yml = dump(Planck18)
    with u.add_enabled_units(cu):  # needed for redshift units
        cosmo = load(yml)
    assert isinstance(cosmo, FlatLambdaCDM)
    assert cosmo == Planck18
    assert cosmo.meta == Planck18.meta
def test_custom_unit(c):
    s = dump(c)
    with catch_warnings() as w:
        cy = load(s)
    assert len(w) == 1
    assert f"'{c!s}' did not parse" in str(w[0].message)
    assert isinstance(cy, u.UnrecognizedUnit)
    assert str(cy) == str(c)

    with u.add_enabled_units(c):
        with catch_warnings() as w2:
            cy2 = load(s)
        assert len(w2) == 0
        assert cy2 is c
def to_yaml(cosmology, *args):
    """Return the cosmology class, parameters, and metadata as a :mod:`yaml` object.

    Parameters
    ----------
    cosmology : `~astropy.cosmology.Cosmology` subclass instance
    *args
        Not used. Needed for compatibility with
        `~astropy.io.registry.UnifiedReadWriteMethod`

    Returns
    -------
    str
        :mod:`yaml` representation of |Cosmology| object
    """
    return dump(cosmology)
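# Hedged round-trip sketch for to_yaml, assuming astropy is installed:
# this converter backs the unified I/O registry, so the public entry
# point is Cosmology.to_format("yaml").
from astropy.cosmology import Planck18

yml = Planck18.to_format("yaml")  # calls to_yaml under the hood
assert yml.startswith("!astropy.cosmology.flrw.FlatLambdaCDM")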
def test_ndarray_subclasses(c):
    cy = load(dump(c))

    assert np.all(c == cy)
    assert c.shape == cy.shape
    assert type(c) is type(cy)

    cc = 'C_CONTIGUOUS'
    fc = 'F_CONTIGUOUS'
    if c.flags[cc] or c.flags[fc]:
        assert c.flags[cc] == cy.flags[cc]
        assert c.flags[fc] == cy.flags[fc]
    else:
        # Original was not contiguous but round-trip version
        # should be c-contig.
        assert cy.flags[cc]

    if hasattr(c, 'unit'):
        assert c.unit == cy.unit
def tofile(self, fh):
    """Write the header as a yaml-encoded 'header' extension."""
    from astropy.io.misc import yaml
    data = yaml.dump(dict(self))
    fh.create_dataset('header', data=data)
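# Sketch of the storage pattern used by tofile above, assuming h5py and
# astropy are installed: the header dict is serialized with astropy's
# yaml dumper into a single 'header' dataset and recovered with its
# loader. The 'nchan' key and file name are hypothetical.
import h5py
from astropy.io.misc import yaml as astropy_yaml

with h5py.File("example.h5", "w") as fh:
    fh.create_dataset("header", data=astropy_yaml.dump({"nchan": 64}))

with h5py.File("example.h5", "r") as fh:
    header = astropy_yaml.load(fh["header"][()])
assert header["nchan"] == 64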
def run_query(box=None, get_exptime=True, rename_columns=DEFAULT_RENAME,
              sort_column=['obs_id', 'filter'], position_box=True,
              base_query=DEFAULT_QUERY_ASTROQUERY.copy(), **kwargs):
    """
    Run MAST query with astroquery.mast.

    All columns listed at
    https://mast.stsci.edu/api/v0/_c_a_o_mfields.html can be used for
    the query.

    position_box: query on s_ra / s_dec positions rather than position
    coordinates
    """
    # arguments
    frame = inspect.currentframe()
    msg = utils.log_function_arguments(None, frame, 'query.run_query')

    import time
    from astroquery.mast import Observations
    from astropy.coordinates import SkyCoord
    from astropy.io.misc import yaml
    import astropy.units as u

    query_args = {}
    for k in base_query:
        query_args[k] = base_query[k]

    # JWST "expected data" won't have datasets to query for actual exptimes...
    if 'obs_collection' in base_query:
        if 'JWST' in base_query['obs_collection']:
            get_exptime = False

    for k in kwargs:
        if k == 'instruments':
            query_args['instrument_name'] = kwargs[k]
        elif k == 'proposal_id':
            query_args['proposal_id'] = ['{0}'.format(p) for p in kwargs[k]]
        elif k == 'extensions':
            continue
        else:
            query_args[k] = kwargs[k]

    if box is not None:
        ra, dec, radius = box
        #coo = SkyCoord(ra*u.deg, dec*u.deg)
        #query_args['coordinates'] = coo
        #query_args['radius'] = radius*u.arcmin
        cosd = np.cos(box[1] / 180 * np.pi)
        query_args['s_ra'] = box[0] + np.array([-1, 1]) * box[2] / 60 / cosd
        query_args['s_dec'] = box[1] + np.array([-1, 1]) * box[2] / 60

    try:
        tab = Observations.query_criteria(**query_args)
    except Exception:
        return query_args

    tab.meta['qtime'] = time.ctime(), 'Query timestamp'

    if box is not None:
        tab.meta['boxra'] = ra, 'Query RA, degrees'
        tab.meta['boxdec'] = dec, 'Query Decl., degrees'
        tab.meta['boxrad'] = radius, 'Query radius, arcmin'

    str_args = yaml.dump(query_args).replace('\n', ';;')
    tab.meta['obsquery'] = str_args, 'Full query string, replace ;; with newline'

    if len(tab) == 0:
        return tab

    tab = modify_table(tab, get_exptime=get_exptime,
                       rename_columns=rename_columns,
                       sort_column=sort_column)

    return tab
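# Hypothetical invocation of run_query (requires astroquery and network
# access, so it is left as a comment): query a 2-arcmin box around
# RA, Dec = (150.1, 2.2) degrees, restricted to one proposal.
#
#     tab = run_query(box=[150.1, 2.2, 2.0],
#                     instruments=['WFC3/IR'],
#                     proposal_id=[12345])
#     print(tab.meta['obsquery'])  # yaml-encoded query, ';;' for newlines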
def test_unit(c):
    cy = load(dump(c))
    if isinstance(c, u.CompositeUnit):
        assert c == cy
    else:
        assert c is cy
def test_numpy_types(c):
    cy = load(dump(c))
    assert c == cy
def test_serialized_column():
    sc = SerializedColumn({'name': 'hello', 'other': 1, 'other2': 2.0})
    scy = load(dump(sc))
    assert sc == scy
def submit_lsf(script, config, option, njobs, **kwargs):
    """
    Create a bash script and submit it to the lsf cluster

    Parameters
    ----------
    script: string, full path to python script that is executed on lsf cluster
    config: dict with options to be stored in a yaml file and parsed by python script
    option: some additional option to be parsed by the python script
    njobs: either int: number of cluster array jobs; or list with job numbers

    kwargs
    ------
    queue: string, queue of lsf cluster (default: long)
    n: int, number of processors requested for mpi jobs
    ptile: int, number of processors per host for mpi jobs
    lsb_steps: int, step width for job indices, only applies if njobs is int (default: 1)
    time: string, if queue == time then this determines the cpu running time
        asked for. Format has to be 'hh:mm'
    jname: string, name of lsf cluster job (default: lsf)
    sleep: float, seconds to sleep (default: 10s)
    nolog: bool, if True (default), send standard output and stderr of cluster
        job to /dev/null (note that the output from the python script will
        still be saved)
    log: string, name of log file (default: tmpdir/jname.out)
    err: string, name of err file (default: tmpdir/jname.err)
    dry: bool, if False (default), submit the job to the lsf cluster;
        if True, only do a dry run
    concurrent: int, limit the number of simultaneously running jobs.
        If zero (default): no limit.
    dependency: string, if given, job id of job that needs to have ended
        before current job is started. Should be of the form "myjob1",
        "myjob[1-10]", or "1234", "1234[1-10]", where 1234 is the job id.
    forceJob: str, if not '0', njobs is over-written with the value of this keyword.
    extraDelay: bool, if true, add an extra random delay (between 10 seconds
        and 3 minutes) to sleep in the bash script before python is called.
    max_rjobs: int or None
        maximum jobs that are allowed to run. If exceeded, wait and try again.
    tmpdir: string, local directory for temporary storage of bash script
    logdir: string, local directory for log files
    no_resubmit_running_jobs: bool
        if job array job is running, don't resubmit
    """
    mkdir(kwargs['logdir'])
    mkdir(kwargs['tmpdir'])

    yamlfile = join(kwargs['tmpdir'],
                    '{0[jname]:s}_{1[configname]:s}.yaml'.format(kwargs, config))
    yaml.dump(config, stream=open(yamlfile, 'w'), default_flow_style=False)

    # test yaml file; explicit Loader is required by PyYAML >= 6
    par = yaml.load(open(yamlfile), Loader=yaml.FullLoader)

    bash = make_bash(script, yamlfile, kwargs['logdir'],
                     add_opt=option,
                     sleep=kwargs['sleep'],
                     extrasleep=kwargs['extraDelay'])

    bashScript = join(kwargs['tmpdir'],
                      '{0[jname]:s}_{1[configname]:s}.sh'.format(kwargs, config))
    with open(bashScript, 'w') as f:
        f.write(bash)
    call(['chmod', 'u+x', bashScript])

    if kwargs['forceJob'] == '0':
        if kwargs['no_resubmit_running_jobs'] and (isinstance(njobs, list) or njobs == 1):
            # check if jobs with same name are already running and remove them
            njobs = remove_running_job_from_list(
                kwargs['jname'], njobs if isinstance(njobs, list) else [1])
            if not len(njobs):
                logging.warning("all jobs requested for submission are "
                                "currently running / pending! "
                                "Returning without submission")
                return

        if isinstance(njobs, int):
            if not kwargs['minimumJID']:
                kwargs['minimumJID'] = 1
            njobs = '[{0[minimumJID]:n}-{1:n}:{0[lsb_steps]:n}]'.format(kwargs, njobs)
            nsubmit = 1
        elif isinstance(njobs, list):
            if len(njobs) > 100:
                # fix: use integer division so nsubmit is an int under python 3
                nsubmit = len(njobs) // 100 if len(njobs) % 100 == 0 else len(njobs) // 100 + 1
            else:
                nsubmit = 1
    else:
        njobs = kwargs['forceJob']
        nsubmit = 1

    nsubmit = int(nsubmit)

    for i in range(nsubmit):
        # fix: slicing past the end is safe and keeps the final job;
        # the previous 'else -1' end index dropped the last job number
        chunk = njobs if nsubmit == 1 else njobs[i * 100:(i + 1) * 100]
        if not kwargs['concurrent']:
            command = """bsub -oo {0[log]:s} -eo {0[err]:s} -J "{0[jname]:s}{1}" """.format(
                kwargs, chunk)
        else:
            command = """bsub -oo {0[log]:s} -eo {0[err]:s} -J "{0[jname]:s}{1}%{0[concurrent]:n}" """.format(
                kwargs, chunk)

        if kwargs['queue'] == 'time':
            command += """-W {0[time]:s} """.format(kwargs)
        else:
            command += """-q {0[queue]:s} """.format(kwargs)

        if kwargs['n'] > 0:
            command += """-n {0[n]:n} -R "{0[span]:s}" """.format(kwargs)
        if kwargs['dependency'] is not None:
            command += """-w "ended({0[dependency]:s})" """.format(kwargs)

        # exclude some clusters
        command += """ -R "select[rhel60]" """
        #command += """ -R "select[fell]" """
        command += """ {0:s} """.format(bashScript)

        # get the current number of running jobs and wait if too many
        if type(kwargs['max_rjobs']) == int:
            rjobs = get_jobs()
            while rjobs >= kwargs['max_rjobs']:
                logging.info('{0:n} jobs running, max number of running jobs allowed: {1:n}'.format(
                    rjobs, kwargs['max_rjobs']))
                logging.info('Sleep for {0:.2f} s ...'.format(kwargs['sleep'] * 3.))
                sleep(kwargs['sleep'] * 3.)
                rjobs = get_jobs()

        if not kwargs['dry']:
            logging.info('Sending command\n\t{0:s}\nto lsf cluster'.format(command))
            call(shlex.split(command))
        else:
            logging.info('Dry run for command\n\t{0:s}\nto lsf cluster'.format(command))

        logging.info('Going to sleep for {0[sleep]:.2f} s ...'.format(kwargs))
        sleep(kwargs['sleep'])

    return
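# Hypothetical dry-run invocation of submit_lsf; every keyword below is
# consumed via kwargs[...] in the body above (including 'span' and
# 'minimumJID', which the docstring does not list), and the config dict
# must carry a 'configname' key since it names the temporary files.
#
#     submit_lsf('/path/to/analysis.py',
#                {'configname': 'run1'},
#                '--verbose',
#                10,
#                queue='long', time='01:00', n=0, span='span[ptile=1]',
#                lsb_steps=1, jname='lsf', sleep=10., dry=True,
#                log='lsf.out', err='lsf.err', concurrent=0,
#                dependency=None, forceJob='0', extraDelay=False,
#                max_rjobs=None, tmpdir='/tmp/lsf', logdir='/tmp/lsf/log',
#                minimumJID=1, no_resubmit_running_jobs=False)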
def test_time():
    t = _get_time()
    ty = load(dump(t))
    compare_time(t, ty)
def test_representations(rep):
    rrep = load(dump(rep))
    assert np.all(representation_equal(rrep, rep))
def test_unit(c):
    cy = load(dump(c))
    if isinstance(c, (u.CompositeUnit, u.StructuredUnit)):
        assert c == cy
    else:
        assert c is cy