def wrapper(*args, **kwd):
    # Grab the logging keyword, if it is present.
    log_result = kwd.pop(_LOG_ARGUMENT, True)
    result = func(*args, **kwd)

    if not log_result:
        # No need to add metadata....
        meta_dict = {}
    elif log_result is not True:
        meta_dict = _metadata_to_dict(log_result)
    else:
        # Logging is not turned off, but user did not provide a value
        # so construct one.
        key = func.__name__
        all_args = chain(zip(original_positional_args, args),
                         six.iteritems(kwd))
        all_args = ["{0}={1}".format(name,
                                     _replace_array_with_placeholder(val))
                    for name, val in all_args]
        log_val = ", ".join(all_args)
        log_val = log_val.replace("\n", "")
        meta_dict = {key: log_val}

    for k, v in six.iteritems(meta_dict):
        result._insert_in_metadata_fits_safe(k, v)
    return result
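# A minimal, self-contained sketch of the keyword-popping decorator pattern
# used by `wrapper` above. The names (`log_to_history`, `add_one`,
# `add_to_history`) are hypothetical; only the `kwd.pop(...)` mechanics
# mirror the original.
import functools


def log_to_history(func):
    @functools.wraps(func)
    def wrapper(*args, **kwd):
        # Remove the control keyword before calling the wrapped function,
        # so `func` never sees it.
        log_result = kwd.pop('add_to_history', True)
        result = func(*args, **kwd)
        if log_result:
            print('{0} called with args={1} kwd={2}'.format(
                func.__name__, args, kwd))
        return result
    return wrapper


@log_to_history
def add_one(x):
    return x + 1


add_one(1, add_to_history=False)  # runs silently
add_one(2)                        # prints a log line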
def average_pols(block):
    """
    Average the polarizations for each feed in each IF
    """
    # will have names like "(IFnum)(feed)"
    averaged_pol_dict = {}

    ifdict = find_matched_freqs(block)
    feeddict = find_feeds(block)
    IDs = identify_samplers(block)

    for ifnum, ifsampler in iteritems(ifdict):
        for sampler, feednum in iteritems(feeddict):
            if sampler not in ifsampler:
                continue
            newname = "if%ifd%i" % (ifnum, feednum)
            matched_samplers = [
                sampler_name
                for (sampler_name, ID) in iteritems(IDs)
                if ID['feed'] == feednum and ID['IF'] == ifnum
            ]
            if len(matched_samplers) != 2:
                raise ValueError("Too few/many matches: %s"
                                 % matched_samplers)
            if newname not in averaged_pol_dict:
                average = (block[matched_samplers[0]] +
                           block[matched_samplers[1]]) / 2.
                averaged_pol_dict[newname] = average

    return averaged_pol_dict
def wrapper(*args, **kwd):
    # Grab the logging keyword, if it is present.
    log_result = kwd.pop(_LOG_ARGUMENT, False)
    result = func(*args, **kwd)

    if log_result is None:
        # No need to add metadata....
        meta_dict = {}
    elif log_result:
        meta_dict = _metadata_to_dict(log_result)
    elif log_result is not None:
        # Logging is not turned off, but user did not provide a value
        # so construct one.
        key = func.__name__
        pos_args = [
            "{0}={1}".format(arg_name,
                             _replace_array_with_placeholder(arg_value))
            for arg_name, arg_value in zip(original_positional_args, args)
        ]
        kwd_args = [
            "{0}={1}".format(k, _replace_array_with_placeholder(v))
            for k, v in six.iteritems(kwd)
        ]
        pos_args.extend(kwd_args)
        log_val = ", ".join(pos_args)
        log_val = log_val.replace("\n", "")
        meta_dict = {key: log_val}

    for k, v in six.iteritems(meta_dict):
        result._insert_in_metadata_fits_safe(k, v)
    return result
def plot_nh3(spdict, spectra, fignum=1, show_components=False,
             residfignum=None, show_hyperfine_components=True,
             annotate=True, axdict=None, figure=None, **plotkwargs):
    """
    Plot the results from a multi-nh3 fit

    spdict needs to be dictionary with form:
        'oneone': spectrum,
        'twotwo': spectrum,
        etc.
    """
    from matplotlib import pyplot

    if figure is None:
        spectra.plotter.figure = pyplot.figure(fignum)
        spectra.plotter.axis = spectra.plotter.figure.gca()

    splist = spdict.values()

    for transition, sp in spdict.items():
        sp.xarr.convert_to_unit('km/s',
                                velocity_convention='radio',
                                refX=pyspeckit.spectrum.models.ammonia.freq_dict[transition]*u.Hz,
                                quiet=True)
        sp.specfit.fitter = copy.copy(spectra.specfit.fitter)
        sp.specfit.modelpars = spectra.specfit.modelpars
        sp.specfit.parinfo = spectra.specfit.parinfo
        sp.specfit.npeaks = spectra.specfit.npeaks
        sp.specfit.fitter.npeaks = spectra.specfit.npeaks
        if spectra.specfit.modelpars is not None:
            sp.specfit.model = sp.specfit.fitter.n_ammonia(
                pars=spectra.specfit.modelpars,
                parnames=spectra.specfit.fitter.parnames)(sp.xarr)

    if axdict is None:
        axdict = make_axdict(splist, spdict)

    for linename, sp in iteritems(spdict):
        if linename not in axdict:
            raise NotImplementedError("Plot windows for {0} cannot "
                                      "be automatically arranged (yet)."
                                      .format(linename))
        sp.plotter.axis = axdict[linename]  # permanent
        sp.plotter(axis=axdict[linename], title=title_dict[linename],
                   **plotkwargs)
        sp.specfit.Spectrum.plotter = sp.plotter
        sp.specfit.selectregion(reset=True)
        if sp.specfit.modelpars is not None:
            sp.specfit.plot_fit(annotate=False,
                                show_components=show_components,
                                show_hyperfine_components=show_hyperfine_components)

    if spdict['oneone'].specfit.modelpars is not None and annotate:
        spdict['oneone'].specfit.annotate(labelspacing=0.05,
                                          prop={'size': 'small',
                                                'stretch': 'extra-condensed'},
                                          frameon=False)

    if residfignum is not None:
        pyplot.figure(residfignum)
        pyplot.clf()
        axdict = make_axdict(splist, spdict)
        for linename, sp in iteritems(spdict):
            sp.specfit.plotresiduals(axis=axdict[linename])
def plot_nh3(spdict, spectra, fignum=1, show_components=False,
             residfignum=None, show_hyperfine_components=True,
             annotate=True, **plotkwargs):
    """
    Plot the results from a multi-nh3 fit

    spdict needs to be dictionary with form:
        'oneone': spectrum,
        'twotwo': spectrum,
        etc.
    """
    spectra.plotter.figure = pyplot.figure(fignum)
    spectra.plotter.axis = spectra.plotter.figure.gca()
    pyplot.clf()

    splist = spdict.values()

    for transition, sp in spdict.items():
        sp.xarr.convert_to_unit('km/s',
                                velocity_convention='radio',
                                refX=pyspeckit.spectrum.models.ammonia.freq_dict[transition]*u.Hz,
                                quiet=True)
        sp.specfit.fitter = copy.copy(spectra.specfit.fitter)
        sp.specfit.modelpars = spectra.specfit.modelpars
        sp.specfit.parinfo = spectra.specfit.parinfo
        sp.specfit.npeaks = spectra.specfit.npeaks
        sp.specfit.fitter.npeaks = spectra.specfit.npeaks
        if spectra.specfit.modelpars is not None:
            sp.specfit.model = sp.specfit.fitter.n_ammonia(
                pars=spectra.specfit.modelpars,
                parnames=spectra.specfit.fitter.parnames)(sp.xarr)

    axdict = make_axdict(splist, spdict)

    for linename, sp in iteritems(spdict):
        if linename not in axdict:
            raise NotImplementedError("Plot windows for {0} cannot "
                                      "be automatically arranged (yet)."
                                      .format(linename))
        sp.plotter.axis = axdict[linename]  # permanent
        sp.plotter(axis=axdict[linename], title=title_dict[linename],
                   **plotkwargs)
        sp.specfit.Spectrum.plotter = sp.plotter
        sp.specfit.selectregion(reset=True)
        if sp.specfit.modelpars is not None:
            sp.specfit.plot_fit(annotate=False,
                                show_components=show_components,
                                show_hyperfine_components=show_hyperfine_components)

    if spdict['oneone'].specfit.modelpars is not None and annotate:
        spdict['oneone'].specfit.annotate(labelspacing=0.05,
                                          prop={'size': 'small',
                                                'stretch': 'extra-condensed'},
                                          frameon=False)

    if residfignum is not None:
        pyplot.figure(residfignum)
        pyplot.clf()
        axdict = make_axdict(splist, spdict)
        for linename, sp in iteritems(spdict):
            sp.specfit.plotresiduals(axis=axdict[linename])
def set_test_files(self):
    """
    A set of files is created from the dictionary below; the number of
    files of each type is set by the 'number' key.
    """
    bias = 'BIAS'
    dark = 'DARK'
    flat = 'FLAT'
    light = 'LIGHT'

    files = {bias: 5,
             dark: {20.0: 5, 30.0: 5},
             flat: {'R': {17.0: 3},
                    'V': {15.0: 2, 25.0: 2},
                    'I': {17.0: 5}},
             light: {'m81': {'R': {17.0: 3},
                             'V': {17.0: 2}},
                     'm101': {'I': {17.0: 5}},
                     None: {'I': {17.0: 2},
                            'R': {17.0: 3},
                            'V': {17.0: 4}}}
             }

    data = np.random.random([100, 100])
    make_hdu = lambda im_type: \
        fits.PrimaryHDU(data, fits.Header.fromkeys(['imagetyp'],
                                                   value=im_type))
    make_file_names = lambda base, number: \
        [base + str(i) + '.fit' for i in range(number)]

    bias_names = make_file_names(bias, files[bias])
    bias_hdu = make_hdu(bias)
    self.write_names(bias_hdu, bias_names)

    for exp, number in six.iteritems(files[dark]):
        dark_hdu = make_hdu(dark)
        dark_hdu.header['exptime'] = exp
        dark_names = make_file_names(dark + str(exp), number)
        self.write_names(dark_hdu, dark_names)

    flat_hdu = make_hdu(flat)
    self.make_filter_exp(flat_hdu, files[flat])

    light_hdu = make_hdu(light)
    for obj, filter_dict in six.iteritems(files[light]):
        if not obj:
            try:
                del light_hdu.header['object']
            except KeyError:
                pass
        else:
            light_hdu.header['object'] = obj
        obj_name = obj or None
        self.make_filter_exp(light_hdu, filter_dict, extra_name=obj_name)

    return files
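# A quick standalone check of the HDU-construction helper used above,
# assuming only numpy and astropy are installed; the keyword values and
# the commented-out file name are made-up examples.
import numpy as np
from astropy.io import fits

data = np.random.random([100, 100])
hdu = fits.PrimaryHDU(data, fits.Header.fromkeys(['imagetyp'], value='BIAS'))
hdu.header['exptime'] = 20.0
print(hdu.header['imagetyp'], hdu.header['exptime'])
# hdu.writeto('BIAS0.fit')  # uncomment to actually write a file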
def _json_summary_to_table(self, data, base_url):
    """
    Convert the JSON file listing from the ALMA archive into a table of
    UIDs, URLs, and file sizes.
    """
    columns = {'uid': [], 'URL': [], 'size': []}
    for entry in data['node_data']:
        if entry['file_name'] != 'null':
            # "de_name": "ALMA+uid://A001/X122/X35e",
            columns['uid'].append(entry['de_name'][5:])
            columns['size'].append((int(entry['file_size'])*u.B).to(u.Gbyte))
            # example template for constructing url:
            # https://almascience.eso.org/dataPortal/requests/keflavich/940238268/ALMA/
            # uid___A002_X9d6f4c_X154/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
            # above is WRONG
            # should be:
            # 2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
            url = os.path.join(base_url,
                               entry['file_name'],
                               entry['file_name'],
                               )
            columns['URL'].append(url)

    columns['size'] = u.Quantity(columns['size'], u.Gbyte)

    tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])
    return tbl
def make_argparser():
    """
    Most of the real work is handled by the subcommands in the
    commands subpackage.
    """
    def help(args):
        parser.print_help()
        return 0

    parser = argparse.ArgumentParser(
        "asdftool",
        description="Commandline utilities for managing ASDF files.")

    parser.add_argument(
        "--verbose", "-v", action="store_true",
        help="Increase verbosity")

    subparsers = parser.add_subparsers(
        title='subcommands',
        description='valid subcommands')

    help_parser = subparsers.add_parser(
        str("help"), help="Display usage information")
    help_parser.set_defaults(func=help)

    commands = dict((x.__name__, x) for x in util.iter_subclasses(Command))

    for command in command_order:
        commands[str(command)].setup_arguments(subparsers)
        del commands[command]

    for name, command in sorted(six.iteritems(commands)):
        command.setup_arguments(subparsers)

    return parser, subparsers
def read_snana_ascii_multi(fnames, default_tablename=None):
    """Like ``read_snana_ascii()``, but read from multiple files containing
    the same tables and glue results together into big tables.

    Parameters
    ----------
    fnames : list of str
        List of filenames.

    Returns
    -------
    tables : dictionary of `~astropy.table.Table`
        Tables indexed by table names.

    Examples
    --------
    >>> tables = read_snana_ascii_multi(['data1.txt', 'data2.txt'])

    """
    alltables = {}
    for fname in fnames:
        meta, tables = read_snana_ascii(fname,
                                        default_tablename=default_tablename)
        for key, table in six.iteritems(tables):
            if key in alltables:
                alltables[key].append(table)
            else:
                alltables[key] = [table]

    for key in alltables.keys():
        alltables[key] = vstack(alltables[key])

    return alltables
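# A minimal sketch of the accumulate-then-vstack pattern used above,
# assuming only astropy and six are installed; the table name 'SN' and
# its contents are made up.
from astropy.table import Table, vstack
import six

alltables = {}
for tables in ({'SN': Table({'z': [0.1]})}, {'SN': Table({'z': [0.2]})}):
    for key, table in six.iteritems(tables):
        if key in alltables:
            alltables[key].append(table)
        else:
            alltables[key] = [table]
for key in alltables.keys():
    alltables[key] = vstack(alltables[key])
print(alltables['SN'])  # one table with two rows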
def get_loaders_metadata(data_class):
    """Return the metadata of all registered loaders for a given class.

    Parameters
    ----------
    data_class : classobj

    Returns
    -------
    loadermeta : list of dict
        Each item in the list is a dictionary containing a 'name'
        keyword, a 'version' keyword (if applicable), and the metadata
        keywords for the given loader.
    """
    loaders_metadata = []
    for lkey, loader in six.iteritems(_loaders):
        if lkey[0] is not data_class:
            continue
        m = {'name': lkey[1]}
        if len(lkey) > 2:
            m['version'] = lkey[2]
        m.update(loader[2])
        loaders_metadata.append(m)
    return loaders_metadata
def _json_summary_to_table(self, data, base_url):
    """
    Convert the JSON file listing from the ALMA archive into a table of
    UIDs, URLs, and file sizes.
    """
    columns = {'uid': [], 'URL': [], 'size': []}
    for entry in data['node_data']:
        is_file = (entry['de_type'] == 'MOUS' or
                   (entry['file_name'] != 'null' and
                    entry['file_key'] != 'null'))
        if is_file:
            # "de_name": "ALMA+uid://A001/X122/X35e",
            columns['uid'].append(entry['de_name'][5:])
            columns['size'].append((int(entry['file_size'])*u.B).to(u.Gbyte))
            # example template for constructing url:
            # https://almascience.eso.org/dataPortal/requests/keflavich/940238268/ALMA/
            # uid___A002_X9d6f4c_X154/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
            # above is WRONG... except for ASDMs, when it's right
            # should be:
            # 2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
            #
            # apparently ASDMs are different from others:
            # templates:
            # https://almascience.eso.org/dataPortal/requests/keflavich/946895898/ALMA/
            # 2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar/2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar
            # uid___A002_X9ee74a_X26f0/2013.1.00308.S_uid___A002_X9ee74a_X26f0.asdm.sdm.tar
            url = os.path.join(base_url,
                               entry['file_key'],
                               entry['file_name'],
                               )
            columns['URL'].append(url)

    columns['size'] = u.Quantity(columns['size'], u.Gbyte)

    tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])
    return tbl
def action(self):
    self.progress_bar.visible = True
    self.progress_bar.value = 1.0
    self.progress_bar.layout.visibility = 'visible'
    self.progress_bar.layout.display = 'flex'
    # Refresh image collection in case files were added after widget was
    # created.
    self.image_source.refresh()
    groups_to_combine = self._group_by.groups(self.apply_to)
    n_groups = len(groups_to_combine)
    for idx, combo_group in enumerate(groups_to_combine):
        self.progress_bar.description = \
            ("Processing {} of {} "
             "(may take several minutes)".format(idx + 1, n_groups))
        combined = self._action_for_one_group(combo_group)
        name_addons = ['_'.join([str(k), str(v)])
                       for k, v in six.iteritems(combo_group)]
        fname = [self._file_base_name]
        fname.extend(name_addons)
        fname = '_'.join(fname) + '.fit'
        dest_path = os.path.join(self.destination, fname)
        combined.write(dest_path)
        self._combined = combined
    self.progress_bar.visible = False
    self.progress_bar.layout.display = 'none'
def validate_fill_default(validator, properties, instance, schema):
    if not validator.is_type(instance, 'object'):
        return

    for property, subschema in six.iteritems(properties):
        if "default" in subschema:
            instance.setdefault(property, subschema["default"])
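# A sketch of how a keyword hook like `validate_fill_default` can be
# attached, assuming the `jsonschema` package and that the function above
# is in scope. This follows jsonschema's documented `validators.extend`
# recipe rather than the original module's actual wiring; note it replaces
# the normal 'properties' validation with fill-only behavior.
import jsonschema

FillDefaultsValidator = jsonschema.validators.extend(
    jsonschema.Draft4Validator, {'properties': validate_fill_default})

schema = {'type': 'object',
          'properties': {'name': {'type': 'string', 'default': 'unknown'}}}
instance = {}
FillDefaultsValidator(schema).validate(instance)
print(instance)  # {'name': 'unknown'}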
def _get_default_form_values(self, form):
    """Return the already selected values of a given form (a BeautifulSoup
    form node) as a dict.
    """
    res = defaultdict(list)
    for elem in form.find_all(['input', 'select']):
        key = elem.get('name')
        value = None
        # ignore the submit and reset buttons
        if elem.get('type') in ['submit', 'reset']:
            continue
        # check boxes: enabled boxes have the value "on" if not specified
        # otherwise. Found out by debugging, perhaps not documented.
        if (elem.get('type') == 'checkbox' and
                elem.get('checked') in ["", "checked"]):
            value = elem.get('value', 'on')
        # radio buttons and simple input fields
        if ((elem.get('type') == 'radio' and
             elem.get('checked') in ["", "checked"]) or
                elem.get('type') in [None, 'text']):
            value = elem.get('value')
        # dropdown menu, multi-selection possible
        if elem.name == 'select':
            for option in elem.find_all('option'):
                if option.get('selected') == '':
                    value = option.get('value', option.text.strip())
        if value and value not in [None, u'None', u'null']:
            res[key].append(value)

    # avoid values with size 1 lists
    d = dict(res)
    for k, v in six.iteritems(d):
        if len(v) == 1:
            d[k] = v[0]
    return d
def _vo_service_request(url, pedantic, kwargs, cache=True, verbose=False):
    """
    This is called by :func:`call_vo_service`.

    Raises
    ------
    InvalidAccessURL
        Invalid access URL.

    """
    if len(kwargs) and not url.endswith(('?', '&')):
        raise InvalidAccessURL("url should already end with '?' or '&'")

    query = []
    for key, value in six.iteritems(kwargs):
        query.append('{0}={1}'.format(urllib.parse.quote(key),
                                      urllib.parse.quote_plus(str(value))))

    parsed_url = url + '&'.join(query)
    with get_readable_fileobj(parsed_url, encoding='binary', cache=cache,
                              show_progress=verbose) as req:
        tab = table.parse(req, filename=parsed_url, pedantic=pedantic)

    return vo_tab_parse(tab, url, kwargs)
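# A standalone illustration of the query-string building step above, using
# only six and the standard library; the endpoint and parameters are
# made-up examples (sorted here only to make the output deterministic).
from six.moves import urllib

url = 'http://example.com/vo/conesearch?'
kwargs = {'RA': 10.5, 'DEC': -30.0, 'SR': 0.1}
query = []
for key, value in sorted(kwargs.items()):
    query.append('{0}={1}'.format(urllib.parse.quote(key),
                                  urllib.parse.quote_plus(str(value))))
print(url + '&'.join(query))
# http://example.com/vo/conesearch?DEC=-30.0&RA=10.5&SR=0.1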
def clear_all_connections(self, debug=False):
    """
    Prevent overlapping interactive sessions
    """
    # this is really ugly, but needs to be done in order to prevent
    # multiple overlapping calls...
    cids_to_remove = []
    if not hasattr(self.Spectrum.plotter.figure, 'canvas'):
        # just quit out; saves a tab...
        if debug or self._debug:
            print("Didn't find a canvas, quitting.")
        return
    for eventtype in ('button_press_event', 'key_press_event'):
        for key, val in iteritems(self.Spectrum.plotter.figure.canvas.
                                  callbacks.callbacks[eventtype]):
            if "event_manager" in val.func.__name__:
                cids_to_remove.append(key)
                if debug or self._debug:
                    print("Removing CID #%i with attached function %s" %
                          (key, val.func.__name__))
    for cid in cids_to_remove:
        self.Spectrum.plotter.figure.canvas.mpl_disconnect(cid)

    self.Spectrum.plotter._reconnect_matplotlib_keys()

    # Click counters - should always be reset!
    self.nclicks_b1 = 0  # button 1
    self.nclicks_b2 = 0  # button 2

    self.Spectrum.plotter._active_gui = None
def _submit_form(self, input=None):
    """Fill out the form of the SkyView site and submit it with the
    values given in `input` (a dictionary where the keys are the form
    element's names and the values are their respective values).
    """
    if input is None:
        input = {}
    response = self._request("GET", url=self.FORM_URL, data={},
                             timeout=self.TIMEOUT)
    bs = BeautifulSoup(response.text)
    form = bs.find('form')
    # cache the default values to save HTTP traffic
    if self._default_form_values is None:
        self._default_form_values = self._get_default_form_values(form)
    # only overwrite payload's values if the `input` value is not None
    # to avoid overwriting of the form's default values
    payload = self._default_form_values.copy()
    for k, v in six.iteritems(input):
        if v is not None:
            payload[k] = v
    url = urlparse.urljoin(self.FORM_URL, form.get('action'))
    response = self._request("POST", url=url, data=payload,
                             timeout=self.TIMEOUT)
    return response
def BigSpectrum_to_H2COdict(sp, vrange=None):
    """
    A rather complicated way to make the spdicts above given a spectrum...
    """
    spdict = {}
    for linename, freq in iteritems(
            spectrum.models.formaldehyde.central_freq_dict):
        if vrange is not None:
            freq_test_low = freq - freq * vrange[0] / units.speedoflight_kms
            freq_test_high = freq - freq * vrange[1] / units.speedoflight_kms
        else:
            freq_test_low = freq_test_high = freq

        if (sp.xarr.as_unit('Hz').in_range(freq_test_low * u.Hz) or
                sp.xarr.as_unit('Hz').in_range(freq_test_high * u.Hz)):
            spdict[linename] = sp.copy(deep=True)
            spdict[linename].xarr.convert_to_unit('GHz')
            spdict[linename].xarr.refX = freq
            spdict[linename].xarr.refX_unit = 'Hz'
            #spdict[linename].baseline = copy.copy(sp.baseline)
            #spdict[linename].baseline.Spectrum = spdict[linename]
            spdict[linename].specfit = sp.specfit.copy(
                parent=spdict[linename])
            spdict[linename].xarr.convert_to_unit('km/s')
            if vrange is not None:
                try:
                    spdict[linename].crop(*vrange, unit='km/s')
                except IndexError:
                    # if the freq in range, but there's no data in range,
                    # remove
                    spdict.pop(linename)

    return spdict
def get_extensions():
    wcslib_files = [  # List of wcslib files to compile
        'prj.c',
        'wcserr.c',
        'wcsprintf.c',
        'wcsutil.c'
    ]

    wcslib_config_paths = [
        join(MODELING_SRC, 'wcsconfig.h')
    ]

    cfg = setup_helpers.DistutilsExtensionArgs()
    wcs_setup_package.get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)
    cfg['include_dirs'].append(MODELING_SRC)

    astropy_files = [  # List of astropy.modeling files to compile
        'projections.c'
    ]
    cfg['sources'].extend(join(MODELING_SRC, x) for x in astropy_files)

    cfg['sources'] = [str(x) for x in cfg['sources']]
    cfg = dict((str(key), val) for key, val in six.iteritems(cfg))

    return [Extension(str('astropy.modeling._projections'), **cfg)]
def _write_salt2(f, data, meta, **kwargs):
    raw = kwargs.get('raw', False)
    pedantic = kwargs.get('pedantic', True)

    if meta is not None:
        for key, val in six.iteritems(meta):
            if not raw:
                key = key.upper()
                key = KEY_TO_SALT2KEY_META.get(key, key)
            f.write('@{0} {1}\n'.format(key, str(val)))

    keys = data.dtype.names
    length = len(data)

    # Write column names
    keys_as_written = []
    for key in keys:
        if not raw:
            key = key.capitalize()
            key = KEY_TO_SALT2KEY_COLUMN.get(key, key)
        f.write('#{0} :\n'.format(key))
        keys_as_written.append(key)
    f.write('#end :\n')

    # Check that necessary fields exist
    if pedantic:
        if not ('Filter' in keys_as_written and 'MagSys' in keys_as_written):
            raise ValueError('photometry data missing some required '
                             'fields: Filter, MagSys')

    # Write the data itself
    for i in range(length):
        f.write(' '.join([str(data[key][i]) for key in keys]))
        f.write('\n')
def get_extensions():
    generate_c_docstrings()

    ######################################################################
    # DISTUTILS SETUP
    cfg = setup_helpers.DistutilsExtensionArgs()

    wcslib_files = [  # List of wcslib files to compile
        'flexed/wcsbth.c',
        'flexed/wcspih.c',
        'flexed/wcsulex.c',
        'flexed/wcsutrn.c',
        'cel.c',
        'dis.c',
        'lin.c',
        'log.c',
        'prj.c',
        'spc.c',
        'sph.c',
        'spx.c',
        'tab.c',
        'wcs.c',
        'wcserr.c',
        'wcsfix.c',
        'wcshdr.c',
        'wcsprintf.c',
        'wcsunits.c',
        'wcsutil.c'
    ]

    wcslib_config_paths = [
        join(WCSROOT, 'include', 'astropy_wcs', 'wcsconfig.h'),
        join(WCSROOT, 'include', 'wcsconfig.h')
    ]

    get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)

    cfg['include_dirs'].append(join(WCSROOT, "include"))

    astropy_wcs_files = [  # List of astropy.wcs files to compile
        'distortion.c',
        'distortion_wrap.c',
        'docstrings.c',
        'pipeline.c',
        'pyutil.c',
        'astropy_wcs.c',
        'astropy_wcs_api.c',
        'sip.c',
        'sip_wrap.c',
        'str_list_proxy.c',
        'unit_list_proxy.c',
        'util.c',
        'wcslib_wrap.c',
        'wcslib_tabprm_wrap.c']
    cfg['sources'].extend(join(WCSROOT, 'src', x) for x in astropy_wcs_files)

    cfg['sources'] = [str(x) for x in cfg['sources']]
    cfg = dict((str(key), val) for key, val in six.iteritems(cfg))

    return [Extension(str('astropy.wcs._wcs'), **cfg)]
def fc_construct_acceptance_intervals(distribution_dict, bins, alpha):
    r"""Convenience function that calculates the PDF for the user.

    For more information see :ref:`documentation <feldman_cousins>`.

    Parameters
    ----------
    distribution_dict : dict
        Keys are mu values and value is an array-like list of x values
    bins : array-like
        The bins the x distribution will have
    alpha : float
        Desired confidence level

    Returns
    -------
    acceptance_intervals : ndarray
        Acceptance intervals (1 means inside, 0 means outside)
    """
    distributions_scaled = []

    # Histogram gets rid of the last bin, so add one extra
    bin_width = bins[1] - bins[0]
    new_bins = np.concatenate((bins, np.array([bins[-1] + bin_width])),
                              axis=0)

    # Histogram and normalise each distribution so it is a real PDF
    for mu, distribution in iter(sorted(iteritems(distribution_dict))):
        entries = np.histogram(distribution, bins=new_bins)[0]
        integral = float(sum(entries))
        distributions_scaled.append(entries / integral)

    acceptance_intervals = fc_construct_acceptance_intervals_pdfs(
        distributions_scaled, alpha)

    return acceptance_intervals
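# A small numeric check of the "add one extra bin edge" trick used above,
# assuming only numpy; the bin edges and sample values are arbitrary.
import numpy as np

bins = np.array([0., 1., 2., 3.])  # intended left edges of the bins
bin_width = bins[1] - bins[0]
new_bins = np.concatenate((bins, np.array([bins[-1] + bin_width])), axis=0)

distribution = np.array([0.2, 0.4, 1.5, 2.5, 3.5])
entries = np.histogram(distribution, bins=new_bins)[0]
pdf = entries / float(sum(entries))
print(pdf)  # [0.4 0.2 0.2 0.2], sums to 1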
def _set_column_name_case_to_match_keywords(self, header_keys,
                                            summary_table):
    key_name_dict = {k.lower(): k for k in header_keys if k != k.lower()}
    for lcase, user_case in six.iteritems(key_name_dict):
        try:
            summary_table.rename_column(lcase, user_case)
        except KeyError:
            pass
def from_custom_type(cls, custom_type):
    try:
        return cls._type_by_cls[custom_type]
    except KeyError:
        for key, val in six.iteritems(cls._type_by_cls):
            if issubclass(custom_type, key):
                return val
    return None
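# A self-contained sketch of the subclass-aware registry lookup above;
# the registry, the classes, and the `lookup` helper are made up for
# illustration.
import six


class Base(object):
    pass


class Child(Base):
    pass


_type_by_cls = {Base: 'base-handler'}


def lookup(custom_type):
    try:
        return _type_by_cls[custom_type]
    except KeyError:
        # Fall back to the first registered ancestor class.
        for key, val in six.iteritems(_type_by_cls):
            if issubclass(custom_type, key):
                return val
    return None


print(lookup(Child))  # 'base-handler', via the issubclass fallback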
def read_lmv_tofits(fn):
    from astropy.io import fits
    data, header = read_lmv(fn)
    cards = [fits.header.Card(k, v) for k, v in iteritems(header)]
    Header = fits.Header(cards)
    hdu = fits.PrimaryHDU(data=data, header=Header)
    return hdu
def recurse(a, b):
    if isinstance(b, dict):
        if not isinstance(a, dict):
            return copy.deepcopy(b)
        for key, val in six.iteritems(b):
            a[key] = recurse(a.get(key), val)
        return a
    return copy.deepcopy(b)
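# A usage sketch for the `recurse` deep-merge helper above, assuming it
# is in scope; the settings dicts are made-up examples.
import copy

defaults = {'plot': {'color': 'k', 'lw': 1}, 'verbose': False}
overrides = {'plot': {'lw': 2}}

merged = recurse(copy.deepcopy(defaults), overrides)
print(merged)  # {'plot': {'color': 'k', 'lw': 2}, 'verbose': False}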
def validate_remove_default(validator, properties, instance, schema):
    if not validator.is_type(instance, 'object'):
        return

    for property, subschema in six.iteritems(properties):
        if "default" in subschema:
            if instance.get(property, None) == subschema["default"]:
                del instance[property]
def find_lines(xarr):
    """
    Given a :class:`pyspeckit.units.SpectroscopicAxis` instance, finds
    all the lines that are in bounds.  Returns a list of line names.
    """
    return [linename for (linename, lam) in iteritems(wavelength)
            if xarr.as_unit('micron').in_range(lam)]
def _str_index(self):
    idx = self['index']
    out = []
    out += ['.. index:: %s' % idx.get('default', '')]
    for section, references in iteritems(idx):
        if section == 'default':
            continue
        out += [' :%s: %s' % (section, ', '.join(references))]
    return out
def _walk_dict_string_keys(self, d, parent=None):
    if parent is None:
        parent = []
    for key, val in six.iteritems(d):
        new_parent = list(parent) + [str(key)]
        if isinstance(val, dict):
            # Recurse into nested dicts (renamed from `d` to avoid
            # shadowing the parameter).
            for item in self._walk_dict_string_keys(val, parent=new_parent):
                yield item
        else:
            yield new_parent, val
def add_constraints(self, new_constraints):
    """Add a set of constraints to the current constraints."""
    try:
        constraints = self.constraints
    except AttributeError:
        constraints = {}
        self.constraints = constraints
    for constraint, value in six.iteritems(new_constraints):
        # Note: the `dict.get` here means an already-set constraint keeps
        # its existing value; new values only fill in missing constraints.
        constraints[constraint] = constraints.get(constraint, value)
def make_filter_exp(self, hdu, filter_dict, extra_name=None):
    add_to_name = extra_name or ''
    hdr = hdu.header
    make_base = lambda hdr, band: \
        hdr['imagetyp'] + add_to_name + band + str(hdr['exptime'])
    for band, num_or_exp_dict in six.iteritems(filter_dict):
        hdr['filter'] = band
        if isinstance(num_or_exp_dict, dict):
            for exp, number in six.iteritems(num_or_exp_dict):
                hdr['exptime'] = exp
                base = make_base(hdr, band)
                print(base)
                self.write_names(hdu,
                                 [base + str(i) + '.fit'
                                  for i in range(number)])
        else:
            hdr['exptime'] = 17.0
            base = make_base(hdr, band)
            print(base)
            print(hdr['imagetyp'])
            self.write_names(hdu,
                             [base + str(i) + '.fit'
                              for i in range(num_or_exp_dict)])
def _write_snana(f, data, meta, **kwargs):
    raw = kwargs.get("raw", False)
    pedantic = kwargs.get("pedantic", True)

    # Write metadata
    keys_as_written = []
    if meta is not None:
        for key, val in six.iteritems(meta):
            if not raw:
                key = key.upper()
                key = KEY_TO_SNANAKEY_META.get(key, key)
            f.write("{0}: {1}\n".format(key, str(val)))
            keys_as_written.append(key)

    # Check that necessary metadata was written
    if pedantic:
        for key in SNANA_REQUIRED_META:
            if key not in keys_as_written:
                raise ValueError("Missing required metadata kw: " + key)

    # Get column names and data length
    keys = data.dtype.names
    length = len(data)

    # Convert column names
    keys_to_write = []
    for key in keys:
        if not raw:
            key = key.upper()
            key = KEY_TO_SNANAKEY_COLUMN.get(key, key)
        keys_to_write.append(key)

    # Check that necessary column names are included
    if pedantic:
        for key in SNANA_REQUIRED_COLUMN:
            if key not in keys_to_write:
                raise ValueError("Missing required column name: " + key)

    # Write the header
    f.write("\n"
            "# ==========================================\n"
            "# TERSE LIGHT CURVE OUTPUT:\n"
            "#\n"
            "NOBS: {0:d}\n"
            "NVAR: {1:d}\n"
            "VARLIST: {2}\n".format(length, len(keys),
                                    " ".join(keys_to_write)))

    # Write data
    for i in range(length):
        f.write("OBS: ")
        f.write(" ".join([str(data[key][i]) for key in keys]))
        f.write("\n")
def _pack_params(p):
    params = p.copy()
    for key, item in six.iteritems(p):
        if hasattr(item, 'unit'):
            params[key] = item.value
            params[key + '_unit'] = str(item.unit)
        if hasattr(params[key], 'tolist'):  # convert array to list
            params[key] = params[key].tolist()
    return params
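# A quick check of `_pack_params` with an astropy Quantity and a numpy
# array, assuming the function above is in scope and astropy/numpy are
# installed; the parameter names are arbitrary.
import numpy as np
from astropy import units as u

packed = _pack_params({'t0': 55000. * u.day, 'flux': np.array([1., 2.])})
print(packed)
# -> 't0' becomes 55000.0 with a separate 't0_unit' of 'd';
#    'flux' becomes the plain list [1.0, 2.0]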
def _set_titles(self):
    """
    Set titles for accordions.

    This should apparently be done *before* the widget is displayed.
    """
    for name, obj in six.iteritems(self._gui_objects):
        if isinstance(obj, Accordion):
            for idx, child in enumerate(obj.children):
                if not isinstance(child, widgets.Select):
                    obj.set_title(idx, child.description)
def recurse(tree, path=[]):
    if isinstance(tree, dict):
        for key, val in six.iteritems(tree):
            for x in recurse(val, path + [key]):
                yield x
    elif isinstance(tree, (list, tuple)):
        for i, val in enumerate(tree):
            for x in recurse(val, path + [i]):
                yield x
    elif tree is not None:
        yield (str('.'.join(six.text_type(x) for x in path)), tree)
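# A usage sketch for the tree-flattening `recurse` generator defined just
# above (not the dict-merge `recurse` earlier), assuming it and six are in
# scope; the tree is a made-up example.
tree = {'wcs': {'crpix': [1024, 1024]}, 'meta': {'author': 'unknown'}}
for flat_key, value in recurse(tree):
    print(flat_key, value)
# wcs.crpix.0 1024
# wcs.crpix.1 1024
# meta.author unknown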
def _insert_in_metadata(metadata, arg):
    if isinstance(arg, six.string_types):
        # add the key, no value
        metadata[arg] = None
    elif isinstance(arg, ccdproc.ccdproc.Keyword):
        metadata[arg.name] = arg.value
    else:
        try:
            for k, v in six.iteritems(arg):
                metadata[k] = v
        except AttributeError:
            # not a mapping; let the original error propagate
            raise
def _save_extra_fits(hdulist, tree):
    # Handle _extra_fits
    for hdu_name, parts in six.iteritems(tree.get('extra_fits', {})):
        hdu_name = fits_hdu_name(hdu_name)
        if 'data' in parts:
            hdu = _make_new_hdu(hdulist, parts['data'], hdu_name)
        if 'header' in parts:
            hdu = _get_or_make_hdu(hdulist, hdu_name)
            for key, val, comment in parts['header']:
                if _is_builtin_fits_keyword(key):
                    continue
                hdu.header.append((key, val, comment), end=True)