def test_warnings(self): with warnings.catch_warnings(): logging.captureWarnings(True) try: warnings.filterwarnings("always", category=UserWarning) file = io.StringIO() h = logging.StreamHandler(file) logger = logging.getLogger("py.warnings") logger.addHandler(h) warnings.warn("I'm warning you...") logger.removeHandler(h) s = file.getvalue() h.close() self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0) #See if an explicit file uses the original implementation file = io.StringIO() warnings.showwarning("Explicit", UserWarning, "dummy.py", 42, file, "Dummy line") s = file.getvalue() file.close() self.assertEqual(s, "dummy.py:42: UserWarning: Explicit\n Dummy line\n") finally: logging.captureWarnings(False)
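# Standalone sketch of the mechanism the test above exercises: logging.captureWarnings(True)
# reroutes warnings.warn() through the "py.warnings" logger, so a handler attached to that
# logger receives the formatted warning text instead of it going to stderr.
# (Illustrative only; not part of the test suite above.)
import io
import logging
import warnings

buf = io.StringIO()
handler = logging.StreamHandler(buf)
logging.getLogger("py.warnings").addHandler(handler)
with warnings.catch_warnings():
    warnings.simplefilter("always")
    logging.captureWarnings(True)
    try:
        warnings.warn("something looks off", UserWarning)
    finally:
        logging.captureWarnings(False)
logging.getLogger("py.warnings").removeHandler(handler)
assert "UserWarning: something looks off" in buf.getvalue()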
def __init__(self, pin=None, pull_up=False): if pin in (2, 3) and not pull_up: raise InputDeviceError( 'GPIO pins 2 and 3 are fitted with physical pull up ' 'resistors; you cannot initialize them with pull_up=False' ) # _pull_up should be assigned first as __repr__ relies upon it to # support the case where __repr__ is called during debugging of an # instance that has failed to initialize (due to an exception in the # super-class __init__) self._pull_up = pull_up super(InputDevice, self).__init__(pin) self._active_edge = GPIO.FALLING if pull_up else GPIO.RISING self._inactive_edge = GPIO.RISING if pull_up else GPIO.FALLING self._active_state = GPIO.LOW if pull_up else GPIO.HIGH self._inactive_state = GPIO.HIGH if pull_up else GPIO.LOW pull = GPIO.PUD_UP if pull_up else GPIO.PUD_DOWN try: # NOTE: catch_warnings isn't thread-safe but hopefully no-one's # messing around with GPIO init within background threads... with warnings.catch_warnings(record=True) as w: GPIO.setup(pin, GPIO.IN, pull) # The only warning we want to squash is a RuntimeWarning that is # thrown when setting pins 2 or 3. Anything else should be replayed for warning in w: if warning.category != RuntimeWarning or pin not in (2, 3): warnings.showwarning( warning.message, warning.category, warning.filename, warning.lineno, warning.file, warning.line ) except: self.close() raise
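# The record-and-replay idiom used above (and in several later snippets), reduced to a
# standalone sketch: record everything, swallow only the expected category, and re-emit the
# rest via warnings.showwarning so they still reach the user. `noisy_setup` is a placeholder
# for whatever call may warn; it is not part of the original code.
import warnings

def noisy_setup():
    warnings.warn("pin already in use", RuntimeWarning)
    warnings.warn("something unrelated", UserWarning)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    noisy_setup()

for w in caught:
    if not issubclass(w.category, RuntimeWarning):
        # replay anything we did not intend to squash
        warnings.showwarning(w.message, w.category, w.filename,
                             w.lineno, w.file, w.line)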
def sigma_envelopes(self, steps=1):
    """ Envelopes and twiss-functions from sigma-matrix method """
    # initials
    bx,ax,gx,epsx = PARAMS['twiss_x_i']()
    by,ay,gy,epsy = PARAMS['twiss_y_i']()
    bz,az,gz,epsz = PARAMS['twiss_z_i']()
    twv0 = NP.array([bx,ax,gx,by,ay,gy,bz,az,gz])   # twiss vector IN lattice
    sg0 = Sigma(twv0,epsx,epsy,epsz)                # sigma object IN lattice
    # sigma envelopes as function of distance s
    sigma_fun = Functions(('s','bx','ax','gx','by','ay','gy','bz','az','gz'))
    for node in iter(self):     # loop nodes
        # sigma-matrices for a single node
        sigmas = node.sigma_beam(steps=steps, sg=sg0)
        # prep plot list of ftn's
        for sg,s in sigmas:
            v = sg.twiss()      # twiss from Sigma object
            flist = v.tolist()
            sigma_fun.append(s,tuple(flist))
        sg0 = sg                # loop back nodes
        # aperture check
        if FLAGS['useaper']:
            nbsigma = PARAMS['nbsigma']
            if node.aperture is not None:
                aperture = node.aperture
                sigx, sigxp, sigy, sigyp = node['sigxy']
                si,sm,sf = node.position
                if aperture < nbsigma*sigx or aperture < nbsigma*sigy:
                    warnings.showwarning(
                        '{} sigma aperture hit @ s={:.1f} [m]'.format(nbsigma,sm),
                        UserWarning,'lattice.py',
                        'sigma_functions()')
    return sigma_fun
def __init__(self, pin=None, active_high=True, initial_value=False):
    self._active_high = active_high
    super(OutputDevice, self).__init__(pin)
    self._active_state = GPIO.HIGH if active_high else GPIO.LOW
    self._inactive_state = GPIO.LOW if active_high else GPIO.HIGH
    try:
        # NOTE: catch_warnings isn't thread-safe but hopefully no-one's
        # messing around with GPIO init within background threads...
        with warnings.catch_warnings(record=True) as w:
            # This is horrid, but we can't specify initial=None with setup
            if initial_value is None:
                GPIO.setup(pin, GPIO.OUT)
            else:
                GPIO.setup(pin, GPIO.OUT, initial=
                    [self._inactive_state, self._active_state][bool(initial_value)])
        # The only warning we want to squash is a RuntimeWarning that is
        # thrown when setting pins 2 or 3. Anything else should be replayed
        for warning in w:
            if warning.category != RuntimeWarning or pin not in (2, 3):
                warnings.showwarning(
                    warning.message, warning.category, warning.filename,
                    warning.lineno, warning.file, warning.line
                )
    except:
        self.close()
        raise
def test2(input_file): print('---------------------------------TEST2') with open(input_file,'r') as fileobject: try: indat = yaml.load(fileobject) except Exception as ex: warnings.showwarning( 'InputError: {} - STOP'.format(str(ex)), UserWarning, 'lattice_generator.py', 'factory()', ) sys.exit(1) fileobject.close() ky = indat.keys() for k in ky: print() print(k) klist = indat[k] print(klist) if not klist: continue nlist = flatten(klist) if k == 'LATTICE': N = nlist[0] plist = nlist[1:] qlist = plist.copy() for i in range(N-1): qlist += plist nlist=qlist print(nlist)
def Normal(**kwargs):
    '''
    Returns the Union instance of type JOIN
    parameters => dictionary of the form {'alias':ClaseOTD}
    '''
    import warnings
    # warn about the deprecated component
    warnings.showwarning('Union.Normal(): deprecated, use the class constructor directly!',
                         DeprecationWarning,
                         __file__,
                         0)
    # if the parameter is NOT a dictionary
    if type(kwargs) is not dict:
        raise Exception(Union.NOMBRE_CLASE + ".Normal: The parameter must be passed as "
                        + "{'alias':ClaseOTD}!")
    return Union(cTipo=Union.NORMAL, **kwargs)
def new_func(*args, **kwargs): warnings.showwarning( "Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, filename=func.__code__.co_filename, lineno=func.__code__.co_firstlineno + 1 ) return func(*args, **kwargs)
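# The wrapper above is presumably the inner function of a decorator; a minimal assumed
# sketch of how such a wrapper is usually assembled and applied (the name `deprecated`
# and the use of functools.wraps are assumptions, not taken from the snippet):
import functools
import warnings

def deprecated(func):
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.showwarning(
            "Call to deprecated function {}.".format(func.__name__),
            category=DeprecationWarning,
            filename=func.__code__.co_filename,
            lineno=func.__code__.co_firstlineno + 1
        )
        return func(*args, **kwargs)
    return new_func

@deprecated
def old_api():
    return 42

old_api()   # emits the DeprecationWarning, then returns 42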
def ignore_deprecation_warnings(): with warnings.catch_warnings(record=True) as warning_list: # catch all warnings yield # rethrow all warnings that were not DeprecationWarnings for w in warning_list: if not issubclass(w.category, DeprecationWarning): warnings.showwarning(message=w.message, category=w.category, filename=w.filename, lineno=w.lineno, file=w.file, line=w.line)
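# Usage sketch, assuming the generator above is exposed as a context manager via
# @contextlib.contextmanager (the decorator itself is not visible in the snippet):
import warnings

with ignore_deprecation_warnings():
    warnings.warn("old API", DeprecationWarning)   # swallowed
    warnings.warn("real problem", UserWarning)     # re-emitted so it is not lost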
def _handle_caught_warnings(caught_warnings, filename): logger = logging.getLogger(__name__) for warning in caught_warnings: if warning.category == PILImage.DecompressionBombWarning: logger.info('PILImage reported a possible DecompressionBomb' ' for file {}'.format(filename)) else: warnings.showwarning(warning.message, warning.category, warning.filename, warning.lineno)
def _read_config(rcfile): """Read configuration information from a file""" global config defaults = {'enable_deprecation_warning': str(True), 'ncpus': str(multiprocessing.cpu_count()), 'qindenton_url': 'http://virbo.org/ftp/QinDenton/hour/merged/latest/WGhour-latest.d.zip', 'omni2_url': 'http://virbo.org/ftp/OMNI/OMNI2/merged/latest/OMNI_OMNI2-latest.cdf.zip', 'leapsec_url': 'http://maia.usno.navy.mil/ser7/tai-utc.dat', 'psddata_url': 'http://spacepy.lanl.gov/repository/psd_dat.sqlite', 'support_notice': str(True), 'apply_plot_styles': str(True), } #Functions to cast a config value; if not specified, value is a string str2bool = lambda x: x.lower() in ('1', 'yes', 'true', 'on') caster = {'enable_deprecation_warning': str2bool, 'ncpus': int, 'support_notice': str2bool, 'apply_plot_styles': str2bool, } #SafeConfigParser deprecated in 3.2. And this is hideous, but... if hasattr(ConfigParser, 'SafeConfigParser'): cp_class = ConfigParser.SafeConfigParser with warnings.catch_warnings(record=True) as w: warnings.filterwarnings( 'always', 'The SafeConfigParser class has been renamed.*', DeprecationWarning, '^spacepy$') #configparser lies about source of warnings ConfigParser.SafeConfigParser() for this_w in w: if isinstance(this_w.message, DeprecationWarning): cp_class = ConfigParser.ConfigParser else: warnings.showwarning(this_w.message, this_w.category, this_w.filename, this_w.lineno, this_w.file, this_w.line) else: cp_class = ConfigParser.ConfigParser cp = cp_class(defaults) try: successful = cp.read([rcfile]) except ConfigParser.Error: successful = [] if successful: #New file structure config = dict(cp.items('spacepy')) else: #old file structure, wipe it out cp = cp_class() cp.add_section('spacepy') with open(rcfile, 'w') as cf: cp.write(cf) for k in defaults: if not k in config: config[k] = defaults[k] _write_defaults(rcfile, defaults) for k in caster: config[k] = caster[k](config[k])
def ready(self):
    """ Performs a DB engine check, as we maintain some engine specific queries. """
    if connection.vendor not in settings.DSMR_SUPPORTED_DB_VENDORS:
        # Temporary for backwards compatibility
        warnings.showwarning(
            _(
                'Unsupported database engine "{}" active, '
                'some features might not work properly'.format(connection.vendor)
            ),
            RuntimeWarning,
            __file__,
            0
        )
def get_mandatory(attributes,key,item): try: res = attributes[key] except KeyError: warnings.showwarning( 'InputError: Mandatory attribute "{}" missing for element "{}" - STOP'.format(key,item), UserWarning, 'lattice_generator.py', 'get_mandatory()', ) sys.exit(1) return res
def _read_image(file_path): with warnings.catch_warnings(record=True) as caught_warnings: im = PILImage.open(file_path) for warning in caught_warnings: if warning.category == PILImage.DecompressionBombWarning: logger = logging.getLogger(__name__) logger.info('PILImage reported a possible DecompressionBomb' ' for file {}'.format(file_path)) else: warnings.showwarning(warning.message, warning.category, warning.filename, warning.lineno) return im
def __init__(self, pin=None): super(OutputDevice, self).__init__(pin) # NOTE: catch_warnings isn't thread-safe but hopefully no-one's messing # around with GPIO init within background threads... with warnings.catch_warnings(record=True) as w: GPIO.setup(pin, GPIO.OUT) # The only warning we want to squash is a RuntimeWarning that is thrown # when setting pins 2 or 3. Anything else should be replayed for warning in w: if warning.category != RuntimeWarning or pin not in (2, 3): warnings.showwarning( warning.message, warning.category, warning.filename, warning.lineno, warning.file, warning.line )
def __init__(self, pin=None, active_high=True): self._active_high = active_high super(OutputDevice, self).__init__(pin) self._active_state = GPIO.HIGH if active_high else GPIO.LOW self._inactive_state = GPIO.LOW if active_high else GPIO.HIGH try: # NOTE: catch_warnings isn't thread-safe but hopefully no-one's # messing around with GPIO init within background threads... with warnings.catch_warnings(record=True) as w: GPIO.setup(pin, GPIO.OUT) # The only warning we want to squash is a RuntimeWarning that is # thrown when setting pins 2 or 3. Anything else should be replayed for warning in w: if warning.category != RuntimeWarning or pin not in (2, 3): warnings.showwarning(warning.message, warning.category, warning.filename, warning.lineno, warning.file, warning.line) except: self.close() raise
def test_import_error_in_warning_logging(): """ Regression test for https://github.com/astropy/astropy/issues/2671 This test actually puts a goofy fake module into ``sys.modules`` to test this problem. """ class FakeModule(object): def __getattr__(self, attr): raise ImportError('_showwarning should ignore any exceptions ' 'here') log.enable_warnings_logging() sys.modules['<test fake module>'] = FakeModule() try: warnings.showwarning(AstropyWarning('Regression test for #2671'), AstropyWarning, '<this is only a test>', 1) finally: del sys.modules['<test fake module>']
def transform(self, samples):
    components = ['C' + str.zfill(x, 2)
                  for x in np.arange(self.components).astype('str')]
    timenames = select.getsamplingnames(samples)
    subcolix = pd.MultiIndex.from_product([components, timenames],
                                          names=['component', 'sample'])
    ica_samples = pd.DataFrame(index=samples.index, columns=subcolix,
                               dtype=samples.values.dtype)
    chanlen = len(select.getchannelnames(samples))
    with catch_warnings(record=True) as w:
        simplefilter('ignore', UserWarning)
        for index, sample in samples.iterrows():
            self.update_status(len(samples))
            data = sample.reshape(chanlen, -1)
            ica = FastICA(max_iter=self.maxiter, n_components=self.components)
            ica.fit(data)
            # FIXME:
            # how do we get the same ordering (EEGlab is sorting by
            # "mean projected variance")
            # also we need to check the correct sign of the components
            # this is the point where I realized ICA might not be useful at all
            for compindex, component in enumerate(components):
                ica_samples.loc[index].loc[component].values[:] = \
                    ica.components_[compindex]
    if w:
        # showwarning also requires a category, filename and lineno
        showwarning("recorded {0} warnings (non-convergence)".format(len(w)),
                    UserWarning, __file__, 0)
    return ica_samples.astype('float')
def iv__sim900__sweep_current__read_voltage(current_range, current_step,
                                            bias_resistance, measurement_delay):
    from instruments.srs_sim970 import SIM970
    from instruments.srs_sim928 import SIM928
    voltage_meter = SIM970('GPIB0::2', 7)  # voltmeter = SIM970(visa_name = 'GPIB0::2', sim900port = 2)
    voltage_source = SIM928('GPIB0::2', 5)

    current_applied_vec = np.arange(current_range[0],
                                    current_range[1] + current_step, current_step)
    # the applied-current vector must be built before it can be range-checked
    if np.abs(current_applied_vec[0]) >= 0.01 or np.abs(current_applied_vec[-1]) >= 0.01:
        warnings.showwarning(
            'The SIM928 can only output in the range [-10,10]mA. You have attempted to exceed this range.',
            UserWarning, os.path.basename(__file__),
            inspect.currentframe().f_back.f_lineno)

    # SIM928 max voltage = [-20,20]V
    voltage_applied_vec = current_applied_vec * bias_resistance
    if np.abs(voltage_applied_vec[0]) >= 20 or np.abs(voltage_applied_vec[-1]) >= 20:
        warnings.showwarning(
            'The SIM928 can only output in the range [-20,20]V. You have attempted to exceed this range.',
            UserWarning, os.path.basename(__file__),
            inspect.currentframe().f_back.f_lineno)

    voltage_read_vec = []
    voltage_source.reset()
    voltage_source.set_output(True)
    for ii in range(len(voltage_applied_vec)):
        voltage_source.set_voltage(voltage=voltage_applied_vec[ii])
        time.sleep(measurement_delay)
        voltage_read_vec.append(voltage_meter.read_voltage(channel=1))
    voltage_source.set_output(False)
    return current_applied_vec, voltage_read_vec
def __exit__(self, *exc_info): """Exit context manager, called at exit of block""" retval = super(assertWarns, self).__exit__(*exc_info) if exc_info[0] is not None: # Exception in handling, show all warnings for w in self._log: warnings.showwarning(w.message, w.category, w.filename, w.lineno) return retval n_match = 0 # Number of matching warnings msg_pat = re.compile(self._filterspec[1], re.I) cat = self._filterspec[2] mod_pat = self._filterspec[3] matchall = not mod_pat #Empty pattern, match all modules mod_pat = re.compile(mod_pat) lineno = int(self._filterspec[4]) # show_warnings isn't given the module, just the filename, # so find filenames of desired modules. mod_files = [] for m in list(sys.modules): if mod_pat.match(m) and hasattr(sys.modules[m], '__file__'): fnl = sys.modules[m].__file__ if fnl is None: continue if fnl.lower().endswith(('.pyc', '.pyo')): fnl = fnl[:-1] mod_files.append(fnl) for w in self._log: if issubclass(w.category, cat) \ and (matchall or w.filename in mod_files) \ and msg_pat.match(str(w.message)) and lineno in (0, w.lineno): n_match += 1 else: warnings.showwarning(w.message, w.category, w.filename, w.lineno) if self.requireWarning: if n_match == 0: self._testcase.fail('Warning not issued.') elif n_match > 1: self._testcase.fail('Warning issued {} times.'.format(n_match)) elif n_match: self._testcase.fail('Warning was issued.')
def annotate_vcf(self, outfile, random=False, vaf_cutoff=None): ''' Read in a VCF, add annotation and write it back out again. ''' warningcount = 0 with open(outfile, 'w') as out_fh: vcf_writer = vcf.Writer(out_fh, self.reader) for record in self.reader: if vaf_cutoff is not None: vafs = [ self.calculate_vaf(call, record) for call in record.samples ] fvafs = [ x for x in vafs if x is not None ] if max(fvafs) < vaf_cutoff: continue if random: record = self._randomise_record(record) with catch_warnings(record=True) as warnlist: newrecs = self._annotate_record(record) for rec in newrecs: vcf_writer.write_record(rec) # Count AnnotationWarnings, show all others. annwarns = 0 for wrn in warnlist: if issubclass(wrn.category, AnnotationWarning): annwarns += 1 else: showwarning(wrn.message, wrn.category, wrn.filename, wrn.lineno, wrn.file, wrn.line) if annwarns > 0: warningcount += 1 if warningcount > 0: sys.stderr.write("Detected AnnotationWarnings for %d variants.\n" % warningcount)
def twiss_envelopes(self,steps=1): """ Calulate envelopes from initial twiss-vector with beta-matrices """ twfun = Functions(('s','bx','ax','gx','by','ay','gy','bz','az','gz')) bx,ax,gx,epsx = PARAMS['twiss_x_i']() by,ay,gy,epsy = PARAMS['twiss_y_i']() bz,az,gz,epsz = PARAMS['twiss_z_i']() twv0 = NP.array([bx,ax,gx,by,ay,gy,bz,az,gz]) # initial B_matrix = NP.eye(9) # cumulated beta-matrix for node in iter(self): slices = node.make_slices(anz = steps) means = [] s,sm,sf = node.position for slice in slices: beta_matrix = slice.beta_matrix() B_matrix = NP.dot(beta_matrix,B_matrix) twv = NP.dot(B_matrix,twv0) # track twiss-vector s += slice.length twfun.append(s,tuple(twv)) bx = twv[Ktw.bx]; ax = twv[Ktw.ax]; gx = twv[Ktw.gx] by = twv[Ktw.by]; ay = twv[Ktw.ay]; gy = twv[Ktw.gy] sigxy = (*sigmas(ax,bx,epsx),*sigmas(ay,by,epsy)) means.append(sigxy) # means = NP.array(means) means = NP.mean(means,axis=0) # each node has its tuple of average sigmas node['sigxy'] = tuple(means) # aperture check if FLAGS['useaper']: nbsigma = PARAMS['nbsigma'] if node.aperture != None: aperture = node.aperture sigx, sigxp, sigy, sigyp = node['sigxy'] if(aperture < nbsigma*sigx or aperture < nbsigma*sigy): warnings.showwarning( '{} sigma aperture hit @ s={:.1f} [m]'.format(nbsigma,sm), UserWarning,'lattice.py', 'twiss_functions()') return twfun
def __init__(self, klass=None, model=None):
    frame, filename, line_number, function_name, lines, index = inspect.stack()[1]
    warnings.showwarning('Serializers are now directly implemented in models. '
                         'Please use the model class directly instead',
                         DeprecationWarning, filename, line_number)
    self.local_path = None
    self.remote_path = None
    self.fitting_path = None
    self.model_path = None
    self.remote_model_path = None
    self.model = model
    self._fitting = None

    if model is not None:
        self.local_path = join(lore.env.models_dir, model.__module__, model.__class__.__name__)
        self.remote_path = join(model.__module__, model.__class__.__name__)
    elif klass is not None:
        self.local_path = join(lore.env.models_dir, klass.__module__, klass.__name__)
        self.remote_path = join(klass.__module__, klass.__name__)
    else:
        raise ValueError('You must pass name or model')

    self.fitting = self.last_fitting()
def download(cls, fitting=None): frame, filename, line_number, function_name, lines, index = inspect.stack( )[1] warnings.showwarning( 'Please start using explicit fitting number when downloading the model ex "Keras.download(10)". Default Keras.download() will be deprecated in 0.7.0', DeprecationWarning, filename, line_number) model = cls(None, None) if not fitting: fitting = model.last_fitting() model.fitting = int(fitting) try: lore.io.download(model.remote_model_path(), model.model_path(), cache=True) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": model.fitting = None lore.io.download(model.remote_model_path(), model.model_path(), cache=True) return cls.load(fitting)
def qryTransaction(self, contract=None, count=None):
    """
    :param contract: recommended; for some exchanges, omitting it returns all fills.
    :param count: number of fill records to return
    :return:
    """
    params = {}
    self.reqID += 1
    if contract is not None:
        params['contract'] = contract
    else:
        warnings.showwarning(
            "qryTransaction: 'contract' not given; for some exchanges all fills will be returned.",
            UserWarning, __file__, 0)
    if count is not None:
        params['count'] = count
    data = {'reqID': self.reqID, 'callback': self.onQryTransaction, 'params': params}
    self.reqQueue.put(data)
def __ignore_warning(*args, **kwargs): # Execute the code while recording all warnings with warnings.catch_warnings(record=True) as ws: # Catch all warnings of this type warnings.simplefilter('always', warning) # Execute the function result = function(*args, **kwargs) # Now that all code was executed and the warnings # collected, re-send all warnings that are beyond our # expected number of warnings if count is not None: for w in ws[count:]: warnings.showwarning( message=w.message, category=w.category, filename=w.filename, lineno=w.lineno, file=w.file, line=w.line, ) return result
def _runtime_warning_context(): # type: ignore """ Context manager which checks for RuntimeWarnings, specifically to avoid "coroutine 'X' was never awaited" warnings being missed. If RuntimeWarnings occur in the context a RuntimeError is raised. """ with warnings.catch_warnings(record=True) as _warnings: yield rw = [ '{w.filename}:{w.lineno}:{w.message}'.format(w=w) for w in _warnings if w.category == RuntimeWarning ] if rw: raise RuntimeError('{} Runtime Warning{},\n{}'.format( len(rw), '' if len(rw) == 1 else 's', '\n'.join(rw))) # Propagate warnings to pytest for msg in _warnings: warnings.showwarning(msg.message, msg.category, msg.filename, msg.lineno, msg.file, msg.line)
def get_number_and_suffix(string): """Return number and suffix of a string. args: string (str): String. returns: (tuple): number, suffix examples: ``` >>> get_number_and_suffix('1khz') > (1.0, 'khz') ``` """ warnings.showwarning( "get_number_and_suffix will be deprecated. Please use ekpmeasure.universal.get_number_and_suffix", DeprecationWarning, '', 0) return _get_number_and_suffix(string)
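# Usage sketch for the deprecation shim above: it returns the parsed tuple while emitting its
# notice via a direct warnings.showwarning() call, which bypasses the warnings filter
# (direct showwarning calls are not subject to filterwarnings settings).
value = get_number_and_suffix('100ns')   # emits the DeprecationWarning notice
assert value == (100.0, 'ns')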
def __init__(self, owner=None, occupancy=None, tenure=None, address=None, longitude=None,
             latitude=None, value=None, cost=None, area=None, bedrooms=None, bathrooms=None,
             listed=False, damage_state=None, building_stock=None):
    """
    Keyword Arguments:
    owner -- entities.Owner or subclass that represents building owner.
    occupancy -- The building's occupancy. DESaster currently supports SFR and mobile home.
    address -- Building's address
    longitude -- Building's longitude
    latitude -- Building's latitude
    value -- Building's value in $
    cost -- Building's monthly cost in $ (e.g., mortgage or rent)
    bedrooms -- Building's number of bedrooms
    bathrooms -- Building's number of bathrooms
    area -- Building's area in sf
    listed -- Whether building is for rent or sale.
    damage_state -- Building's damage state (e.g., HAZUS damage states)
    building_stock -- The building's associated building stock FilterStore

    Modified Attributes:
    self.damage_value -- Calculated using setStructuralDamageValueHAZUS()

    Inheritance:
    structures.Building
    """
    Building.__init__(self, owner, occupancy, tenure, address, longitude, latitude, value,
                      cost, area, listed, damage_state, building_stock)

    self.bedrooms = bedrooms    # Number of bedrooms in building
    self.bathrooms = bathrooms  # Number of bathrooms in building

    # Verify that building dataframe has expected occupancy types
    # Raise warning, if not (but continue with simulation)
    if not occupancy.lower() in ('single family dwelling', 'mobile home'):
        warnings.showwarning('Warning: SingleFamilyResidential not compatible with given occupancy type: {0}'.format(
                             occupancy.title()), DeprecationWarning, filename=__file__, lineno=661)
def field_limit(Bx, By, Bz, max_field_strength=1.5) -> bool: """ Calculate the absolute field amplitude and check against the max_field_strength. For use with QCoDeS oxford.MercuryiPS_VISA driver. Inputs: Bx, By, Bz (float): magnetic field components Outputs: bool: true, if sqrt(Bx^2+By^2+Bz^2) < max_field_strength; else false """ if max_field_strength > 1.5: showwarning( "Be aware that mu-metal shields are saturated by too large magnetic fields and will not work afterwards. " "Are you sure you want to go to more than 1.5 T?", ResourceWarning, "cqed/cqed/custom_pysweep_functions/magnet", 20, ) if np.sqrt(Bx**2 + By**2 + Bz**2) > max_field_strength: return bool(False) else: return bool(True)
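# Usage sketch for field_limit: guard a requested field vector before ramping. How the check
# is wired into the QCoDeS MercuryiPS driver is not shown here; this only exercises the
# function itself with illustrative values.
bx, by, bz = 0.0, 0.0, 1.0   # Tesla, illustrative
if not field_limit(bx, by, bz):
    raise ValueError("Requested field exceeds max_field_strength; refusing to ramp.")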
def _get_number_and_suffix(string):
    """Return number and suffix of a string. e.g. 1khz will return (1.0, 'khz').

    args:
        string (str): String.

    returns:
        (tuple): number, suffix
    """
    warnings.showwarning("_get_number_and_suffix is deprecated. Please use get_number_and_suffix instead.",
                         DeprecationWarning, '', 0)
    iteration = 0
    number = np.nan
    while np.isnan(number):
        if iteration >= len(string):
            raise ValueError('unable to find a valid number in str: {}'.format(string))
        try:
            number = float(string[:-(1 + iteration)])
        except ValueError:
            iteration += 1
    return number, string[-(iteration + 1):]
def read(self):
    for line in self.fstream:
        line = line.strip().lower()
        if line == 'specgrid':
            self.n = self.read_specgrid()
        elif line == 'coord':
            self.coord = self.read_coord()
        elif line == 'zcorn':
            self.zcorn = self.read_zcorn()
        elif line in {'actnum', 'permx', 'permy', 'permz', 'poro', 'satnum', 'rho', 'kx', 'kz',
                      'emodulus25', 'poissonratio25', 'pressure', }:
            dtype = np.int32 if line in {'actnum', 'satnum'} else np.float64
            self.attribute[line] = self.cell_property(dtype)
        elif line in {'grid', '/', ''} or line.startswith('--'):
            pass
        elif not re.match('[0-9]', line[0]):
            warnings.showwarning(
                'Unknown keyword "{}" encountered in file'.format(line.split()[0]),
                SyntaxWarning, self.filename, self.line_number,
                line=None,
            )
        else:
            pass  # silently skip large number blocks
    self.raw = DiscontBoxMesh(self.n, self.coord, self.zcorn)
def sweep_z(self, points, max_field_strength=1.5): ''' Generate a pysweep.SweepObject to sweep field z amplitude at fixed x and y. Inputs: points (float): sweep values for z, i.e. the magentic field strength, unit: T, range: -max_field_strength <= z <= max_field_strength max_field_strength (float): maximum magnetic field strength, unit: T Output: pysweep.SweepObject ''' if max_field_strength > 1.5: showwarning( 'Be aware that mu-metal shields are saturated by too large magnetic fields and will not work afterwards.' 'Are you sure you want to go to more than 1.5 T?', ResourceWarning, 'cqed/cqed/custom_pysweep_functions/magnet', 162) @MakeMeasurementFunction([]) def point_function(d): return points, [] @MakeMeasurementFunction([]) def set_function(z, d): assert max_field_strength > np.abs(z) >= 0, 'The field amplitude must not exceed {} and be lager than 0.' \ ' Upper limit can be adjusted with kwarg: max_field_strength.' \ ' Proceed with caution (Mu-metal shields do not appreciate high fields!)'.format( max_field_strength) self.magnet.z_target(z) self.magnet.ramp(mode="safe") return [] return SweepObject(set_function=set_function, unit="T", label="z_field", point_function=point_function, dataparameter=None)
def __init__( self, model=None, embed_size=10, sequence_embedding='flatten', sequence_embed_size=10, hidden_width=1024, hidden_layers=4, layer_shrink=0.5, dropout=0, batch_size=32, learning_rate=0.001, decay=0., optimizer=None, hidden_activation='relu', hidden_activity_regularizer=None, hidden_bias_regularizer=None, hidden_kernel_regularizer=None, output_activation='sigmoid', monitor='val_loss', loss='categorical_crossentropy', towers=1, cudnn=True, multi_gpu_model=True, ): kwargs = locals() kwargs.pop('self') kwargs.pop('__class__') super(Keras, self).__init__(**kwargs) frame, filename, line_number, function_name, lines, index = inspect.stack( )[1] warnings.showwarning( 'lore.estimators.keras.Keras is deprecated. Please use "from lore.estimators.keras import Base"', DeprecationWarning, filename, line_number)
def load_Dataset(path, meta_data=None, readfileby=read_ekpy_data): """ Load a dataset from path. Path must contain (pickle or .csv) file ``'meta_data'``. args: path (str): Path to data meta_data (pandas.DataFrame): meta_data if one wishes to provide different meta_data from that provided in path. readfileby (callable): Method for reading data. returns: (Dataset): Dataset """ files = list(os.listdir(path)) existing_ekpds = [] for file in files: if '.ekpds' in file: existing_ekpds.append(file) if len(existing_ekpds) != 0: warnings.showwarning( 'There exist .ekpds files ({}) in this directory. If you want to load those Datasets, be sure to use ``.read_ekpds``' .format(existing_ekpds), UserWarning, '', 0) return Dataset(path, _build_df(path, meta_data), readfileby=readfileby)
except ValueError as e: raise e f.close() default_settings.update(**settings_from_file) class Settings: def __init__(self): for k, v in default_settings.items(): setattr(self, k, v) settings = Settings() if not settings.REMOTE_REPO: warnings.showwarning( 'Missed remote repository parameter', UserWarning, settings_path, 1 ) sys.exit(1) if not getattr(settings, 'REPO_PATH', None): git_name = settings.REMOTE_REPO.split('/')[-1] repo_name = git_name.split('.')[0] settings.REPO_PATH = os.path.join(settings.PROCYON_PATH, repo_name) sys.path.append(settings.REPO_PATH)
from StreamManager.StreamManager4 import StreamManager
from webgraphic.webgraphic import webgraphic

try:
    import threading
except ImportError:
    import dummy_threading as threading

# CPU number
try:
    # try first
    import multiprocessing
except ImportError:
    multiprocessing = None
    proc_cnt = 0
    warnings.showwarning('current system has no multiprocessing support',
                         ResourceWarning, filename=__file__, lineno=87)
else:
    # CPU number if multiprocessing supported
    proc_cnt = ast.literal_eval(os.environ['PROC_CNT'])
finally:
    CPU_CNT = proc_cnt

# PID
PID = os.getpid()

# file root path
ROOT = os.path.dirname(os.path.abspath(__file__))

# 1-initialisation; 2-migration; 3-prediction; 4-adaptation; 5-regeneration
MODE = 3

# path of input data
PATH = '/mad/pcap'

# latest processing file name
def new_warn_explicit(message, category, filename, lineno, module=None, registry=None, module_globals=None, emit_module=None): lineno = int(lineno) if module is None: module = filename or "<unknown>" if module[-3:].lower() == ".py": module = module[:-3] # XXX What about leading pathname? if registry is None: registry = {} if registry.get('version', 0) != warnings._filters_version: registry.clear() registry['version'] = warnings._filters_version if isinstance(message, Warning): text = str(message) category = message.__class__ else: text = message message = category(message) key = (text, category, lineno) # Quick test for common case if registry.get(key): return # Search the filters for item in warnings.filters: item = _get_proxy_filter(item) if len(item) == 5: action, msg, cat, mod, ln = item emod = None else: action, msg, cat, mod, ln, emod = item if ((msg is None or msg.match(text)) and issubclass(category, cat) and (mod is None or mod.match(module)) and (emod is None or emod.match(emit_module)) and (ln == 0 or lineno == ln)): break else: action = defaultaction # Early exit actions if action == "ignore": registry[key] = 1 return # Prime the linecache for formatting, in case the # "file" is actually in a zipfile or something. import linecache linecache.getlines(filename, module_globals) if action == "error": raise message # Other actions if action == "once": registry[key] = 1 oncekey = (text, category) if onceregistry.get(oncekey): return onceregistry[oncekey] = 1 elif action == "always": pass elif action == "module": registry[key] = 1 altkey = (text, category, 0) if registry.get(altkey): return registry[altkey] = 1 elif action == "default": registry[key] = 1 elif action == "custom": pass else: # Unrecognized actions are errors raise RuntimeError( "Unrecognized action (%r) in warnings.filters:\n %s" % (action, item)) if not callable(showwarning): raise TypeError("warnings.showwarning() must be set to a " "function or method") # Print message and context showwarning(message, category, filename, lineno)
def instanciate_element(item): DEBUG_MODULE('instanciate_element: instanciate {}'.format(item)) key = item[0] attributes = item[1] # aperture = PARAMS['aperture'] # default aperture if key == 'D': label = attributes['ID'] length = get_mandatory(attributes,'length',label) aperture = attributes['aperture'] if 'aperture' in attributes else None instance = ELM.D(length=length,label=label,aperture=aperture) instance['label'] = label instance['length'] = length instance['aperture'] = aperture elif key == 'SIXD': label = attributes['ID']+'#' length = get_mandatory(attributes,'length',label) aperture = attributes['aperture'] if 'aperture' in attributes else None instance = ELM.SIXD(length=length,label=label,aperture=aperture) instance['label'] = label instance['length'] = length instance['aperture'] = aperture elif key == 'QF': label = attributes['ID'] length = get_mandatory(attributes,'length',label) dBdz = get_mandatory(attributes,"B'",label) slices = get_mandatory(attributes,'slices',label) aperture = get_mandatory(attributes,'aperture',label) kq = dBdz/PARAMS['sollteilchen'].brho Bpole = dBdz*aperture if slices > 1: instance = replace_QF_with_QFth_lattice(slices,kq,length,label,PARAMS['sollteilchen'],aperture) elif slices <= 1: instance = ELM.QF(k0=kq,length=length,label=label,aperture=aperture) instance['label'] = label instance['dBdz'] = dBdz instance['bore'] = aperture instance['Bpole'] = Bpole pass elif key == 'QD': label = attributes['ID'] length = get_mandatory(attributes,'length',label) dBdz = get_mandatory(attributes,"B'",label) slices = get_mandatory(attributes,'slices',label) aperture = get_mandatory(attributes,'aperture',label) kq = dBdz/PARAMS['sollteilchen'].brho Bpole = dBdz*aperture if slices > 1: instance = replace_QD_with_QDth_lattice(slices,kq,length,label,PARAMS['sollteilchen'],aperture) elif slices <= 1: instance = ELM.QD(k0=kq,length=length,label=label,aperture=aperture) instance['label'] = label instance['dBdz'] = dBdz instance['bore'] = aperture instance['Bpole'] = Bpole elif key == 'RFG': label = attributes['ID'] PhiSoll = radians(get_mandatory(attributes,"PhiSync",label)) freq = float(get_mandatory(attributes,"freq",label)) gap = get_mandatory(attributes,'gap',label) aperture = get_mandatory(attributes,'aperture',label) dWf = FLAGS['dWf'] mapping = PARAMS['mapping'] EzPeak = get_mandatory(attributes,"EzPeak",label) EzAvg = get_mandatory(attributes,"EzAvg",label) if mapping == None: mapping = 't3d' EzPeak = EzAvg if mapping == 'ttf' or mapping == 'dyn' or mapping == 'oxal': # SF-data fname = get_mandatory(attributes,"SFdata",label) if fname not in PARAMS: PARAMS[fname] = SFdata(fname,EzPeak=EzPeak) EzAvg = PARAMS[fname].EzAvg instance = ELM.RFG(EzAvg=EzAvg,PhiSoll=PhiSoll,fRF=freq,label=label,gap=gap,mapping=mapping,dWf=dWf,aperture=aperture,SFdata=PARAMS[fname]) pass else: EzPeak = EzAvg instance = ELM.RFG(EzAvg=EzAvg,PhiSoll=PhiSoll,fRF=freq,label=label,gap=gap,mapping=mapping,dWf=dWf,aperture=aperture) instance['EzAvg'] = EzAvg instance['EzPeak'] = EzPeak instance['label'] = label instance['PhiSoll'] = PhiSoll instance['freq'] = freq instance['gap'] = gap instance['aperture'] = aperture instance['dWf'] = dWf instance['mapping'] = mapping elif key == 'RFC': label = attributes['ID'] PhiSoll = radians(get_mandatory(attributes,"PhiSync",label)) freq = float(get_mandatory(attributes,"freq",label)) gap = get_mandatory(attributes,'gap',label) aperture = get_mandatory(attributes,'aperture',label) dWf = FLAGS['dWf'] length = get_mandatory(attributes,'length',label) mapping = 
PARAMS['mapping'] EzPeak = get_mandatory(attributes,"EzPeak",label) EzAvg = get_mandatory(attributes,"EzAvg",label) if mapping == None: mapping = 't3d' EzPeak = EzAvg if mapping == 'ttf' or mapping == 'dyn' or mapping == 'oxal': # SF-data fname = get_mandatory(attributes,"SFdata",label) if fname not in PARAMS: PARAMS[fname] = SFdata(fname,EzPeak=EzPeak) EzAvg = PARAMS[fname].EzAvg instance = ELM.RFC(EzAvg=EzAvg,label=label,PhiSoll=PhiSoll,fRF=freq,gap=gap,aperture=aperture,dWf=dWf,length=length,mapping=mapping,SFdata=PARAMS[fname]) pass else: EzPeak = EzAvg instance = ELM.RFC(EzAvg=EzAvg,label=label,PhiSoll=PhiSoll,fRF=freq,gap=gap,aperture=aperture,dWf=dWf,length=length,mapping=mapping) instance['EzAvg'] = EzAvg instance['EzPeak'] = EzPeak instance['label'] = label instance['PhiSoll'] = PhiSoll instance['freq'] = freq instance['gap'] = gap instance['aperture'] = aperture instance['dWf'] = dWf instance['length'] = length instance['mapping'] = mapping elif key == 'GAP': label = attributes['ID'] gap = get_mandatory(attributes,'gap',label) EzAvg = get_mandatory(attributes,"EzAvg",label) PhiSoll = radians(get_mandatory(attributes,"PhiSync",label)) freq = float(get_mandatory(attributes,"freq",label)) dWf = FLAGS['dWf'] aperture = get_mandatory(attributes,'aperture',label) instance = ELM.GAP(EzAvg=EzAvg,PhiSoll=PhiSoll,fRF=freq,label=label,gap=gap,dWf=dWf,aperture=aperture) instance['EzAvg'] = EzAvg instance['EzPeak'] = EzAvg instance['label'] = label instance['gap'] = gap instance['PhiSoll'] = PhiSoll instance['freq'] = freq instance['dWf'] = dWf elif key == 'MRK': label = attributes['ID'] action = get_mandatory(attributes,'action',label) if 'scatter' == action: prefix = attributes['prefix'] if 'prefix' in attributes else '' abszisse = attributes['abscissa'] if 'abscissa' in attributes else 'z' ordinate = attributes['ordinate'] if 'ordinate' in attributes else 'zp' instance = MRK.PoincareAction(label=label, prefix=prefix, abszisse=abszisse, ordinate=ordinate) instance['prefix'] = prefix instance['abszisse'] = abszisse instance['ordinate'] = ordinate else: instance = ELM.MRK(label=label,action=action) instance['label'] = label instance['action'] = action else: warnings.showwarning( 'InputError: Unknown element type encountered: "{}" - STOP'.format(key), UserWarning, 'lattice_generator.py', 'instanciate_element()', ) sys.exit(1) try: sec = attributes['sec'] #can fail because sections are not mandatory except: pass else: instance.section = sec return (label,instance)
# Fit chebyshev polynomials. print("Working on objects %d to %d in timespan %f to %f" % (n, n + nObj, t, t + tSpan), file=log) cheb = ChebyFits(subsetOrbits, t, tSpan, skyTolerance=args.skyTol, nDecimal=args.nDecimal, nCoeff_position=args.nCoeff, ngran=64, nCoeff_vmag=9, nCoeff_delta=5, nCoeff_elongation=6, obscode=807, timeScale='TAI') try: cheb.calcSegmentLength(length=args.length) except ValueError as ve: cheb.length = None for objId in subsetOrbits.orbits['objId'].as_matrix(): cheb.failed.append((objId, tStart, tEnd)) warnings.showwarning("Objid %s to %s (n %d to %d), segment %f to %f - error: %s" % (subsetOrbits.orbits.objId.iloc[0], subsetOrbits.orbits.objId.iloc[-1], n, n + nObj, t, t + tSpan, ve.message), UserWarning, "generateCoefficients.py", 132, file=log) # Put this in a separate try/except block, because errors here can mask errors in the previous # length determination stage otherwise. if cheb.length is not None: try: cheb.calcSegments() except ValueError as ve: for objId in subsetOrbits.orbits['objId'].as_matrix(): cheb.failed.append((objId, tStart, tEnd)) warnings.showwarning("Objid %s to %s (n %d to %d), segment %f to %f - error: %s" % (subsetOrbits.orbits.objId.iloc[0], subsetOrbits.orbits.objId.iloc[-1], n, n + nObj, t, t + tSpan, ve.message),
else: new_lis.append(item) return new_lis ##--------- MAIN if __name__ == '__main__': input_file = 'simuINstat.yml' # input_file = 'learnyaml.yaml' with open(input_file,'r') as fileobject: try: indat = yaml.load(fileobject) except Exception as ex: warnings.showwarning( 'InputError: {} - STOP'.format(str(ex)), UserWarning, 'lattice_generator.py', 'factory()', ) sys.exit(1) fileobject.close() ky = indat.keys() for k in ky: print() print(k) klist = indat[k] print(klist) if not klist: continue nlist = flatten(klist) if k == 'LATTICE': N = nlist[0] plist = nlist[1:]
def _scan_all_plugins( modules: List[Any], ) -> Tuple[Dict[Type[Plugin], List[Type[Plugin]]], ScanStats]: stats = ScanStats() stats.total_time = timer() ret: Dict[Type[Plugin], List[Type[Plugin]]] = defaultdict(list) plugin_types: List[Type[Plugin]] = [ Plugin, ConfigSource, CompletionPlugin, Launcher, Sweeper, SearchPathPlugin, ] for mdl in modules: for importer, modname, ispkg in pkgutil.walk_packages( path=mdl.__path__, prefix=mdl.__name__ + ".", onerror=lambda x: None): try: module_name = modname.rsplit(".", 1)[-1] # If module's name starts with "_", do not load the module. # But if the module's name starts with a "__", then load the # module. if module_name.startswith( "_") and not module_name.startswith("__"): continue import_time = timer() m = importer.find_module(modname) with warnings.catch_warnings( record=True) as recorded_warnings: loaded_mod = m.load_module(modname) import_time = timer() - import_time if len(recorded_warnings) > 0: sys.stderr.write( f"[Hydra plugins scanner] : warnings from '{modname}'. Please report to plugin author.\n" ) for w in recorded_warnings: warnings.showwarning( message=w.message, # type: ignore category=w.category, filename=w.filename, lineno=w.lineno, file=w.file, line=w.line, ) stats.total_modules_import_time += import_time assert modname not in stats.modules_import_time stats.modules_import_time[modname] = import_time if loaded_mod is not None: for name, obj in inspect.getmembers(loaded_mod): if (inspect.isclass(obj) and issubclass(obj, Plugin) and not inspect.isabstract(obj)): for plugin_type in plugin_types: if issubclass(obj, plugin_type): ret[plugin_type].append(obj) except ImportError as e: warnings.warn( message=f"\n" f"\tError importing '{modname}'.\n" f"\tPlugin is incompatible with this Hydra version or buggy.\n" f"\tRecommended to uninstall or upgrade plugin.\n" f"\t\t{type(e).__name__} : {e}", category=UserWarning, ) stats.total_time = timer() - stats.total_time return ret, stats
def validate_nwbs(): global TOTAL, FAILURES, ERRORS logging.info('running validation tests on NWB files') examples_nwbs = glob.glob('*.nwb') import pynwb for nwb in examples_nwbs: try: logging.info("Validating file %s" % nwb) ws = list() with warnings.catch_warnings(record=True) as tmp: logging.info("Validating with pynwb.validate method.") with pynwb.NWBHDF5IO(nwb, mode='r') as io: errors = pynwb.validate(io) TOTAL += 1 if errors: FAILURES += 1 ERRORS += 1 for err in errors: print("Error: %s" % err) def get_namespaces(nwbfile): comp = run([ "python", "-m", "pynwb.validate", "--list-namespaces", "--cached-namespace", nwb ], stdout=PIPE, stderr=STDOUT, universal_newlines=True, timeout=20) if comp.returncode != 0: return [] return comp.stdout.split() namespaces = get_namespaces(nwb) if len(namespaces) == 0: FAILURES += 1 ERRORS += 1 cmds = [] cmds += [["python", "-m", "pynwb.validate", nwb]] cmds += [[ "python", "-m", "pynwb.validate", "--cached-namespace", nwb ]] cmds += [[ "python", "-m", "pynwb.validate", "--no-cached-namespace", nwb ]] for ns in namespaces: cmds += [[ "python", "-m", "pynwb.validate", "--cached-namespace", "--ns", ns, nwb ]] for cmd in cmds: logging.info("Validating with \"%s\"." % (" ".join(cmd[:-1]))) comp = run(cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True, timeout=20) TOTAL += 1 if comp.returncode != 0: FAILURES += 1 ERRORS += 1 print("Error: %s" % comp.stdout) for w in tmp: # ignore RunTimeWarnings about importing if isinstance(w.message, RuntimeWarning) and not warning_re.match( str(w.message)): ws.append(w) for w in ws: warnings.showwarning(w.message, w.category, w.filename, w.lineno, w.line) except Exception: print(traceback.format_exc()) FAILURES += 1 ERRORS += 1
def solve_mbar_once(u_kn_nonzero, N_k_nonzero, f_k_nonzero, method="hybr", tol=1E-12, options=None): """Solve MBAR self-consistent equations using some form of equation solver. Parameters ---------- u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float' The reduced potential energies, i.e. -log unnormalized probabilities for the nonempty states N_k_nonzero : np.ndarray, shape=(n_states), dtype='int' The number of samples in each state for the nonempty states f_k_nonzero : np.ndarray, shape=(n_states), dtype='float' The reduced free energies for the nonempty states method : str, optional, default="hybr" The optimization routine to use. This can be any of the methods available via scipy.optimize.minimize() or scipy.optimize.root(). tol : float, optional, default=1E-14 The convergance tolerance for minimize() or root() verbose: bool Whether to print information about the solution method. options: dict, optional, default=None Optional dictionary of algorithm-specific parameters. See scipy.optimize.root or scipy.optimize.minimize for details. Returns ------- f_k : np.ndarray The converged reduced free energies. results : dict Dictionary containing entire results of optimization routine, may be useful when debugging convergence. Notes ----- This function requires that N_k_nonzero > 0--that is, you should have already dropped all the states for which you have no samples. Internally, this function works in a reduced coordinate system defined by subtracting off the first component of f_k and fixing that component to be zero. For fast but precise convergence, we recommend calling this function multiple times to polish the result. `solve_mbar()` facilitates this. """ u_kn_nonzero, N_k_nonzero, f_k_nonzero = validate_inputs( u_kn_nonzero, N_k_nonzero, f_k_nonzero) f_k_nonzero = f_k_nonzero - f_k_nonzero[ 0] # Work with reduced dimensions with f_k[0] := 0 u_kn_nonzero = precondition_u_kn(u_kn_nonzero, N_k_nonzero, f_k_nonzero) pad = lambda x: np.pad( x, (1, 0), mode='constant' ) # Helper function inserts zero before first element unpad_second_arg = lambda obj, grad: (obj, grad[ 1:]) # Helper function drops first element of gradient # Create objective functions / nonlinear equations to send to scipy.optimize, fixing f_0 = 0 grad = lambda x: mbar_gradient(u_kn_nonzero, N_k_nonzero, pad(x))[ 1:] # Objective function gradient grad_and_obj = lambda x: unpad_second_arg(*mbar_objective_and_gradient( u_kn_nonzero, N_k_nonzero, pad(x) )) # Objective function gradient and objective function hess = lambda x: mbar_hessian(u_kn_nonzero, N_k_nonzero, pad(x))[ 1:][:, 1:] # Hessian of objective function with warnings.catch_warnings(record=True) as w: if method in [ "L-BFGS-B", "dogleg", "CG", "BFGS", "Newton-CG", "TNC", "trust-ncg", "SLSQP" ]: if method in ["L-BFGS-B", "CG"]: hess = None # To suppress warning from passing a hessian function. results = scipy.optimize.minimize(grad_and_obj, f_k_nonzero[1:], jac=True, hess=hess, method=method, tol=tol, options=options) f_k_nonzero = pad(results["x"]) elif method == 'adaptive': results = adaptive(u_kn_nonzero, N_k_nonzero, f_k_nonzero, tol=tol, options=options) f_k_nonzero = results # they are the same for adaptive, until we decide to return more. 
else: results = scipy.optimize.root(grad, f_k_nonzero[1:], jac=hess, method=method, tol=tol, options=options) f_k_nonzero = pad(results["x"]) #If there were runtime warnings, show the messages if len(w) > 0: for warn_msg in w: warnings.showwarning(warn_msg.message, warn_msg.category, warn_msg.filename, warn_msg.lineno, warn_msg.file, "") #Ensure MBAR solved correctly W_nk_check = mbar_W_nk(u_kn_nonzero, N_k_nonzero, f_k_nonzero) check_w_normalized(W_nk_check, N_k_nonzero) print( "MBAR weights converged within tolerance, despite the SciPy Warnings. Please validate your results." ) return f_k_nonzero, results
dump utilities for `pcapkit` implementation """ import os import warnings import tbtrim from pcapkit.utilities.exceptions import DEVMODE, BaseError from pcapkit.utilities.warnings import DevModeWarning # set up sys.excepthook if DEVMODE: warnings.showwarning( 'development mode enabled', DevModeWarning, filename=__file__, lineno=0, line=f"PCAPKIT_DEVMODE={os.environ['PCAPKIT_DEVMODE']}") else: ROOT = os.path.dirname(os.path.realpath(__file__)) tbtrim.set_trim_rule(lambda filename: ROOT in os.path.realpath(filename), exception=BaseError, strict=False) # All Reference import pcapkit.all # Interface from pcapkit.interface import * # ToolKit
# now the convergence should be stricter and more accurate. # !!! fixed bug in ellipsoid: dr2**POW --> dr2**(POW/2.) # v10.8 # netRads accepting node and link labels # !!!! bugfix on networkBase get_json: # new params were ignored. Now JSON updates params before making network. #import tensorflow as tf from numpy import * import json, os, time from warnings import showwarning try: import tensorflow as tf except ImportError: showwarning("Can't import tensorflow!", ImportWarning, 'net3d_v9_4_10', 8) try: from matplotlib.pyplot import * #from matplotlib.mlab import movavg except ImportError: showwarning("Can't import matplotlib.pyplot!", ImportWarning, 'net3d_v9_4_10', 8) ############################ __version__ = 'v10.7' ############################ PAIRS = 100 POW = 2 POWn = 2
def solve_mbar_once(u_kn_nonzero, N_k_nonzero, f_k_nonzero, method="hybr", tol=1E-12, options=None): """Solve MBAR self-consistent equations using some form of equation solver. Parameters ---------- u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float' The reduced potential energies, i.e. -log unnormalized probabilities for the nonempty states N_k_nonzero : np.ndarray, shape=(n_states), dtype='int' The number of samples in each state for the nonempty states f_k_nonzero : np.ndarray, shape=(n_states), dtype='float' The reduced free energies for the nonempty states method : str, optional, default="hybr" The optimization routine to use. This can be any of the methods available via scipy.optimize.minimize() or scipy.optimize.root(). tol : float, optional, default=1E-14 The convergance tolerance for minimize() or root() verbose: bool Whether to print information about the solution method. options: dict, optional, default=None Optional dictionary of algorithm-specific parameters. See scipy.optimize.root or scipy.optimize.minimize for details. Returns ------- f_k : np.ndarray The converged reduced free energies. results : dict Dictionary containing entire results of optimization routine, may be useful when debugging convergence. Notes ----- This function requires that N_k_nonzero > 0--that is, you should have already dropped all the states for which you have no samples. Internally, this function works in a reduced coordinate system defined by subtracting off the first component of f_k and fixing that component to be zero. For fast but precise convergence, we recommend calling this function multiple times to polish the result. `solve_mbar()` facilitates this. """ u_kn_nonzero, N_k_nonzero, f_k_nonzero = validate_inputs(u_kn_nonzero, N_k_nonzero, f_k_nonzero) f_k_nonzero = f_k_nonzero - f_k_nonzero[0] # Work with reduced dimensions with f_k[0] := 0 u_kn_nonzero = precondition_u_kn(u_kn_nonzero, N_k_nonzero, f_k_nonzero) pad = lambda x: np.pad(x, (1, 0), mode='constant') # Helper function inserts zero before first element unpad_second_arg = lambda obj, grad: (obj, grad[1:]) # Helper function drops first element of gradient # Create objective functions / nonlinear equations to send to scipy.optimize, fixing f_0 = 0 grad = lambda x: mbar_gradient(u_kn_nonzero, N_k_nonzero, pad(x))[1:] # Objective function gradient grad_and_obj = lambda x: unpad_second_arg(*mbar_objective_and_gradient(u_kn_nonzero, N_k_nonzero, pad(x))) # Objective function gradient and objective function hess = lambda x: mbar_hessian(u_kn_nonzero, N_k_nonzero, pad(x))[1:][:, 1:] # Hessian of objective function with warnings.catch_warnings(record=True) as w: if method in ["L-BFGS-B", "dogleg", "CG", "BFGS", "Newton-CG", "TNC", "trust-ncg", "SLSQP"]: if method in ["L-BFGS-B", "CG"]: hess = None # To suppress warning from passing a hessian function. results = scipy.optimize.minimize(grad_and_obj, f_k_nonzero[1:], jac=True, hess=hess, method=method, tol=tol, options=options) f_k_nonzero = pad(results["x"]) elif method == 'adaptive': results = adaptive(u_kn_nonzero, N_k_nonzero, f_k_nonzero, tol=tol, options=options) f_k_nonzero = results # they are the same for adaptive, until we decide to return more. 
        else:
            results = scipy.optimize.root(grad, f_k_nonzero[1:], jac=hess, method=method,
                                          tol=tol, options=options)
            f_k_nonzero = pad(results["x"])

    # If there were runtime warnings, show the messages
    if len(w) > 0:
        can_ignore = True
        for warn_msg in w:
            if "Unknown solver options" in str(warn_msg.message):
                continue
            warnings.showwarning(warn_msg.message, warn_msg.category, warn_msg.filename,
                                 warn_msg.lineno, warn_msg.file, "")
            can_ignore = False  # If any warning is not just unknown options, cannot skip the check
        if not can_ignore:
            # Ensure MBAR solved correctly
            w_nk_check = mbar_W_nk(u_kn_nonzero, N_k_nonzero, f_k_nonzero)
            check_w_normalized(w_nk_check, N_k_nonzero)
            print("MBAR weights converged within tolerance, despite the SciPy Warnings. "
                  "Please validate your results.")

    return f_k_nonzero, results
import warnings
import yfinance

__version__ = 'yfinance v. ' + yfinance.__version__
__author__ = yfinance.__author__

Ticker = yfinance.Ticker
download = yfinance.download
pdr_override = yfinance.pdr_override


def get_yahoo_crumb(*args, **kwargs):
    pass


def parse_ticker_csv(*args, **kwargs):
    pass


__all__ = yfinance.__all__ + ['get_yahoo_crumb', 'parse_ticker_csv']

warnings.showwarning("""
    *** `fix_yahoo_finance` was renamed to `yfinance`. ***
    Please install and use `yfinance` directly using `pip install yfinance -U`

    More information: https://github.com/ranaroussi/yfinance
""", DeprecationWarning, __file__, 0)
def initialize_model( rng_key, model, *, init_strategy=init_to_uniform, dynamic_args=False, model_args=(), model_kwargs=None, forward_mode_differentiation=False, validate_grad=True, ): """ (EXPERIMENTAL INTERFACE) Helper function that calls :func:`~numpyro.infer.util.get_potential_fn` and :func:`~numpyro.infer.util.find_valid_initial_params` under the hood to return a tuple of (`init_params_info`, `potential_fn`, `postprocess_fn`, `model_trace`). :param jax.random.PRNGKey rng_key: random number generator seed to sample from the prior. The returned `init_params` will have the batch shape ``rng_key.shape[:-1]``. :param model: Python callable containing Pyro primitives. :param callable init_strategy: a per-site initialization function. See :ref:`init_strategy` section for available functions. :param bool dynamic_args: if `True`, the `potential_fn` and `constraints_fn` are themselves dependent on model arguments. When provided a `*model_args, **model_kwargs`, they return `potential_fn` and `constraints_fn` callables, respectively. :param tuple model_args: args provided to the model. :param dict model_kwargs: kwargs provided to the model. :param bool forward_mode_differentiation: whether to use forward-mode differentiation or reverse-mode differentiation. By default, we use reverse mode but the forward mode can be useful in some cases to improve the performance. In addition, some control flow utility on JAX such as `jax.lax.while_loop` or `jax.lax.fori_loop` only supports forward-mode differentiation. See `JAX's The Autodiff Cookbook <https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html>`_ for more information. :param bool validate_grad: whether to validate gradient of the initial params. Defaults to True. :return: a namedtupe `ModelInfo` which contains the fields (`param_info`, `potential_fn`, `postprocess_fn`, `model_trace`), where `param_info` is a namedtuple `ParamInfo` containing values from the prior used to initiate MCMC, their corresponding potential energy, and their gradients; `postprocess_fn` is a callable that uses inverse transforms to convert unconstrained HMC samples to constrained values that lie within the site's support, in addition to returning values at `deterministic` sites in the model. 
""" model_kwargs = {} if model_kwargs is None else model_kwargs substituted_model = substitute( seed(model, rng_key if jnp.ndim(rng_key) == 1 else rng_key[0]), substitute_fn=init_strategy, ) ( inv_transforms, replay_model, has_enumerate_support, model_trace, ) = _get_model_transforms(substituted_model, model_args, model_kwargs) # substitute param sites from model_trace to model so # we don't need to generate again parameters of `numpyro.module` model = substitute( model, data={ k: site["value"] for k, site in model_trace.items() if site["type"] in ["param"] }, ) constrained_values = { k: v["value"] for k, v in model_trace.items() if v["type"] == "sample" and not v["is_observed"] and not v["fn"].is_discrete } if has_enumerate_support: from numpyro.contrib.funsor import config_enumerate, enum if not isinstance(model, enum): max_plate_nesting = _guess_max_plate_nesting(model_trace) _validate_model(model_trace) model = enum(config_enumerate(model), -max_plate_nesting - 1) potential_fn, postprocess_fn = get_potential_fn( model, inv_transforms, replay_model=replay_model, enum=has_enumerate_support, dynamic_args=dynamic_args, model_args=model_args, model_kwargs=model_kwargs, ) init_strategy = (init_strategy if isinstance(init_strategy, partial) else init_strategy()) if (init_strategy.func is init_to_value) and not replay_model: init_values = init_strategy.keywords.get("values") unconstrained_values = transform_fn(inv_transforms, init_values, invert=True) init_strategy = _init_to_unconstrained_value( values=unconstrained_values) prototype_params = transform_fn(inv_transforms, constrained_values, invert=True) (init_params, pe, grad), is_valid = find_valid_initial_params( rng_key, substitute( model, data={ k: site["value"] for k, site in model_trace.items() if site["type"] in ["plate"] }, ), init_strategy=init_strategy, enum=has_enumerate_support, model_args=model_args, model_kwargs=model_kwargs, prototype_params=prototype_params, forward_mode_differentiation=forward_mode_differentiation, validate_grad=validate_grad, ) if not_jax_tracer(is_valid): if device_get(~jnp.all(is_valid)): with numpyro.validation_enabled(), trace() as tr: # validate parameters substituted_model(*model_args, **model_kwargs) # validate values for site in tr.values(): if site["type"] == "sample": with warnings.catch_warnings(record=True) as ws: site["fn"]._validate_sample(site["value"]) if len(ws) > 0: for w in ws: # at site information to the warning message w.message.args = ("Site {}: {}".format( site["name"], w.message.args[0]), ) + w.message.args[1:] warnings.showwarning( w.message, w.category, w.filename, w.lineno, file=w.file, line=w.line, ) raise RuntimeError( "Cannot find valid initial parameters. Please check your model again." ) return ModelInfo(ParamInfo(init_params, pe, grad), potential_fn, postprocess_fn, model_trace)
def parse_imageinfo(gpath):
    """
    Worker function: gpath must be in UNIX-PATH format!

    Args:
        gpath (str): image path (a single argument rather than a tuple, so the
            function can be parallelized easily)

    Returns:
        tuple: param_tup - if successful, returns a tuple of image parameters
            which are values for SQL columns; otherwise returns None

    CommandLine:
        python -m ibeis.algo.preproc.preproc_image --exec-parse_imageinfo

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.preproc.preproc_image import *  # NOQA
        >>> gpath = ('/media/raid/work/lynx/_ibsdb/images/f6c84c6d-55ca-fd02-d0b4-1c7c9c27c894.jpg')
        >>> param_tup = parse_imageinfo(gpath)
        >>> result = ('param_tup = %s' % (str(param_tup),))
        >>> print(result)
    """
    #print('[ginfo] gpath=%r' % gpath)
    # Try to open the image, recording any warnings PIL emits
    with warnings.catch_warnings(record=True) as w:
        try:
            pil_img = Image.open(gpath, 'r')  # Open PIL Image
        except IOError as ex:
            print('[preproc] IOError: %s' % (str(ex),))
            return None
        if len(w) > 0:
            # Replay captured warnings through the normal warning machinery
            for warn in w:
                warnings.showwarning(warn.message, warn.category,
                                     warn.filename, warn.lineno, warn.file,
                                     warn.line)
            #warnstr = warnings.formatwarning
            #print(warnstr)
            print('Warnings issued by %r' % (gpath,))
    # Parse out the data
    width, height = pil_img.size          # Read width, height
    time, lat, lon = parse_exif(pil_img)  # Read exif tags
    # We cannot use pixel data as libjpeg is not deterministic (even for reads!)
    image_uuid = ut.get_file_uuid(gpath)  # Read file ]-hash-> guid = gid
    #orig_gpath = gpath
    orig_gname = basename(gpath)
    ext = get_standard_ext(gpath)
    notes = ''
    # Build parameters tuple
    param_tup = (
        image_uuid,
        gpath,
        gpath,
        orig_gname,
        #orig_gpath,
        ext,
        width,
        height,
        time,
        lat,
        lon,
        notes
    )
    #print('[ginfo] %r %r' % (image_uuid, orig_gname))
    return param_tup
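# --- Self-contained sketch (added for illustration, not from the original
# --- project) of the capture-and-replay pattern used above: record warnings
# --- raised inside a block, then re-emit them unchanged via
# --- warnings.showwarning so the usual filtering and formatting still apply.
import warnings

def replay_warnings(recorded):
    """Re-emit warnings captured by warnings.catch_warnings(record=True)."""
    for w in recorded:
        warnings.showwarning(w.message, w.category, w.filename, w.lineno,
                             w.file, w.line)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    warnings.warn("something looks off", UserWarning)  # example warning

replay_warnings(caught)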
def _read_config(rcfile):
    """Read configuration information from a file"""
    global config
    defaults = {
        'enable_deprecation_warning': str(True),
        'keepalive': str(True),
        'ncpus': str(multiprocessing.cpu_count()),
        'qindenton_url': 'http://virbo.org/ftp/QinDenton/hour/merged/latest/WGhour-latest.d.zip',
        'qd_daily_url': 'https://rbsp-ect.newmexicoconsortium.org/data_pub/QinDenton/',
        'omni2_url': 'https://spdf.gsfc.nasa.gov/pub/data/omni/omni_cdaweb/hourly/',
        'leapsec_url': 'https://oceandata.sci.gsfc.nasa.gov/Ancillary/LUTs/modis/leapsec.dat',
        'psddata_url': 'http://spacepy.lanl.gov/repository/psd_dat.sqlite',
        'support_notice': str(True),
        'apply_plot_styles': str(True),
    }
    # Functions to cast a config value; if not specified, value is a string
    str2bool = lambda x: x.lower() in ('1', 'yes', 'true', 'on')
    caster = {
        'enable_deprecation_warning': str2bool,
        'keepalive': str2bool,
        'ncpus': int,
        'support_notice': str2bool,
        'apply_plot_styles': str2bool,
    }
    # SafeConfigParser deprecated in 3.2. And this is hideous, but...
    if hasattr(ConfigParser, 'SafeConfigParser'):
        cp_class = ConfigParser.SafeConfigParser
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings(
                'always', 'The SafeConfigParser class has been renamed.*',
                DeprecationWarning,
                '^spacepy$')  # configparser lies about source of warnings
            ConfigParser.SafeConfigParser()
        for this_w in w:
            if isinstance(this_w.message, DeprecationWarning):
                cp_class = ConfigParser.ConfigParser
            else:
                warnings.showwarning(this_w.message, this_w.category,
                                     this_w.filename, this_w.lineno,
                                     this_w.file, this_w.line)
    else:
        cp_class = ConfigParser.ConfigParser
    cp = cp_class(defaults)
    try:
        successful = cp.read([rcfile])
    except ConfigParser.Error:
        successful = []
    if successful:  # New file structure
        config = dict(cp.items('spacepy'))
    else:  # old file structure, wipe it out
        cp = cp_class()
        cp.add_section('spacepy')
        with open(rcfile, 'w') as cf:
            cp.write(cf)
    for k in defaults:
        if not k in config:
            config[k] = defaults[k]
            _write_defaults(rcfile, defaults)
    for k in caster:
        config[k] = caster[k](config[k])
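# --- Illustration only (not from spacepy): how ConfigParser defaults plus a
# --- per-key "caster" table yield typed config values. The section name,
# --- file name and keys here are hypothetical.
import configparser

defaults = {'ncpus': '4', 'keepalive': 'True'}
str2bool = lambda x: x.lower() in ('1', 'yes', 'true', 'on')
caster = {'ncpus': int, 'keepalive': str2bool}

cp = configparser.ConfigParser(defaults)
cp.read(['example.rc'])              # a missing file is silently ignored
if not cp.has_section('example'):
    cp.add_section('example')
config = dict(cp.items('example'))   # defaults merged with file values
for k, fn in caster.items():
    config[k] = fn(config[k])        # e.g. '4' -> 4, 'True' -> True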
import time
import pickle
import asyncio
import warnings

import aiohttp.web

try:
    import aioredis
except ImportError:
    # showwarning requires category, filename and lineno besides the message
    warnings.showwarning(
        "aioredis library not found. Redis cache client not available",
        ImportWarning, __file__, 0)


class BaseStaticBuilder(object):

    def __init__(self, expiration: int = 300, base_path: str = None):
        self.base_path = base_path
        self.expiration = expiration

    async def write(self, content: str, path: str) -> object:
        raise NotImplementedError()

    def make_key(self, request: aiohttp.web.Request) -> str:
        key = "{method}{host}{path}{postdata}{ctype}".format(
            method=request.method,
            path=request.path_qs,
            host=request.host,
            postdata="".join(request.post()),
            ctype=request.content_type)
        return key
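# --- Hypothetical subclass (added for illustration, not part of the original
# --- module): a file-system backed builder showing one way write() might be
# --- implemented; the directory layout is an assumption.
import os

class FileStaticBuilder(BaseStaticBuilder):

    async def write(self, content: str, path: str) -> str:
        # Store the rendered page under base_path, mirroring the request path
        target = os.path.join(self.base_path or ".", path.lstrip("/"))
        os.makedirs(os.path.dirname(target) or ".", exist_ok=True)
        with open(target, "w") as fh:
            fh.write(content)
        return target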
# Double-Ellipsoid forces
import tensorflow as tf
from numpy import *
import json, os, time
from warnings import showwarning
try:
    from matplotlib.pyplot import *
    #from matplotlib.mlab import movavg
except ImportError:
    showwarning("Can't import matplotlib.pyplot!", ImportWarning,
                'net3d_v9_4_6', 8)

PAIRS = 100
POW = 2
POWn = 2
POW_SN = 2  # power extra factors of r_i+r_j in node repulsion
#pairs = array([i for i in itertools.combinations(arange(400),2)])


def expand(pts, n):
    """Resample the rows of pts to n points by linear interpolation."""
    s = pts.shape
    x = linspace(0, s[0] - 1, n)
    xp = arange(s[0])
    return array([interp(x, xp, i) for i in pts.T]).T


def dvecz(x, y):
    #assert (len(x.shape)==2) and (x.shape[1] == y.shape[1])
    xT_ = tf.constant(x[:, :, newaxis].T, dtype=tf.float32)
def factory(input_file):
    """ factory creates a lattice from input-file """
    # --------
    def read_elements(in_data):
        element_list = in_data['ELEMENTS']
        elements = liofd2d(element_list)
        # add {'ID':key} to attribute list
        IDs = elements.keys()
        for ID in IDs:
            attList = elements[ID]
            attList.append({'ID': ID})
        return elements
    # --------
    def read_flags(in_data):
        """returns a dict of flags"""
        flags_list = in_data['FLAGS']
        flags = liofd2d(flags_list) if flags_list != None else {}
        if 'accON' in flags:
            if flags['accON']:
                FLAGS['dWf'] = 1.
                SUMMARY['accON'] = True
            else:
                FLAGS['dWf'] = 0.
                SUMMARY['accON'] = False
        if 'periodic' in flags:
            FLAGS['periodic'] = flags['periodic']
        if 'egf' in flags:
            FLAGS['egf'] = flags['egf']
        if 'sigma' in flags:
            FLAGS['sigma'] = flags['sigma']
        if 'KVout' in flags:
            FLAGS['KVout'] = flags['KVout']
        if 'verbose' in flags:
            FLAGS['verbose'] = flags['verbose']
        if 'express' in flags:
            FLAGS['express'] = flags['express']
        if 'useaper' in flags:
            FLAGS['useaper'] = flags['useaper']
        if 'bucket' in flags:
            FLAGS['bucket'] = flags['bucket']
        if 'csTrak' in flags:
            FLAGS['csTrak'] = flags['csTrak']
        if 'pspace' in flags:
            FLAGS['pspace'] = flags['pspace']
        return flags
    # --------
    def read_sections(in_data):
        """ returns a list of section names """
        sec_list = []
        use_sections = True
        try:
            ## can fail because sections are not mandatory
            sec_list = in_data['SECTIONS']
            sec_list = flatten(sec_list)
        except:
            use_sections = False
        PARAMS['sections'] = sec_list
        FLAGS['sections'] = use_sections
        return sec_list
    # --------
    def read_parameters(in_data):
        """ returns a dict of parameters """
        parameter_list = in_data['PARAMETERS']
        parameters = liofd2d(parameter_list)
        if 'Tkin' in parameters:
            PARAMS['injection_energy'] = parameters['Tkin']
        if 'phi_sync' in parameters:
            PARAMS['phisoll'] = parameters['phi_sync']
        if 'gap' in parameters:
            PARAMS['gap'] = parameters['gap']
        if 'cav_len' in parameters:
            PARAMS['cavity_laenge'] = parameters['cav_len']
        if 'ql' in parameters:
            PARAMS['ql'] = parameters['ql']
        if 'windings' in parameters:
            PARAMS['nbwindgs'] = parameters['windings']
        if 'nbsigma' in parameters:
            PARAMS['nbsigma'] = parameters['nbsigma']
        if 'aperture' in parameters:
            PARAMS['aperture'] = parameters['aperture']
        if 'emitx_i' in parameters:
            PARAMS['emitx_i'] = parameters['emitx_i']
        if 'emity_i' in parameters:
            PARAMS['emity_i'] = parameters['emity_i']
        if 'betax_i' in parameters:
            PARAMS['betax_i'] = parameters['betax_i']
        if 'betay_i' in parameters:
            PARAMS['betay_i'] = parameters['betay_i']
        if 'alfax_i' in parameters:
            PARAMS['alfax_i'] = parameters['alfax_i']
        if 'alfay_i' in parameters:
            PARAMS['alfay_i'] = parameters['alfay_i']
        if 'mapping' in parameters:
            PARAMS['mapping'] = parameters['mapping']
        if 'DT2T' in parameters:
            PARAMS['DT2T'] = parameters['DT2T']
        if 'lattvers' in parameters:
            PARAMS['lattice_version'] = parameters['lattvers']
        return parameters
    # --------
    def get_flattened_lattice_list(in_data):
        """ read and flatten lattice from (in_data) """
        lattice_list = in_data['LATTICE']
        lattice_list = flatten(lattice_list)
        N = lattice_list[0]  # Duplikator
        plist = lattice_list[1:]
        qlist = plist.copy()
        for i in range(N - 1):
            qlist += plist
        lattice_list = qlist
        return lattice_list
    # --------
    def make_lattice(lattice_list, in_data):
        """ instantiate all elements from flattened node list """
        lattice = Lattice()
        DEBUG_OFF('make_lattice for sollteilchen\n' + PARAMS['sollteilchen'].string())
        elements = read_elements(in_data)
        for ID in lattice_list:
            element = elements[ID]
            element = liofd2d(element)
            elementClass = element['type']
            elmItem = (elementClass, element)
            # !!INSTANTIATE!!
            (label, instance) = instanciate_element(elmItem)
            section = instance.section if FLAGS['sections'] else '*'
            DEBUG_MODULE('instance {} {} {}'.format(label, instance, section))
            # add element instance to lattice
            if isinstance(instance, ELM._Node):
                lattice.add_element(instance)
            # elif isinstance(instance, Lattice):
            #     lattice.concat(instance)   # concatenate partial with lattice
        return lattice  # the complete lattice

    ## factory body --------
    SUMMARY['input file'] = PARAMS['input_file'] = input_file
    with open(input_file, 'r') as fileobject:
        try:
            in_data = yaml.load(fileobject)
        except Exception as ex:
            warnings.showwarning(
                'InputError: {} - STOP'.format(str(ex)),
                UserWarning,
                'lattice_generator.py',
                'factory()',
            )
            sys.exit(1)
    read_flags(in_data)
    read_sections(in_data)
    read_parameters(in_data)
    DEBUG_MODULE('PARAMS after read_parameters()', PARAMS)
    # lattice_list is a flat list of node IDs
    lattice_list = get_flattened_lattice_list(in_data)
    DEBUG_MODULE('latticeList in factory()', lattice_list)
    # __call__ sollteilchen energy
    PARAMS['sollteilchen'](tkin=PARAMS['injection_energy'])
    lattice = make_lattice(lattice_list, in_data)
    # DEBUG_MODULE('lattice_generator >>\n', lattice.string())
    SUMMARY['lattice length [m]'] = PARAMS['lattice_length'] = lattice.length
    DEBUG_OFF('SUMMARY in factory()', SUMMARY)
    # end of factory(...)
    return lattice