def get_n_jobs(logger=None):
    def _from_hardware():
        import psutil
        return psutil.cpu_count(logical=False)

    def _from_env(var):
        import os
        e = os.getenv(var, None)
        if e:
            try:
                return int(e)
            except ValueError as ve:
                if logger is not None:
                    logger.warning("could not parse env variable '{var}'."
                                   " Value='{val}'. Error={err}."
                                   .format(err=ve, val=e, var=var))
        return None

    # Number of CPUs on the allocated SLURM node.
    slurm_njobs = _from_env('SLURM_CPUS_ON_NODE')
    pyemma_njobs = _from_env('PYEMMA_NJOBS')
    if slurm_njobs and pyemma_njobs:
        import warnings
        warnings.warn('two settings for n_jobs from environment: PYEMMA_NJOBS and SLURM_CPUS_ON_NODE. '
                      'Respecting the SLURM setting to avoid overprovisioning resources.')
    # The SLURM setting takes precedence.
    val = slurm_njobs or pyemma_njobs
    if not val:
        val = _from_hardware()
    if logger is not None:
        logger.debug('determined n_jobs: %s', val)
    return val
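# Usage sketch for get_n_jobs above (assumes psutil is installed; the
# logger name and the value '4' are illustrative only):
import logging
import os

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('n_jobs_demo')

print(get_n_jobs(logger=log))     # no env override: physical core count

os.environ['SLURM_CPUS_ON_NODE'] = '4'
print(get_n_jobs(logger=log))     # the SLURM allocation wins: 4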
def geturl(url, cache=True):
    """Retrieves a URL, optionally caching the response in redis."""
    key = "strava_cache:%s" % url
    if have_redis:
        # If redis is available, use it to cache API return values.
        r = redis.Redis()
        if cache:
            cv = r.get(key)
            if cv is not None:
                return json.loads(cv)
            else:
                print("Cache miss for %r" % key)
    else:
        import warnings
        warnings.warn("redis not found, no caching will be used")
    start = time.time()
    req = urllib.request.urlopen(url)
    end = time.time()
    content = req.read()
    print("Request took %.02f seconds" % (end - start))
    if req.getcode() >= 400:
        raise StravaError("%r responded with status %s" % (url, req.getcode()))
    if have_redis and cache:
        r.set(key, content)
    return json.loads(content)
def __getattr__(self, key):
    # Guard with 'key in self' so a missing key warns instead of
    # raising KeyError before the warning can fire.
    if key in self and self[key]:
        return self[key]
    else:
        warnings.warn(
            "The {} setting does not appear in config.cfg. Setting the "
            "self.config value to None.".format(key), RuntimeWarning, 1)
        return None
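# Minimal sketch of the pattern above, embedded in a hypothetical
# dict-backed Config class (class name and keys are illustrative):
import warnings


class Config(dict):

    def __getattr__(self, key):
        if key in self and self[key]:
            return self[key]
        warnings.warn(
            "The {} setting does not appear in config.cfg. Setting the "
            "self.config value to None.".format(key), RuntimeWarning, 1)
        return None


cfg = Config(host='localhost')
print(cfg.host)  # 'localhost'
print(cfg.port)  # warns, then prints None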
def loaddir(directory, clear=False):
    if clear:
        Article.objects.all().delete()
    queue = os.listdir(directory)
    urls = set()
    while queue:
        artfile = queue.pop()
        if artfile[0] == '.':
            continue
        if artfile in ('template', 'template.rst', 'template.txt'):
            continue
        artfile = path.join(directory, artfile)
        if path.isdir(artfile):
            queue.extend([path.join(artfile, f) for f in os.listdir(artfile)])
            continue
        fp = open(artfile)
        header = {}
        linenr = 0
        while True:
            line = fp.readline().strip()
            linenr += 1
            if line in ('---', '..'):
                break
            if line.find(':') < 0:
                raise IOError('gitcms.pages.load: In file %s, line %s. No \':\' found!' % (artfile, linenr))
            tag, value = line.split(':', 1)
            value = value.strip()
            header[tag] = value
        blank = fp.readline()
        linenr += 1
        if blank.strip():
            raise IOError('Blank line expected while processing file (%s:%s)\nGot "%s"' % (artfile, linenr, blank))
        content = fp.read()
        content = preprocess_rst_content(content)
        url = header['url']
        if url and url[-1] == '/':
            import warnings
            warnings.warn('''\
gitcms.pages.loaddir: Removing / at end of url (%s)

(Both versions will work for accessing the page.)
''' % url)
            url = url[:-1]
        if url in urls:
            raise IOError('gitcms.pages.loaddir: repeated URL detected (%s)' % url)
        taglist = []
        for c in header.get('categories', '').split():
            taglist.append(tag_for(c))
        # If we got this far, the article is safe to store.
        urls.add(url)
        A = Article(title=header['title'],
                    keywords=header.get('keywords', ''),
                    description=header.get('description', ''),
                    url=url,
                    author=header.get('author', ''),
                    content=content)
        A.save()
        for c in taglist:
            A.tags.add(c)
def get_prev(self, obj):
    from warnings import warn
    warn("Couldn't import a pure-python OrderedDict so this will be a O(n) operation")
    from itertools import cycle, islice
    l1, l2 = cycle(self._objects.values()), cycle(self._objects.values())
    next(l2)
    # Walk at most one full cycle, so a missing obj cannot loop forever.
    for a, b in islice(zip(l1, l2), len(self._objects)):
        if obj == b:
            return a
from typing import Optional
from warnings import warn

from numpy import deg2rad, sin


def unit_length(rad: Optional[float] = None, deg: Optional[float] = None) -> float:
    if rad and deg and rad != deg2rad(deg):
        warn("you gave two inconsistent values")
    elif rad is None and deg is None:
        # With no angle at all the formula below would fail; raise instead.
        raise ValueError("please give a value")
    elif deg:
        rad = deg2rad(deg)
    a = 0.246e-9  # graphene's lattice constant (m)
    return a / 2 / sin(rad / 2)
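# Usage sketch for unit_length above; a / (2 sin(theta/2)) matches the
# moiré superlattice period, and 1.1 degrees is an illustrative angle:
L = unit_length(deg=1.1)
print('%.1f nm' % (L * 1e9))  # about 12.8 nm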
def convert_timestamp(x):
    '''Convert a pandas Timestamp to a POSIX timestamp, ignoring errors.'''
    warnings.warn('convert_timestamp is deprecated, please use the pandas '
                  'timestamp directly', DeprecationWarning)
    try:
        return x.timestamp()
    except Exception:
        pass
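# Usage sketch for convert_timestamp above (assumes pandas is installed):
import pandas as pd

ts = pd.Timestamp('2020-01-01 00:00:00', tz='UTC')
print(convert_timestamp(ts))  # 1577836800.0, plus a DeprecationWarning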
from datetime import datetime, timedelta


def sweep_1d(start: float, stop: float, sweep_up_rate: float = 0,
             sweep_down_rate: float = 0, waiting: float = 0,
             time_per_point: float = 1, **kw) -> tuple[float, int, float]:
    """
    Calculate the time necessary to carry out a 1d sweep.

    Args:
        start: sweep start point
        stop: sweep stop point
        sweep_up_rate: optional, /min
        sweep_down_rate: optional, /min
        waiting: optional, min
        time_per_point: optional, s

    Returns:
        tuple: time per sweep (s), number of points, step size
    """
    echo = kw.pop('echo', True)
    if not sweep_up_rate and not sweep_down_rate:
        raise ValueError("no sweep rate given")
    sweep_range = abs(stop - start)
    # The slower of the nonzero rates limits the point spacing; taking
    # min() over both would divide by zero when one rate is unset.
    sweep_rate = min(r for r in (sweep_up_rate, sweep_down_rate) if r)
    time_per_sweep = 0
    if sweep_up_rate:
        time_per_sweep += sweep_range / sweep_up_rate
    if sweep_down_rate:
        time_per_sweep += sweep_range / sweep_down_rate
    if waiting:
        time_per_sweep += waiting
    time_per_sweep *= 60  # minutes -> seconds
    num = int(sweep_range / (sweep_rate * time_per_point / 60))
    step = sweep_range / num
    if echo:
        print('the sweep will take {}min {}s to complete. \n'
              'you will record {} points (step size = {}) \n'
              'expected end time: {}\n'.format(
                  int(time_per_sweep // 60), int(time_per_sweep % 60),
                  num, step,
                  datetime.now() + timedelta(seconds=time_per_sweep)))
    return time_per_sweep, num, step
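# Usage sketch for sweep_1d above: sweeping 0 -> 1 at 1 unit/min,
# sampling one point per second (illustrative numbers):
t, n, step = sweep_1d(0, 1, sweep_up_rate=1, echo=False)
print(t, n, step)  # 60.0 s per sweep, ~60 points, step ~0.017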
from warnings import warn

from scipy.constants import e, h  # elementary charge, Planck constant


def area(deltaB: float, unit: str = 'm') -> float:
    # Area threading one flux quantum h/e for a field period deltaB (T).
    A = h / e / deltaB
    if unit == 'm' or unit == 'meter':
        result = A
    elif unit == 'cm' or unit == 'centimeter':
        result = A * 10**4
    elif unit == 'um' or unit == 'µm' or unit == 'micrometer':
        result = A * 10**12
    elif unit == 'nm' or unit == 'nanometer':
        result = A * 10**18
    else:
        warn('choose a proper unit: m, cm, um or nm')
        result = 0
    return result
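# Usage sketch for area above: a field period of 0.1 T (illustrative)
# corresponds to the area threading one flux quantum h/e:
print(area(0.1))        # ~4.14e-14 (m^2)
print(area(0.1, 'um'))  # ~0.041 (µm^2)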
import warnings
from collections import Counter

import numpy as np


def k_nearest_neighbors(data, predict, k=3):
    if len(data) >= k:
        warnings.warn("K is set to a value less than the total voting groups!")
    distances = []
    for group in data:
        for features in data[group]:
            euclidean_distance = np.linalg.norm(
                np.array(features) - np.array(predict))
            distances.append([euclidean_distance, group])
    votes = [i[1] for i in sorted(distances)[:k]]
    vote_result = Counter(votes).most_common(1)[0][0]
    confidence = Counter(votes).most_common(1)[0][1] / k
    return vote_result, confidence
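# Usage sketch for k_nearest_neighbors above, with a hypothetical toy
# dataset: two classes ('k' and 'r'), each a list of 2-d feature vectors:
dataset = {'k': [[1, 2], [2, 3], [3, 1]],
           'r': [[6, 5], [7, 7], [8, 6]]}
result, confidence = k_nearest_neighbors(dataset, [5, 7], k=3)
print(result, confidence)  # 'r' 1.0 (all three nearest neighbors are 'r')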
def dress_results(self, model):
    """.. warning:: deprecated"""
    from warnings import warn
    warn("unnecessary to call this deprecated function", DeprecationWarning)
def logWarning(self, msg, title=None):
    self.logger.warning(msg)
    # `warn` here builds a dialog-like object (setTitle/run); it is not
    # the stdlib warnings.warn.
    warning = warn(msg)
    warning.setTitle(title)
    warning.run()
# -*- coding: utf-8 -*-

import itertools
import logging
import warnings

try:
    from meepo.signals import signal
except ImportError:
    from blinker import signal

warnings.warn(
    """
    You should checkout meepo develop branch to install !!!
    See: https://github.com/eleme/meepo.
    """, DeprecationWarning)

from meepo.apps.eventsourcing import sqlalchemy_es_pub


class EventHook(sqlalchemy_es_pub):

    def __init__(self, cache_clients, session, tables=None):
        super(EventHook, self).__init__(session, tables)
        self.cache_clients = cache_clients
        self.logger = logging.getLogger(__name__)

    def add(self, model):
        tablename = model.__tablename__
        self.tables.add(tablename)
        self.install_cache_signal(tablename)
def cellsim(self, cellindex, return_just_cell=False):
    '''main cell simulation and LFP generating procedure'''
    cellParameters = self.cellParameters.copy()
    cellParameters.update(dict(
        morphology=self.shuffled_morphologies[cellindex],
        custom_code=[self.shuffled_custom_codes[cellindex]]))

    cell = LFPy.Cell(**cellParameters)
    cell.set_pos(**self.pop_soma_pos[cellindex])
    cell.set_rotation(**self.rotations[cellindex])

    if return_just_cell:
        # with several cells, NEURON can only hold one cell at the time
        allsecnames = []
        allsec = []
        for sec in cell.allseclist:
            allsecnames.append(sec.name())
            for i in range(sec.nseg):
                allsec.append(sec.name())
        cell.allsecnames = allsecnames
        cell.allsec = allsec
        return cell
    else:
        # set up synapse
        t = np.arange(self.gsyn[cellindex].size).astype(float)
        t *= self.cellParameters['timeres_python']
        t += self.cellParameters['tstartms']
        gsyn_t = neuron.h.Vector(t)
        # synapse conductance
        gsyn = neuron.h.Vector(self.gsyn[cellindex])
        # insert mech and play vector
        for sec in neuron.h.soma:
            sec.insert('gsyn')
            for seg in sec:
                gsyn.play(seg._ref_g_gsyn, gsyn_t)
        # perform simulation
        if 'to_file' in self.simulationParameters:
            if self.simulationParameters['to_file']:
                cell.simulate(dotprodcoeffs=self.electrodecoeffs[cellindex],
                              file_name=os.path.join(
                                  self.savefolder,
                                  self.default_h5_file) % (cellindex),
                              **self.simulationParameters)
            else:
                cell.simulate(dotprodcoeffs=self.electrodecoeffs[cellindex],
                              **self.simulationParameters)
                cell.LFP = cell.dotprodresults[0]
        else:
            cell.simulate(dotprodcoeffs=self.electrodecoeffs[cellindex],
                          **self.simulationParameters)
            cell.LFP = cell.dotprodresults[0]

        cell.x = self.paramsMapping['elec_x']
        cell.y = self.paramsMapping['elec_y']
        cell.z = self.paramsMapping['elec_z']

        cell.custom_code = self.shuffled_custom_codes[cellindex][1]
        cell.electrodecoeff = self.electrodecoeffs[cellindex]

        # access file object ('compression' is a dataset option, not a
        # File option, so open in append mode instead)
        f = h5py.File(os.path.join(self.savefolder,
                                   self.default_h5_file) % (cellindex), 'a')
        if self.simulationParameters.get('to_file'):
            f['LFP'] = f['electrode000'].astype('float32')

        # save stuff from savelist
        for attrbt in self.savelist:
            try:
                del f[attrbt]
            except KeyError:
                pass
            try:
                if attrbt == 'LFP':
                    f[attrbt] = getattr(cell, attrbt).astype('float32')
                else:
                    f[attrbt] = getattr(cell, attrbt)
            except Exception:
                try:
                    f[attrbt] = str(getattr(cell, attrbt))
                except Exception:
                    import warnings
                    warnings.warn('Could not find %s in cell' % attrbt)

        # print some stuff
        print('SIZE %i, RANK %i, Cell %i, Min LFP: %.3f, Max LFP: %.3f' %
              (SIZE, RANK, cellindex, f['LFP'][()].min(), f['LFP'][()].max()))
        f.close()
        print('Cell %s saved to file' % cellindex)
def sample(self, n_samples=1):
    """
    Draw samples from the cell's volume and concentration
    distributions. If the cell is analytical, there is nothing to
    sample and a warning is issued.

    Parameters
    ----------
    n_samples : number of samples obtained. (Default value = 1)
    """
    if self.analytical:
        import warnings
        warnings.warn("This is an analytical cell. No need to sample.")
    else:
        # define the random variables
        ind_vols_rv = MultivariateLogNormalDiag(self.ind_vols,
                                                self.ind_d_vols)
        ind_concs_p_rv = MultivariateLogNormalDiag(self.ind_concs[0, :],
                                                   self.ind_d_concs[0, :])
        ind_concs_l_rv = MultivariateLogNormalDiag(self.ind_concs[1, :],
                                                   self.ind_d_concs[1, :])
        ind_concs_r_rv = MultivariateLogNormalDiag(self.ind_concs[2, :],
                                                   self.ind_d_concs[2, :])

        # sample independent volumes and concentrations
        ind_vols = np.log(ind_vols_rv.sample(n_samples).numpy())
        ind_concs_p = np.log(ind_concs_p_rv.sample(n_samples).numpy())
        ind_concs_l = np.log(ind_concs_l_rv.sample(n_samples).numpy())
        ind_concs_r = np.log(ind_concs_r_rv.sample(n_samples).numpy())

        # the cumulative volume is the running sum of the individual
        # volumes added so far
        vols = np.tril(np.ones(
            (ind_vols.shape[1], ind_vols.shape[1]))).dot(
                ind_vols.transpose()).transpose()

        # use quantity instead
        q_p = np.tril(np.ones((ind_vols.shape[1], ind_vols.shape[1]))).dot(
            (ind_vols * ind_concs_p).transpose()).transpose()
        q_l = np.tril(np.ones((ind_vols.shape[1], ind_vols.shape[1]))).dot(
            (ind_vols * ind_concs_l).transpose()).transpose()
        q_r = np.tril(np.ones((ind_vols.shape[1], ind_vols.shape[1]))).dot(
            (ind_vols * ind_concs_r).transpose()).transpose()

        # calculate the concentrations from quantity
        concs_p = np.nan_to_num(np.true_divide(q_p, vols))
        concs_l = np.nan_to_num(np.true_divide(q_l, vols))
        concs_r = np.nan_to_num(np.true_divide(q_r, vols))

        # now take the average and record mean and covariance
        self.vols = np.average(vols.transpose(), axis=1).flatten()
        self.concs = np.concatenate([
            np.expand_dims(np.average(concs_p.transpose(), axis=1), axis=0),
            np.expand_dims(np.average(concs_l.transpose(), axis=1), axis=0),
            np.expand_dims(np.average(concs_r.transpose(), axis=1), axis=0)
        ], axis=0)

        # update covariances
        self.vols_cov = np.cov(vols.transpose())
        self.concs_cov = np.concatenate([
            np.expand_dims(np.cov(concs_p.transpose()), axis=0),
            np.expand_dims(np.cov(concs_l.transpose()), axis=0),
            np.expand_dims(np.cov(concs_r.transpose()), axis=0)
        ], axis=0)

        # mark the object as sampled
        self.sampled = True
import ctypes

# The opening condition was truncated in the source; checking the
# pointer size is a plausible reconstruction, not the original code.
if ctypes.sizeof(ctypes.c_void_p) == 8:
    MKL_INT = ctypes.c_int64
    BITNESS = 64
else:
    MKL_INT = ctypes.c_int32
    BITNESS = 32

import sys
if sys.platform == 'win32':
    if BITNESS == 32:
        libPath = 'C:\\Program Files (x86)\\IntelSWTools\\compilers_and_libraries_2017\\windows\\redist\\ia32_win\\mkl\\'
    elif BITNESS == 64:
        libPath = 'C:\\Program Files (x86)\\IntelSWTools\\compilers_and_libraries_2017\\windows\\redist\\intel64_win\\mkl\\'
    libFile = 'mkl_rt.dll'
    # Load via the full path so the MKL directory need not be on PATH.
    mkl_rt = ctypes.CDLL(libPath + libFile)
else:
    import warnings
    warnings.warn('Unsupported platform: %s' % sys.platform)
    # mkl_rt is required below, so loading cannot continue here.
    raise OSError('MKL runtime not available on %s' % sys.platform)


class ThreadingLayer:
    INTEL = 0
    SEQUENTIAL = 1
    PGI = 2
    GNU = 3
    TBB = 4


mklSetThreadingLayer = mkl_rt.mkl_set_threading_layer
mklSetThreadingLayer.argtypes = [ctypes.c_int32]
mklSetThreadingLayer.restype = ctypes.c_int32

mklSetNumThreads = mkl_rt.MKL_Set_Num_Threads
def fit(self, conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT,
        maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG,
        maxgerr=DEFAULT_MAXGERR, going_inwards=False):
    """
    Fit an elliptical isophote.

    Parameters
    ----------
    conver : float, optional
        The main convergence criterion. Iterations stop when the
        largest harmonic amplitude becomes smaller (in absolute value)
        than ``conver`` times the harmonic fit rms. The default is
        0.05.
    minit : int, optional
        The minimum number of iterations to perform. A minimum of 10
        (the default) iterations guarantees that, on average, 2
        iterations will be available for fitting each independent
        parameter (the four harmonic amplitudes and the intensity
        level). For the first isophote, the minimum number of
        iterations is 2 * ``minit`` to ensure that, even departing
        from not-so-good initial values, the algorithm has a better
        chance to converge to a sensible solution.
    maxit : int, optional
        The maximum number of iterations to perform. The default is
        50.
    fflag : float, optional
        The acceptable fraction of flagged data points in the sample.
        If the actual fraction of valid data points is smaller than
        this, the iterations will stop and the current
        `~photutils.isophote.Isophote` will be returned. Flagged data
        points are points that either lie outside the image frame, are
        masked, or were rejected by sigma-clipping. The default is
        0.7.
    maxgerr : float, optional
        The maximum acceptable relative error in the local radial
        intensity gradient. This is the main control for preventing
        ellipses from growing into regions of too low signal-to-noise
        ratio. `Busko (1996; ASPC 101, 139)
        <http://adsabs.harvard.edu/abs/1996ASPC..101..139B>`_ showed
        that the fitting precision relates to that relative error. The
        usual behavior of the gradient relative error is to increase
        with semimajor axis, being larger in outer, fainter regions of
        a galaxy image. In the current implementation, the ``maxgerr``
        criterion is triggered only when two consecutive isophotes
        exceed the value specified by the parameter. This prevents
        premature stopping caused by contamination such as stars and
        HII regions. A number of actions may happen when the gradient
        error exceeds ``maxgerr`` (or becomes non-significant and is
        set to `None`). If the maximum semimajor axis specified by
        ``maxsma`` is set to `None`, semimajor axis growth is stopped
        and the algorithm proceeds inwards to the galaxy center. If
        ``maxsma`` is set to some finite value, and this value is
        larger than the current semimajor axis length, the algorithm
        enters non-iterative mode and proceeds outwards until reaching
        ``maxsma``. The default is 0.5.
    going_inwards : bool, optional
        Parameter to define the sense of SMA growth. When fitting just
        one isophote, this parameter is used only by the code that
        defines the details of how elliptical arc segments ("sectors")
        are extracted from the image, when using area extraction modes
        (see the ``integrmode`` parameter in the
        `~photutils.isophote.EllipseSample` class). The default is
        `False`.

    Returns
    -------
    result : `~photutils.isophote.Isophote` instance
        The fitted isophote, which also contains fit status
        information.

    Examples
    --------
    >>> from photutils.isophote import EllipseSample, EllipseFitter
    >>> sample = EllipseSample(data, sma=10.)
    >>> fitter = EllipseFitter(sample)
    >>> isophote = fitter.fit()
    """
    sample = self._sample

    # this flag signals that the limiting gradient error (`maxgerr`)
    # wasn't exceeded yet.
    lexceed = False

    # here we keep track of the sample that caused the minimum harmonic
    # amplitude (in absolute value). This will eventually be used to
    # build the resulting Isophote in cases where iterations run to
    # the maximum allowed (maxit), or the maximum number of flagged
    # data points (fflag) is reached.
    minimum_amplitude_value = np.inf
    minimum_amplitude_sample = None

    for iter in range(maxit):
        # Force the sample to compute its gradient and associated values.
        sample.update()

        # The extract() method returns sampled values as a 2-d numpy array
        # with the following structure:
        # values[0] = 1-d array with angles
        # values[1] = 1-d array with radii
        # values[2] = 1-d array with intensity
        values = sample.extract()

        # Fit harmonic coefficients. Failure in fitting is
        # a fatal error; terminate immediately with sample
        # marked as invalid.
        try:
            coeffs = fit_first_and_second_harmonics(values[0], values[2])
        except Exception as e:
            import warnings
            warnings.warn('Harmonic fit failed: %s '
                          '[photutils.isophote.fitter]' % e)
            return Isophote(sample, iter + 1, False, 3)

        coeffs = coeffs[0]

        # largest harmonic in absolute value drives the correction.
        largest_harmonic_index = np.argmax(np.abs(coeffs[1:]))
        largest_harmonic = coeffs[1:][largest_harmonic_index]

        # see if the amplitude decreased; if yes, keep the
        # corresponding sample for eventual later use.
        if abs(largest_harmonic) < minimum_amplitude_value:
            minimum_amplitude_value = abs(largest_harmonic)
            minimum_amplitude_sample = sample

        # check if converged
        model = first_and_second_harmonic_function(values[0], coeffs)
        residual = values[2] - model

        if ((conver * sample.sector_area * np.std(residual))
                > np.abs(largest_harmonic)):
            # Got a valid solution. But before returning, ensure
            # that a minimum of iterations has run.
            if iter >= minit - 1:
                sample.update()
                return Isophote(sample, iter + 1, True, 0)

        # it may not have converged yet, but the sample contains too
        # many invalid data points: return.
        if sample.actual_points < (sample.total_points * fflag):
            # when too many data points were flagged, return the
            # best fit sample instead of the current one.
            minimum_amplitude_sample.update()
            return Isophote(minimum_amplitude_sample, iter + 1, True, 1)

        # pick appropriate corrector code.
        corrector = _correctors[largest_harmonic_index]

        # generate *NEW* EllipseSample instance with corrected
        # parameter. Note that this instance is still devoid of other
        # information besides its geometry. It needs to be explicitly
        # updated for computations to proceed. We have to build a new
        # EllipseSample instance every time because of the lazy
        # extraction process used by EllipseSample code. To minimize
        # the number of calls to the area integrators, we pay a
        # (hopefully smaller) price here, by having multiple calls to
        # the EllipseSample constructor.
        sample = corrector.correct(sample, largest_harmonic)
        sample.update()

        # see if any abnormal (or unusual) conditions warrant
        # the change to non-iterative mode, or go-inwards mode.
        proceed, lexceed = self._check_conditions(
            sample, maxgerr, going_inwards, lexceed)

        if not proceed:
            sample.update()
            return Isophote(sample, iter + 1, True, -1)

    # Got to the maximum number of iterations. Return with
    # code 2, and handle it as a valid isophote. Use the
    # best fit sample instead of the current one.
    minimum_amplitude_sample.update()
    return Isophote(minimum_amplitude_sample, maxit, True, 2)
import os
import warnings

from tierpsy import AUX_FILES_DIR

DFLT_MODEL_FILTER_WORMS = os.path.join(AUX_FILES_DIR,
                                       'model_isworm_20170407_184845.h5')
if not os.path.exists(DFLT_MODEL_FILTER_WORMS):
    warnings.warn(
        'The default model file to filter spurious particles was not found. '
        'This step will not be done.')
    DFLT_MODEL_FILTER_WORMS = ''

DFLT_MODEL_FOOD_CONTOUR = os.path.join(AUX_FILES_DIR,
                                       'unet_RMSprop-5-04999-0.3997.h5')
if not os.path.exists(DFLT_MODEL_FOOD_CONTOUR):
    warnings.warn(
        'The default model to obtain the food contour was not found. '
        'A less accurate algorithm will be used to calculate the contour.')
    DFLT_MODEL_FOOD_CONTOUR = ''