def to_dtype_dformat(data):
    """Transforms the given data parameter (string or sequence of string or
    sequence of sequence of string/:obj:`DataType`) into a tuple of two
    elements (:obj:`DataType`, :obj:`DataFormat`).

    :param data: the data information to be transformed
    :type data: str or seq<str> or seq<seq<str>>
    :return: a tuple <:obj:`DataType`, :obj:`DataFormat`> for the given data
    :rtype: tuple<:obj:`DataType`, :obj:`DataFormat`>
    """
    import operator
    dtype, dformat = data, DataFormat.Scalar
    if isinstance(data, (str, unicode)):
        dtype, dformat = from_dtype_str(data)
    elif operator.isSequenceType(data):
        dformat = DataFormat.OneD
        dtype = data[0]
        if type(dtype) == str:
            dtype, dformat2 = from_dtype_str(dtype)
            if dformat2 == DataFormat.OneD:
                dformat = DataFormat.TwoD
        elif operator.isSequenceType(dtype):
            dformat = DataFormat.TwoD
            dtype = dtype[0]
            if type(dtype) == str:
                dtype, _ = from_dtype_str(dtype)
    dtype = DTYPE_MAP.get(dtype, DataType.Invalid)
    return dtype, dformat
def _fitdata_fired(self, new):
    from operator import isSequenceType, isMappingType

    if self.model is not None:
        if isSequenceType(new) and len(new) == 2:
            kw = {'x': new[0], 'y': new[1]}
        elif isSequenceType(new) and len(new) == 3:
            kw = {'x': new[0], 'y': new[1], 'weights': new[2]}
        elif isMappingType(new):
            kw = dict(new)
            # add any missing pieces
            for i, k in enumerate(('x', 'y', 'weights')):
                if k not in kw:
                    if self.model.fiteddata:
                        kw[k] = self.model.fiteddata[i]
                    else:
                        raise ValueError('no pre-fitted data available')
        elif new is True:
            if self.model.fiteddata:
                fd = self.model.fiteddata
                kw = {'x': fd[0], 'y': fd[1], 'weights': fd[2]}
            else:
                raise ValueError('No data to fit')
        else:
            raise ValueError('unusable fitdata event input')

        if 'fixedpars' not in kw:
            kw['fixedpars'] = [tn.replace('fixfit_', '') for tn in self.traits()
                               if tn.startswith('fixfit_') if getattr(self, tn)]

        try:
            self.model.fitData(**kw)
            self.updatetraitparams = True
            self.lastfitfailure = None
        except Exception, e:
            self.lastfitfailure = e
def __init__(self, call, attribs=None, argfilter=None, expand_args=True,
             copy_attribs=True):
    """Initialize

    :Parameters:
      expand_args : bool
        Whether to expand the output of looper into a list of arguments
        for call
      attribs : list of basestr
        What attributes of call to store and return later on?
      copy_attribs : bool
        Force copying values of attributes
    """
    self.call = call
    """Call which gets called in the harvester."""

    if attribs is None:
        attribs = []
    if not isSequenceType(attribs):
        raise ValueError, "'attribs' has to be specified as a sequence."

    if not (argfilter is None or isSequenceType(argfilter)):
        raise ValueError, "'argfilter' has to be a sequence or None."

    # now give it to me...
    self.argfilter = argfilter
    self.expand_args = expand_args
    self.copy_attribs = copy_attribs
    self.attribs = attribs
def test_isSequenceType(self):
    self.failUnlessRaises(TypeError, operator.isSequenceType)
    self.failUnless(operator.isSequenceType(dir()))
    self.failUnless(operator.isSequenceType(()))
    self.failUnless(operator.isSequenceType(xrange(10)))
    self.failUnless(operator.isSequenceType('yeahbuddy'))
    self.failIf(operator.isSequenceType(3))
def to_dtype_dformat(data):
    """Transforms the given data parameter (string or sequence of string or
    sequence of sequence of string/:obj:`DataType`) into a tuple of two
    elements (:obj:`DataType`, :obj:`DataFormat`).

    :param data: the data information to be transformed
    :type data: :obj:`str` or seq<str> or seq<seq<str>>
    :return: a tuple <:obj:`DataType`, :obj:`DataFormat`> for the given data
    :rtype: tuple<:obj:`DataType`, :obj:`DataFormat`>
    """
    import operator
    dtype, dformat = data, DataFormat.Scalar
    if isinstance(data, (str, unicode)):
        dtype, dformat = from_dtype_str(data)
    elif operator.isSequenceType(data):
        dformat = DataFormat.OneD
        dtype = data[0]
        if isinstance(dtype, str):
            dtype, dformat2 = from_dtype_str(dtype)
            if dformat2 == DataFormat.OneD:
                dformat = DataFormat.TwoD
        elif operator.isSequenceType(dtype):
            dformat = DataFormat.TwoD
            dtype = dtype[0]
            if isinstance(dtype, str):
                dtype, _ = from_dtype_str(dtype)
    dtype = DTYPE_MAP.get(dtype, DataType.Invalid)
    return dtype, dformat
def __setitem__(self, index, W):
    """
    Speed-up if x is a sparse matrix.
    TODO: checks (first remove the data).
    TODO: once we've got this working in all cases, should we submit to scipy?
    """
    try:
        i, j = index
    except (ValueError, TypeError):
        raise IndexError, "invalid index"

    if isinstance(i, slice) and isinstance(j, slice) and \
            (i.step is None) and (j.step is None) and \
            (isinstance(W, sparse.lil_matrix) or isinstance(W, numpy.ndarray)):
        rows = self.rows[i]
        datas = self.data[i]
        j0 = j.start
        if isinstance(W, sparse.lil_matrix):
            for row, data, rowW, dataW in izip(rows, datas, W.rows, W.data):
                jj = bisect.bisect(row, j0)  # Find the insertion point
                row[jj:jj] = [j0 + k for k in rowW]
                data[jj:jj] = dataW
        elif isinstance(W, ndarray):
            nq = W.shape[1]
            for row, data, rowW in izip(rows, datas, W):
                jj = bisect.bisect(row, j0)  # Find the insertion point
                row[jj:jj] = range(j0, j0 + nq)
                data[jj:jj] = rowW
    elif isinstance(i, int) and isinstance(j, (list, tuple, numpy.ndarray)):
        if len(j) == 0:
            return
        row = dict(izip(self.rows[i], self.data[i]))
        try:
            row.update(dict(izip(j, W)))
        except TypeError:
            row.update(dict(izip(j, itertools.repeat(W))))
        items = row.items()
        items.sort()
        row, data = izip(*items)
        self.rows[i] = list(row)
        self.data[i] = list(data)
    elif isinstance(i, slice) and isinstance(j, int) and isSequenceType(W):
        # This corrects a bug in scipy sparse matrix as of version 0.7.0, but
        # it is not efficient!
        for w, k in izip(W, xrange(*i.indices(self.shape[0]))):
            sparse.lil_matrix.__setitem__(self, (k, j), w)
    elif isinstance(i, int) and isinstance(j, slice) and \
            (isNumberType(W) and not isSequenceType(W)):
        # this fixes a bug in scipy 0.7.1
        sparse.lil_matrix.__setitem__(
            self, index, [W] * len(xrange(*j.indices(self.shape[1]))))
    elif isinstance(i, slice) and isinstance(j, slice) and isNumberType(W):
        n = len(xrange(*i.indices(self.shape[0])))
        m = len(xrange(*j.indices(self.shape[1])))
        sparse.lil_matrix.__setitem__(self, index, W * numpy.ones((n, m)))
    else:
        sparse.lil_matrix.__setitem__(self, index, W)
def setup_analog_scan(self, channels=(0, 1, 2, 3), sweeps=-1, rate=100,
                      gains=GAIN2_DIFF, exttrig=0):
    if self.scanning:
        raise MeasurementComputingError, "cannot start new scan without stopping old one"

    if not isSequenceType(channels):
        channels = (channels, )
    if not isSequenceType(gains):
        gains = (gains, ) * len(channels)

    if len(gains) != len(channels):
        raise MeasurementComputingError, \
            "gain list not compatible with channel list" + str(channels) + ":" + str(gains)

    self.setup_gain_list(channels, gains)

    timerPre, timerVal, setupTime, actRate = self.compute_timer_vals(
        rate * len(channels))
    self.actRate = actRate

    if sweeps <= 0:  # continuous scan !
        blocking = self.DATA_SAMPLE_SIZE // len(channels)
        if len(channels) * blocking != self.DATA_SAMPLE_SIZE:
            raise MeasurementComputingError, \
                "continuous scan channel count must be submultiple of %d" % self.DATA_SAMPLE_SIZE
        tCount = self.DATA_SAMPLE_SIZE  # each block is filled in continuous mode
        scanmode = self.AD_CONT_MODE
    elif sweeps == 1:
        # use AD_SINGLEEXEC mode for single sweep
        tCount = len(channels)
        scanmode = self.AD_SINGLE_MODE
    else:
        # round block count up to next block above requested samples
        blocks = (len(channels) * sweeps +
                  self.DATA_SAMPLE_SIZE - 1) // self.DATA_SAMPLE_SIZE
        tCount = blocks * self.DATA_SAMPLE_SIZE
        if tCount > self.MAX_STORED_SAMPLES:
            raise MeasurementComputingError, \
                "burst scan sample count must be < %d" % self.MAX_STORED_SAMPLES
        scanmode = self.AD_BURST_MODE
        self.burst_scan_count = tCount
        self.burst_scan_blocks = blocks

    tChigh = tCount >> 8
    tClow = tCount & 255

    self.write((self.CBAINSCAN, tClow, tChigh, timerVal + setupTime,
                timerPre, scanmode | exttrig))
    self.continuous_scan_packet_index = 1
    self.got_last_packet = 1
    self.last_packet_time = 0.0  # long ago!
    self.packet_dt = self.DATA_SAMPLE_SIZE / actRate
def merge_undo ( self, undo_item ):
    """ Merges two undo items if possible.
    """
    # Undo items are potentially mergeable only if they are of the same
    # class and refer to the same object trait, so check that first:
    if (isinstance( undo_item, self.__class__ ) and
        (self.object is undo_item.object) and
        (self.name == undo_item.name)):
        v1 = self.new_value
        v2 = undo_item.new_value
        t1 = type( v1 )
        if t1 is type( v2 ):
            if t1 is str:
                # Merge two undo items if they have new values which are
                # strings which only differ by one character (corresponding
                # to a single character insertion, deletion or replacement
                # operation in a text editor):
                n1 = len( v1 )
                n2 = len( v2 )
                n  = min( n1, n2 )
                i  = 0
                while (i < n) and (v1[i] == v2[i]):
                    i += 1
                if v1[i + (n2 <= n1):] == v2[i + (n2 >= n1):]:
                    self.new_value = v2
                    return True
            elif isSequenceType( v1 ):
                # Merge sequence types only if a single element has changed
                # from the 'original' value, and the element type is a
                # simple Python type:
                v1 = self.old_value
                if isSequenceType( v1 ):
                    # Note: wxColour says it's a sequence type, but it
                    # doesn't support 'len', so we handle the exception
                    # just in case other classes have similar behavior:
                    try:
                        if len( v1 ) == len( v2 ):
                            diffs = 0
                            for i, item in enumerate( v1 ):
                                titem = type( item )
                                item2 = v2[i]
                                if ((titem not in SimpleTypes) or
                                    (titem is not type( item2 )) or
                                    (item != item2)):
                                    diffs += 1
                                    if diffs >= 2:
                                        return False
                            self.new_value = v2
                            return True
                    except:
                        pass
            elif t1 in NumericTypes:
                # Always merge simple numeric trait changes:
                self.new_value = v2
                return True
    return False
def merge_undo(self, undo_item):
    """ Merges two undo items if possible.
    """
    # Undo items are potentially mergeable only if they are of the same
    # class and refer to the same object trait, so check that first:
    if (
        isinstance(undo_item, self.__class__)
        and (self.object is undo_item.object)
        and (self.name == undo_item.name)
    ):
        v1 = self.new_value
        v2 = undo_item.new_value
        t1 = type(v1)
        if t1 is type(v2):
            if t1 is str:
                # Merge two undo items if they have new values which are
                # strings which only differ by one character (corresponding
                # to a single character insertion, deletion or replacement
                # operation in a text editor):
                n1 = len(v1)
                n2 = len(v2)
                n = min(n1, n2)
                i = 0
                while (i < n) and (v1[i] == v2[i]):
                    i += 1
                if v1[i + (n2 <= n1) :] == v2[i + (n2 >= n1) :]:
                    self.new_value = v2
                    return True
            elif isSequenceType(v1):
                # Merge sequence types only if a single element has changed
                # from the 'original' value, and the element type is a
                # simple Python type:
                v1 = self.old_value
                if isSequenceType(v1):
                    # Note: wxColour says it's a sequence type, but it
                    # doesn't support 'len', so we handle the exception
                    # just in case other classes have similar behavior:
                    try:
                        if len(v1) == len(v2):
                            diffs = 0
                            for i, item in enumerate(v1):
                                titem = type(item)
                                item2 = v2[i]
                                if (
                                    (titem not in SimpleTypes)
                                    or (titem is not type(item2))
                                    or (item != item2)
                                ):
                                    diffs += 1
                                    if diffs >= 2:
                                        return False
                            self.new_value = v2
                            return True
                    except:
                        pass
            elif t1 in NumericTypes:
                # Always merge simple numeric trait changes:
                self.new_value = v2
                return True
    return False
def _flatten(seq):
    from operator import isSequenceType
    from types import ListType, TupleType
    if not isSequenceType(seq):
        return seq
    res = []
    for item in seq:
        if type(item) in (TupleType, ListType):
            res.extend(_flatten(item))
        else:
            res.append(item)
    return res
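A minimal, hypothetical usage sketch for the `_flatten` helper above (not part of the original source); it simply demonstrates the recursive flattening of nested lists and tuples:

# Added illustration -- assumes _flatten as defined above.
nested = [1, (2, 3), [[4], 5]]
assert _flatten(nested) == [1, 2, 3, 4, 5]
assert _flatten(7) == 7  # non-sequence input is returned unchanged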
def test_01_RequestCACerts(self):
    cacert = self.proxy.RetrieveCACertificates()

    self.failUnless(operator.isSequenceType(cacert),
                    "RetrieveCACertificate returns a non-sequence")
    self.failUnless(len(cacert) == 2,
                    "RetrieveCACertificate returns a tuple of len != 2")
    self.failUnless(type(cacert[1]) != str and operator.isSequenceType(cacert[1]),
                    "cacert[1] is not a sequence")
    self.assert_(cacert[0],
                 "RetrieveCACertificate returned failure: " + str(cacert[1]))

    certList = cacert[1]

    #
    # It should be a list containing a single pair (cert, policy)
    #
    self.assert_(len(certList) == 1,
                 "Retrieved cert list is not of length 1")
    self.assert_(len(certList[0]) == 2,
                 "Retrieved cert list does not contain a single pair")

    cert, policy = certList[0]

    self.assert_(re.search("access_id_CA", policy),
                 "Policy string does not appear to be valid")
    self.assert_(re.search("BEGIN CERTIFICATE", cert),
                 "Certificate does not appear to be valid")

    tmp = tempfile.mktemp()
    fh = open(tmp, "w")
    fh.write(cert)
    fh.close()

    fh = os.popen("openssl x509 -noout -subject -in %s" % (tmp), "r")
    subj = fh.read()
    fh.close()

    m = re.search(r"^subject=\s+(.*)", subj)
    self.assert_(m is not None, "Did not match subject")

    global issuer
    issuer = m.group(1)

    os.unlink(tmp)
def setup_analog_scan(
        self, channels=(0, 1, 2, 3), sweeps=-1, rate=100,
        gains=GAIN2_DIFF, exttrig=0):
    if self.scanning:
        raise MeasurementComputingError, "cannot start new scan without stopping old one"

    if not isSequenceType(channels):
        channels = (channels,)
    if not isSequenceType(gains):
        gains = (gains,) * len(channels)

    if len(gains) != len(channels):
        raise MeasurementComputingError, \
            "gain list not compatible with channel list" + str(channels) + ":" + str(gains)

    self.setup_gain_list(channels, gains)

    timerPre, timerVal, setupTime, actRate = self.compute_timer_vals(
        rate * len(channels))
    self.actRate = actRate

    if sweeps <= 0:  # continuous scan !
        blocking = self.DATA_SAMPLE_SIZE // len(channels)
        if len(channels) * blocking != self.DATA_SAMPLE_SIZE:
            raise MeasurementComputingError, \
                "continuous scan channel count must be submultiple of %d" % self.DATA_SAMPLE_SIZE
        tCount = self.DATA_SAMPLE_SIZE  # each block is filled in continuous mode
        scanmode = self.AD_CONT_MODE
    elif sweeps == 1:
        # use AD_SINGLEEXEC mode for single sweep
        tCount = len(channels)
        scanmode = self.AD_SINGLE_MODE
    else:
        # round block count up to next block above requested samples
        blocks = (
            len(channels) * sweeps + self.DATA_SAMPLE_SIZE - 1) // self.DATA_SAMPLE_SIZE
        tCount = blocks * self.DATA_SAMPLE_SIZE
        if tCount > self.MAX_STORED_SAMPLES:
            raise MeasurementComputingError, \
                "burst scan sample count must be < %d" % self.MAX_STORED_SAMPLES
        scanmode = self.AD_BURST_MODE
        self.burst_scan_count = tCount
        self.burst_scan_blocks = blocks

    tChigh = tCount >> 8
    tClow = tCount & 255

    self.write(
        (self.CBAINSCAN, tClow, tChigh, timerVal + setupTime,
         timerPre, scanmode | exttrig))
    self.continuous_scan_packet_index = 1
    self.got_last_packet = 1
    self.last_packet_time = 0.0  # long ago!
    self.packet_dt = self.DATA_SAMPLE_SIZE / actRate
def __init__(self, values, shadow_values):
    if not isSequenceType(values) or not isSequenceType(shadow_values):
        raise TypeError("values and shadow_values must be sequences.")
    if len(values) != len(shadow_values):
        raise ValueError(
            "values and shadow_values must have the same length.")
    self.values = values
    # Store always a copy: values and shadow_values may be identical,
    # and if this is the case, the two reverse() calls in the method
    # reverse() below will cancel each other.
    self.shadow_values = shadow_values[:]
def test_isXxxType_more(self):
    import operator
    assert not operator.isSequenceType(list)
    assert not operator.isSequenceType(dict)
    assert not operator.isSequenceType({})
    assert not operator.isMappingType(list)
    assert not operator.isMappingType(dict)
    assert not operator.isMappingType([])
    assert not operator.isMappingType(())
    assert not operator.isNumberType(int)
    assert not operator.isNumberType(float)
def server_init(args=None, groups=None):
    """
    Initialize embedded server. If this client is not linked against
    the embedded server library, this function does nothing.

    args -- sequence of command-line arguments
    groups -- sequence of groups to use in defaults files
    """
    global server_init_done
    if server_init_done:
        raise ProgrammingError('already initialized')
    args_count = 0
    args_array = None
    groups_array = None
    if args is not None:
        if not operator.isSequenceType(args):
            raise TypeError('args must be a sequence')
        try:
            args_count = len(args)
        except Exception:
            raise TypeError('args could not be sized')
        for arg in args:
            if not isinstance(arg, basestring):
                raise TypeError('args must contain strings')
        args_array = (ctypes.c_char_p * args_count)(*args)
    if groups is not None:
        if not operator.isSequenceType(groups):
            raise TypeError('groups must be a sequence')
        try:
            len(groups)
        except Exception:
            raise TypeError('groups could not be sized')
        for group in groups:
            if not isinstance(group, basestring):
                raise TypeError('groups must contain strings')
        # create a null-terminated list (one longer than we need)
        groups_array = (ctypes.c_char_p * (len(groups) + 1))(*groups)
    res = _mysql_api.mysql_server_init(args_count, args_array, groups_array)
    if res:
        return do_exception(None)
    server_init_done = True
def test_01_RequestCACerts(self):
    cacert = self.proxy.RetrieveCACertificates()

    self.failUnless(operator.isSequenceType(cacert),
                    "RetrieveCACertificate returns a non-sequence")
    self.failUnless(len(cacert) == 2,
                    "RetrieveCACertificate returns a tuple of len != 2")
    self.failUnless(type(cacert[1]) != str and
                    operator.isSequenceType(cacert[1]),
                    "cacert[1] is not a sequence")
    self.assert_(cacert[0],
                 "RetrieveCACertificate returned failure: " + str(cacert[1]))

    certList = cacert[1]

    #
    # It should be a list containing a single pair (cert, policy)
    #
    self.assert_(len(certList) == 1,
                 "Retrieved cert list is not of length 1")
    self.assert_(len(certList[0]) == 2,
                 "Retrieved cert list does not contain a single pair")

    cert, policy = certList[0]

    self.assert_(re.search("access_id_CA", policy),
                 "Policy string does not appear to be valid")
    self.assert_(re.search("BEGIN CERTIFICATE", cert),
                 "Certificate does not appear to be valid")

    tmp = tempfile.mktemp()
    fh = open(tmp, "w")
    fh.write(cert)
    fh.close()

    fh = os.popen("openssl x509 -noout -subject -in %s" % (tmp), "r")
    subj = fh.read()
    fh.close()

    m = re.search(r"^subject=\s+(.*)", subj)
    self.assert_(m is not None, "Did not match subject")

    global issuer
    issuer = m.group(1)

    os.unlink(tmp)
def constructFileName(fname, shaderName, shaderParams, shaderArgs):
    assert(kgUtils.isAString(shaderName) and kgUtils.isAString(fname))
    assert(operator.isSequenceType(shaderParams) and
           operator.isSequenceType(shaderArgs))
    fname = os.path.abspath(fname)
    fname1, ext = os.path.splitext(fname)
    resFileName = fname1
    if(shaderParams != None or shaderArgs != None):
        print shaderParams
        print shaderArgs
        resFileName += '_' + '_'.join([x[:2]
                                       + '_' + re.sub(r'[.]', '_', y)
                                       for x, y in map(None, shaderParams, shaderArgs)])
    resFileName += ext
    print resFileName
    return resFileName
def _ExpandValue(var, specials, params, name, default):
    """Expand one value.

    This expands the <field>.<field>...<field> part of the variable
    expansion.  A field may be of the form *<param> to use the value of a
    parameter as the field name.
    """
    if var == '_key':
        return name
    elif var == '_this':
        return params
    if var.startswith('_'):
        value = specials
    else:
        value = params
    for v in var.split('.'):
        if v == '*_this':
            v = params
        if v.startswith('*'):
            v = _GetValue(specials['_params'], v[1:])
            if operator.isSequenceType(v):
                v = v[0]  # reduce repeated url param to single value
        value = _GetValue(value, str(v), default)
    return value
def add(self, *objs):
    """
    Add an object or container of objects to the network
    """
    self.unprepare()
    for obj in objs:
        if isinstance(obj, (NeuronGroup, Connection, NetworkOperation)):
            self._added_objects.append(obj)
        if isinstance(obj, NeuronGroup):
            if obj not in self.groups:
                self.groups.append(obj)
        elif isinstance(obj, Connection):
            if obj not in self.connections:
                self.connections.append(obj)
        elif isinstance(obj, NetworkOperation):
            if obj not in self._all_operations:
                self._operations_dict[obj.when].append(obj)
                self._all_operations.append(obj)
        elif isSequenceType(obj):
            for o in obj:
                self.add(o)
        else:
            raise TypeError('Only the following types of objects can be added to a network: NeuronGroup, Connection or NetworkOperation')
        try:
            gco = obj.contained_objects
            if gco is not None:
                self.add(gco)
        except AttributeError:
            pass
def init_with_figure(cls, data=None, affine=None, threshold=None,
                     cut_coords=None, figure=None, axes=None,
                     black_bg=False, leave_space=False):
    cut_coords = cls.find_cut_coords(data, affine, threshold, cut_coords)
    if not isinstance(figure, pl.Figure):
        # Make sure that we have a figure
        figsize = cls._default_figsize[:]
        # Adjust for the number of axes
        figsize[0] *= len(cut_coords)
        facecolor = 'k' if black_bg else 'w'
        if leave_space:
            figsize[0] += 3.4
        figure = pl.figure(figure, figsize=figsize,
                           facecolor=facecolor)
    else:
        if isinstance(axes, pl.Axes):
            assert axes.figure is figure, ("The axes passed are not "
                                           "in the figure")
    if axes is None:
        axes = [0., 0., 1., 1.]
        if leave_space:
            axes = [0.3, 0, .7, 1.]
    if operator.isSequenceType(axes):
        axes = figure.add_axes(axes)
        axes.axis('off')
    return cls(cut_coords, axes, black_bg)
def size(X):
    from operator import isSequenceType
    out = marray()
    while isSequenceType(X):
        out.append(len(X))
        X = X[0]
    return out
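A short, hypothetical usage note for `size()` above (it assumes `marray` behaves like a plain Python list):

# Added illustration -- reports the length at each nesting level, MATLAB-style.
# size([[1, 2, 3], [4, 5, 6]]) -> [2, 3]
# size([7, 8])                 -> [2]
# Caveat: a string argument would loop forever, because a one-character
# string is itself a sequence whose first element is again a string.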
def merge(*mappings, **opts):
    '''
    Merges all mappings given as arguments.
    opts can have {'keyeq': predicate}
    '''
    mapping = {}
    keytransform = opts.get('keytransform', lambda k: k)
    use_list = opts.get('use_list', lambda k: k)
    for elem in mappings:
        if isMappingType(elem):
            # {key: value, key: value, ...}
            items = elem.iteritems()
        elif isSequenceType(elem):
            # [(key, value), (key, value), ...]
            if not all(len(s) == 2 for s in elem):
                raise TypeError(
                    'mapping sequences must be sequences of (key,'
                    'value) pairs: %s %s', type(elem), repr(elem)
                )
            items = elem
        else:
            raise TypeError('all arguments to merge must be mappings: %r', elem)
        for key, value in items:
            merge_values(
                mapping, keytransform(key), value, keytransform, use_list
            )
    return mapping
def fireEvent(self, event_type, event_value, listeners=None):
    """sends an event to all listeners or a specific one"""
    if listeners is None:
        listeners = self._listeners
    if listeners is None:
        return
    if not operator.isSequenceType(listeners):
        listeners = listeners,
    for listener in listeners:
        if isinstance(listener, weakref.ref) or \
                isinstance(listener, BoundMethodWeakref):
            l = listener()
        else:
            l = listener
        if l is None:
            continue
        meth = getattr(l, 'eventReceived', None)
        if meth is not None and operator.isCallable(meth):
            l.eventReceived(self, event_type, event_value)
        elif operator.isCallable(l):
            l(self, event_type, event_value)
def electrode(Re, Ce, v_el='v_el', vm='vm', i_inj='i_inj', i_cmd='i_cmd'):
    '''
    An intracellular electrode modeled as an RC circuit, or multiple
    RC circuits in series (if Re, Ce are lists).
    v_el = electrode (=recording) potential
    vm = membrane potential
    i_inj = current entering the membrane
    i_cmd = electrode command current (None = no injection)
    Returns an Equations() object.
    '''
    if isSequenceType(Re):
        if len(Re) != len(Ce) or len(Re) < 2:
            raise TypeError, "Re and Ce must have the same length"
        v_mid, i_mid = [], []
        for i in range(len(Re) - 1):
            v_mid.append('v_mid_' + str(i) + unique_id())
            i_mid.append('i_mid_' + str(i) + unique_id())
        eqs = electrode(Re[0], Ce[0], v_mid[0], vm, i_inj, i_mid[0])
        for i in range(1, len(Re) - 1):
            eqs += electrode(Re[i], Ce[i], v_mid[i], v_mid[i - 1],
                             i_mid[i - 1], i_mid[i])
        eqs += electrode(Re[-1], Ce[-1], v_el, v_mid[-1], i_mid[-1], i_cmd)
        return eqs
    else:
        if Ce > 0 * farad:
            return Equations('''
                dvr/dt = ((vm-vr)/Re+ic)/Ce : mV
                ie = (vr-vm)/Re : nA''',
                vr=v_el, vm=vm, ic=i_cmd, ie=i_inj, Re=Re, Ce=Ce)
        else:  # ideal electrode - pb here
            return Equations('''
                vr = vm+Re*ic : volt
                ie = ic : amp''',
                vr=v_el, vm=vm, ic=i_cmd, ie=i_inj)
def init_with_figure(cls, data=None, affine=None, threshold=None,
                     cut_coords=None, figure=None, axes=None,
                     black_bg=False, leave_space=False):
    cut_coords = cls.find_cut_coords(data, affine, threshold, cut_coords)
    if isinstance(axes, pl.Axes) and figure is None:
        figure = axes.figure
    if not isinstance(figure, pl.Figure):
        # Make sure that we have a figure
        figsize = cls._default_figsize[:]
        # Adjust for the number of axes
        figsize[0] *= len(cut_coords)
        facecolor = 'k' if black_bg else 'w'
        if leave_space:
            figsize[0] += 3.4
        figure = pl.figure(figure, figsize=figsize,
                           facecolor=facecolor)
    else:
        if isinstance(axes, pl.Axes):
            assert axes.figure is figure, ("The axes passed are not "
                                           "in the figure")
    if axes is None:
        axes = [0., 0., 1., 1.]
        if leave_space:
            axes = [0.3, 0, .7, 1.]
    if operator.isSequenceType(axes):
        axes = figure.add_axes(axes)
        # People forget to turn their axis off, or to set the zorder, and
        # then they cannot see their slicer
        axes.axis('off')
    return cls(cut_coords, axes, black_bg)
def _uniquemerge2literal(attrs):
    """Compress a sequence into its unique elements (with string merge).

    Whenever there is more than one unique element in `attrs`, these are
    converted to strings and joined with a '+' character in between.

    Parameters
    ----------
    attrs : sequence, arbitrary

    Returns
    -------
    Non-sequence arguments are passed as is. Sequences are converted into
    a single item representation (see above) and returned.  None is
    returned in case of an empty sequence.
    """
    # only do something if multiple items are given
    if not operator.isSequenceType(attrs):
        return attrs
    unq = np.unique(attrs)
    lunq = len(unq)
    if lunq > 1:
        return '+'.join([str(l) for l in unq])
    elif lunq:
        # single unique entry -- return it
        return unq[0]
    else:
        return None
def _convert_to_svm_node_array(x):
    """convert a sequence or mapping to an svm_node array"""
    import operator

    # Find non zero elements
    iter_range = []
    if type(x) == dict:
        for k, v in x.iteritems():
            # all zeros kept due to the precomputed kernel; no good solution yet
            # if v != 0:
            iter_range.append(k)
    elif operator.isSequenceType(x):
        for j in range(len(x)):
            # if x[j] != 0:
            iter_range.append(j)
    else:
        raise TypeError, "data must be a mapping or a sequence"

    iter_range.sort()
    data = svmc.svm_node_array(len(iter_range) + 1)
    svmc.svm_node_array_set(data, len(iter_range), -1, 0)
    j = 0
    for k in iter_range:
        svmc.svm_node_array_set(data, j, k, x[k])
        j = j + 1
    return data
def init_with_figure(cls, img, threshold=None,
                     cut_coords=None, figure=None, axes=None,
                     black_bg=False, leave_space=False):
    cut_coords = cls.find_cut_coords(img, threshold, cut_coords)
    if isinstance(axes, pl.Axes) and figure is None:
        figure = axes.figure
    if not isinstance(figure, pl.Figure):
        # Make sure that we have a figure
        figsize = cls._default_figsize[:]
        # Adjust for the number of axes
        figsize[0] *= len(cut_coords)
        facecolor = 'k' if black_bg else 'w'
        if leave_space:
            figsize[0] += 3.4
        figure = pl.figure(figure, figsize=figsize,
                           facecolor=facecolor)
    if isinstance(axes, pl.Axes):
        assert axes.figure is figure, ("The axes passed are not "
                                       "in the figure")
    if axes is None:
        axes = [0., 0., 1., 1.]
        if leave_space:
            axes = [0.3, 0, .7, 1.]
    if operator.isSequenceType(axes):
        axes = figure.add_axes(axes)
        # People forget to turn their axis off, or to set the zorder, and
        # then they cannot see their slicer
        axes.axis('off')
    return cls(cut_coords, axes, black_bg)
def find_capacitance(model):
    '''
    Tries to find the membrane capacitance from a set of differential
    equations or a model given as a dictionary (with typical keys 'model',
    'threshold' and 'reset').
    '''
    if type(model) == types.DictType:
        if 'Cm' in model:
            return model['Cm']
        if 'C' in model:
            return model['C']
        # Failed: look at the equations
        if 'model' in model:
            model = model['model']
        else:  # no clue!
            raise TypeError, 'Strange model!'
    if isinstance(model, StateUpdater):
        if hasattr(model, 'Cm'):
            return model.Cm
        if hasattr(model, 'C'):
            return model.C
    if isSequenceType(model):
        model = model[0]  # The first equation is the membrane equation
    if type(model) == types.FunctionType:
        if 'Cm' in model.func_globals:
            return model.func_globals['Cm']
        if 'C' in model.func_globals:
            return model.func_globals['C']
    # Nothing was found!
    raise TypeError, 'No capacitance found!'
def _action(self, key, func, missingok=False, **kwargs):
    """Run specific func either on a single item or on all of them

    Parameters
    ----------
    key : str
      Name of the conditional attribute
    func
      Function (not bound) to call given an item, and **kwargs
    missingok : bool
      If True - do not complain about wrong key
    """
    if isinstance(key, basestring):
        if key.upper() == 'ALL':
            for key_ in self:
                self._action(key_, func, missingok=missingok, **kwargs)
        else:
            try:
                func(self[key], **kwargs)
            except:
                if missingok:
                    return
                raise
    elif operator.isSequenceType(key):
        for item in key:
            self._action(item, func, missingok=missingok, **kwargs)
    else:
        raise ValueError, \
              "Don't know how to handle variable given by %s" % key
def merge(*mappings, **opts):
    '''
    Merges all mappings given as arguments.
    opts can have {'keyeq': predicate}
    '''
    mapping = {}
    keytransform = opts.get('keytransform', lambda k: k)
    use_list = opts.get('use_list', lambda k: k)
    for elem in mappings:
        if isMappingType(elem):
            # {key: value, key: value, ...}
            items = elem.iteritems()
        elif isSequenceType(elem):
            # [(key, value), (key, value), ...]
            if not all(len(s) == 2 for s in elem):
                raise TypeError(
                    'mapping sequences must be sequences of (key,'
                    'value) pairs: %s %s', type(elem), repr(elem))
            items = elem
        else:
            raise TypeError('all arguments to merge must be mappings: %r', elem)
        for key, value in items:
            merge_values(mapping, keytransform(key), value, keytransform,
                         use_list)
    return mapping
def _check_input_times(times):
    issequence = False
    if isSequenceType(times):
        ## Here assume sequence contains
        ## elements of same type.
        test_tm = times[0]
        issequence = True
    else:
        test_tm = times

    if not issequence:
        times = [times]

    if isinstance(test_tm, datetime.datetime):
        tticks = map(ticks, times)
        tticks = array(tticks, float)
    elif isNumberType(test_tm):
        tticks = array(times, float)
    else:
        raise TypeError("Second input for interpolation functions"
                        " must be list/array of number or datetime, or single"
                        " number or datetime")
    return tticks
def upgraderChangeUserAgentTuple(self, clients=None):
    "Change the user agent to the newer tuple format for detection"
    if not clients:
        clients = self.getClients()
    lookup = {
        "Konqueror/2.2": ("Mozilla/5", "Konqueror", "2", "2"),
        "Konqueror": ("Mozilla/5", "Konqueror", "", ""),
        "MSIE": ("Mozilla/4", "MSIE", "", ""),
        "MSIE 6": ("Mozilla/4", "MSIE", "6", ""),
        "MSIE 6.0": ("Mozilla/4", "MSIE", "6", "0"),
        "MSIE 5": ("Mozilla/4", "MSIE", "5", ""),
        "MSIE 4": ("Mozilla/4", "MSIE", "4", ""),
        "MSIE 3": ("Mozilla/3", "MSIE", "3", ""),
        "MSIE 2": ("Mozilla/2", "MSIE", "2", ""),
        "MSIE 5.5": ("Mozilla/4", "MSIE", "5", "5"),
        "MSIE 5.0": ("Mozilla/4", "MSIE", "5", "0"),
        "Opera": ("", "Opera", "", ""),
        "Opera/6": ("", "Opera", "6", ""),
        "Opera/6.0": ("", "Opera", "6", "0"),
        "Opera/5": ("", "Opera", "5", ""),
        "Opera/5.0": ("", "Opera", "5", "0"),
    }
    if operator.isSequenceType(clients):
        list = [lookup[i] for i in clients if i in lookup]
        if len(list):
            self.setClients(tuple(list))
def __init__(self, intf, dict=None, inst=None):
    actualIntf = None

    if isinstance(intf, str) or isinstance(intf, unicode):
        actualIntf = [_jclass.JClass(intf)]
    elif isinstance(intf, _jclass._JavaClass):
        actualIntf = [intf]
    elif operator.isSequenceType(intf):
        actualIntf = []
        for i in intf:
            if isinstance(i, str) or isinstance(i, unicode):
                actualIntf.append(_jclass.JClass(i))
            elif isinstance(i, _jclass._JavaClass):
                actualIntf.append(i)
            else:
                raise TypeError, "JProxy requires java interface classes or the names of java interfaces classes"
    else:
        raise TypeError, "JProxy requires java interface classes or the names of java interfaces classes"

    for i in actualIntf:
        if not JClassUtil.isInterface(i):
            raise TypeError, "JProxy requires java interface classes or the names of java interfaces classes : " + i.__name__

    if dict is not None and inst is not None:
        raise RuntimeError, "Specify only one of dict and inst"

    self._dict = dict
    self._inst = inst

    self._proxy = _jpype.createProxy(self, actualIntf)
def _build_parameter(self, param_def):
    '''Builds a list of parameters, each of them represented by a dictionary
    containing information: name, type, default_value, description, min and
    max values. In case of simple parameters, type is the parameter type.
    In case of ParamRepeat, type is a list containing definition of the
    param repeat.
    '''
    ret = []
    param_def = param_def or ()
    for p in param_def:
        t = p[1]
        ret_p = {'min': 1, 'max': None}
        # take care of old ParamRepeat
        if isinstance(t, ParamRepeat):
            t = t.obj()
        if operator.isSequenceType(t) and not isinstance(t, (str, unicode)):
            if operator.isMappingType(t[-1]):
                ret_p.update(t[-1])
                t = self._build_parameter(t[:-1])
            else:
                t = self._build_parameter(t)
        ret_p['name'] = p[0]
        ret_p['type'] = t
        ret_p['default_value'] = p[2]
        ret_p['description'] = p[3]
        ret.append(ret_p)
    return ret
def geocentric_to_geographic_latitude(geoclat):
    """
    Converts a geocentric latitude to a geographic/geodetic latitude.

    :param geoclat:
        An :class:`astropysics.coords.AngularCoordinate` object (or arguments
        to create one) or an angle in degrees for the geocentric latitude.

    :returns:
        An :class:`astropysics.coords.AngularCoordinate` object with the
        geographic latitude.
    """
    from astropysics.constants import Rea, Reb
    from astropysics.coords import AngularCoordinate
    from operator import isSequenceType

    if not isinstance(geoclat, AngularCoordinate):
        if isSequenceType(geoclat):
            rads = AngularCoordinate(*geoclat).radians
        else:
            rads = AngularCoordinate(geoclat).radians
    else:
        rads = geoclat.radians

    boasq = (Reb / Rea)**2

    return AngularCoordinate(np.arctan((1 / boasq) * np.tan(rads)),
                             radians=True)
def __setitem__(self, key, value):
    """Add a new IndexedCollectable to the collection

    Parameters
    ----------
    item : IndexedCollectable or of derived class.
      Must have 'name' assigned.
    """
    # local binding
    ulength = self._uniform_length

    # XXX should we check whether it is some other Collectable?
    if not isinstance(value, ArrayCollectable):
        # if it is only a single element iterable, attempt broadcasting
        if isSequenceType(value) and len(value) == 1 and ulength is not None:
            if ulength > 1:
                # cannot use np.repeat, because it destroys dimensionality
                value = [value[0]] * ulength
        value = ArrayCollectable(value)
    if ulength is None:
        ulength = len(value)
    elif not len(value.value) == ulength:
        raise ValueError(
            "Collectable '%s' with length [%i] does not match "
            "the required length [%i] of collection '%s'."
            % (key, len(value.value), ulength, str(self)))
    # tell the attribute to maintain the desired length
    value.set_length_check(ulength)
    Collection.__setitem__(self, key, value)
def isSequenceType(obj):
    if six.PY2:
        import operator
        return operator.isSequenceType(obj)
    else:
        import collections.abc
        return isinstance(obj, collections.abc.Sequence)
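A brief, hypothetical usage sketch for the compatibility wrapper above (not part of the original source). Note the two branches are not strictly equivalent: Python 2's operator.isSequenceType accepts anything with sequence-like __getitem__ behavior, while collections.abc.Sequence only matches registered sequence types.

# Added illustration -- behavior that agrees across versions for built-ins:
assert isSequenceType([1, 2, 3])
assert isSequenceType((1, 2, 3))
assert isSequenceType('abc')
assert not isSequenceType({'a': 1})
# Likely divergence for objects that merely implement __getitem__
# (e.g. numpy arrays): Python 2 reports True, the Sequence ABC does not.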
def _setValidjdrange(self, val):
    """
    Sets the jd range over which this method is valid. Trying to get
    something outside will result in an :exc:`EphemerisAccuracyWarning`.

    Intended for use in __init__.

    :param val:
        The range as (minjd,maxjd), can be None to indicate no bound.
        If set to None, the result will be (None,None).
    """
    if val is None:
        self._validrange = (None, None)
    else:
        v1, v2 = val
        if v1 is None and v2 is None:
            self._validrange = (None, None)
        else:
            from operator import isSequenceType
            from ..obstools import calendar_to_jd
            from datetime import datetime

            vs = []
            for v in (v1, v2):
                if v is None:
                    vs.append(None)
                elif v == 'now':
                    vs.append(calendar_to_jd(datetime.utcnow(), tz=None))
                elif hasattr(v, 'year') or isSequenceType(v):
                    vs.append(calendar_to_jd(v))
                else:
                    vs.append(v)
            self._validrange = tuple(vs)
def seq_to_svm_node(x):
    """convert a sequence or mapping to an SVMNode array"""
    import operator

    length = len(x)

    # make two lists, one of indices, one of values
    # YYY Use isinstance instead of type...is so we could
    #     easily use derived subclasses
    if isinstance(x, np.ndarray):
        iter_range = range(length)
        iter_values = x
    elif isinstance(x, dict):
        iter_range = sorted(x)
        iter_values = np.asarray([x[k] for k in iter_range])
    elif operator.isSequenceType(x):
        iter_range = range(length)
        iter_values = np.asarray(x)
    else:
        raise TypeError, "data must be a mapping or an ndarray or a sequence"

    # allocate c struct
    data = svmc.svm_node_array(length + 1)
    # insert markers into the c struct
    svmc.svm_node_array_set(data, length, -1, 0.0)
    # pass the list and the ndarray to the c struct
    svmc.svm_node_array_set(data, iter_range, iter_values)

    return data
def _convert_to_svm_node_array(x, keep_zeros=True):
    """convert a sequence or mapping to an svm_node array.

    3/12/09 -- originally, zeros were included (the in-line comment suggests
    this had something to do with precomputed kernels?)  test ?
    """
    import operator

    # Find non zero elements
    iter_range = []
    if type(x) == dict:
        for k, v in x.iteritems():
            # all zeros kept due to the precomputed kernel; no good solution yet
            if v != 0 or keep_zeros:
                iter_range.append(k)
    elif operator.isSequenceType(x):
        for j in range(len(x)):
            if x[j] != 0 or keep_zeros:
                iter_range.append(j)
    else:
        raise TypeError, "data must be a mapping or a sequence"

    iter_range.sort()
    data = svmc.svm_node_array(len(iter_range) + 1)
    svmc.svm_node_array_set(data, len(iter_range), -1, 0)
    j = 0
    for k in iter_range:
        svmc.svm_node_array_set(data, j, k, x[k])
        j = j + 1
    return data
def Vectorize(v):
    "Converts v from a sequence into a Vector3."
    if operator.isSequenceType(v):
        v = DirectX.Vector3(System.Single(v[0]), System.Single(v[1]),
                            System.Single(v[2]))
    return v
def __idiv__(self, arg):
    if operator.isSequenceType(arg):
        assert len(arg) == len(self.__GFlist), "list of incorrect length"
        for l, g in izip(arg, self.__GFlist):
            g /= l
    else:
        for i, g in self:
            g /= arg
    return self
def _uniquemerge2literal(attrs):
    """Compress a sequence into its unique elements (with string merge).

    Whenever there is more than one unique element in `attrs`, these are
    converted to strings and joined with a '+' character in between.

    Parameters
    ----------
    attrs : sequence, arbitrary

    Returns
    -------
    Non-sequence arguments are passed as is. Sequences are converted into
    a single item representation (see above) and returned.  None is
    returned in case of an empty sequence.
    """
    # only do something if multiple items are given
    if not operator.isSequenceType(attrs):
        return attrs
    unq = np.unique(attrs)
    lunq = len(unq)
    if lunq > 1:
        return "+".join([str(l) for l in unq])
    elif lunq:
        # single unique entry -- return it
        return unq[0]
    else:
        return None
def connect(self, attrs):
    if not operator.isSequenceType(attrs):
        attrs = (attrs,)
    self.disconnect()
    self._attrs = attrs
    for attr in self._attrs:
        attr.addListener(self)
def remove(self, *objs, **kwds):
    '''
    Remove an object or sequence of objects from a Network (note that
    this will force the Network to be prepared from scratch).
    '''
    unprepare = kwds.pop('unprepare', True)
    for obj in objs:
        if isinstance(obj, (NeuronGroup, Connection, NetworkOperation)):
            self._added_objects = [o for o in self._added_objects if o is not obj]
            if isinstance(obj, NeuronGroup):
                self.groups = [o for o in self.groups if o is not obj]
            if isinstance(obj, Connection):
                self.connections = [o for o in self.connections if o is not obj]
            if isinstance(obj, NetworkOperation):
                self._all_operations = [o for o in self._all_operations if o is not obj]
                self._operations_dict[obj.when] = [o for o in self._operations_dict[obj.when]
                                                   if o is not obj]
        elif isSequenceType(obj):
            for o in obj:
                self.remove(o, unprepare=False)
        else:
            raise TypeError('Only the following types of objects can be removed from a network: NeuronGroup, Connection or NetworkOperation')
        try:
            gco = obj.contained_objects
            if gco is not None:
                self.remove(gco, unprepare=False)
        except AttributeError:
            pass
    if unprepare:
        self.unprepare()
def _convert_to_svm_node_array(x):
    """convert a sequence or mapping to an svm_node array"""
    import operator

    # Find non zero elements
    iter_range = []
    if type(x) == dict:
        for k, v in x.iteritems():
            # all zeros kept due to the precomputed kernel; no good solution yet
            # if v != 0:
            iter_range.append(k)
    elif operator.isSequenceType(x):
        for j in range(len(x)):
            # if x[j] != 0:
            iter_range.append(j)
    else:
        raise TypeError, "data must be a mapping or a sequence"

    iter_range.sort()
    data = svmc.svm_node_array(len(iter_range) + 1)
    svmc.svm_node_array_set(data, len(iter_range), -1, 0)
    j = 0
    for k in iter_range:
        svmc.svm_node_array_set(data, j, k, x[k])
        j = j + 1
    return data
def __idiv__(self, arg):
    if operator.isSequenceType(arg):
        assert len(arg) == len(self.__GFlist), "list of incorrect length"
        for l, g in izip(arg, self.__GFlist):
            g /= l
    else:
        for i, g in self:
            self[i] /= arg
    return self
def __calKeyframes(start, end, func, *additionalArgs, **keyArgs):
    """ helping to calculate and pack key values """
    perc = start
    values = func(perc, *additionalArgs)
    samplingRate = 60.0
    if keyArgs.has_key('samplingRate'):
        samplingRate = keyArgs['samplingRate']
    keyArrays = []
    if operator.isSequenceType(values):
        valueCount = len(values)
        isSequence = True
        for value in values:
            keyArrays.append([value])
    else:
        isSequence = False
        keyArrays.append(values)
    offset = 1.0 / samplingRate
    epsilon = offset * 0.0001
    while perc != end:
        perc += offset
        if (end - start > 0) ^ ((perc + epsilon) < end):
            perc = end
        values = func(perc, *additionalArgs)
        if isSequence:
            for i in range(valueCount):
                keyArrays[i].append(values[i])
        else:
            keyArrays.append(values)
    return keyArrays
def validate(self, data):
    """Checks that the data is a valid sequence."""
    from operator import isSequenceType
    if not isSequenceType(data):
        return "List data has to be a sequence."
    return True