def __init__(self, data=''):
    if not isinstance(data, str):
        raise ValueError(type(data))  # expecting str  # TODO other types
    object.__init__(self)
    self.data = data
    self.state = self.State(data)  # TODO strview or similar in the state
    self.stack = []
def __get__(self, obj, type=None):
    value = None
    try:
        value = self.descr.__get__(obj, type)
    except AttributeError:
        # special case for list: allow the default to work with +=;
        # unfortunately, that means that direct access also succeeds, even
        # though the property wasn't set yet ... (TODO: ideas??)
        if self.__default.__class__ == list:
            self.descr.__set__(obj, list(self.__default))
            value = self.descr.__get__(obj, type)   # no history
        else:
            raise
    if value is None:
        value = self.descr.__get__(obj, type)
    # locked configurables return copies to prevent modifications
    if obj.isLocked():
        import __builtin__
        if __builtin__.type(value) is dict:
            from ctypes import pythonapi, py_object
            from _ctypes import PyObj_FromPtr
            PyDictProxy_New = pythonapi.PyDictProxy_New
            PyDictProxy_New.argtypes = (py_object,)
            PyDictProxy_New.rettype = py_object
            value = PyObj_FromPtr(PyDictProxy_New(value))
        elif __builtin__.type(value) is list:
            value = tuple(value)  # point being that tuple is read-only
        else:
            import copy
            value = copy.copy(value)
    return value
def get_metadata(self):
    job = self.__load_job()
    # override job type
    self.objectType = __builtin__.type(job).__name__
    tasks = []
    # get list of tasks from Tasks or Pipeline
    if job.tasks is not None and len(job.tasks) > 0:
        task = job.tasks[0]
        if isinstance(task, list):
            tasks = list([stage[0].__to_dict__() for stage in job.tasks])
        elif isinstance(task, _Pipeline):
            for step in task.steps:
                tasks.extend(list([task_step.__to_dict__() for task_step in step]))
    # weakly check whether there are possible results
    has_results = False
    if hasattr(job, "get_results") or "results" in self.__child_views:
        has_results = True
    return {
        "jobname": [job.name],
        "tasks": tasks,
        "env": job.environment.__to_dict__(),
        "status": job.get_status(),
        "metrics": job.get_metrics(),
        "taskstatus": job._get_task_status(),
        "jobtype": __builtin__.type(job).__name__,
        "starttime": job._get_start_time(),
        "endtime": job._get_end_time(),
        "has_results": has_results,
    }
def __repr__(self):
    '''
        Describes current function wrapper.

        Examples:

        >>> function_decorator = FunctionDecorator(
        ...     FunctionDecorator.__repr__)
        >>> function_decorator.class_object = FunctionDecorator
        >>> repr(function_decorator)  # doctest: +ELLIPSIS
        'Object of "FunctionDecorator" ... "__repr__" ... (NoneType).'

        >>> repr(FunctionDecorator(FunctionDecorator))  # doctest: +ELLIPSIS
        'Object of "FunctionDecorator" with wrapped function "None" and...'
    '''
    function_name = 'None'
    if self.__func__ is not None:
        function_name = self.__func__.__name__
    if self.class_object:
        # # python3.5
        # # return (
        # #     'Object of "{class_name}" with class object '
        # #     '"{class_object}", object "{object}", wrapped function '
        # #     '"{wrapped_function}" and return value "{value}" '
        # #     '({type}).'.format(
        # #         class_name=self.__class__.__name__,
        # #         class_object=self.class_object.__name__,
        # #         object=builtins.repr(self.object),
        # #         wrapped_function=function_name,
        # #         value=builtins.str(self.return_value),
        # #         type=builtins.type(self.return_value).__name__))
        # # return (
        # #     'Object of "{class_name}" with wrapped function '
        # #     '"{wrapped_function}" and return value "{value}" '
        # #     '({type}).'.format(
        # #         class_name=self.__class__.__name__,
        # #         wrapped_function=function_name,
        # #         value=builtins.str(self.return_value),
        # #         type=builtins.type(self.return_value).__name__))
        return (
            'Object of "{class_name}" with class object '
            '"{class_object}", object "{object}", wrapped function '
            '"{wrapped_function}" and return value "{value}" '
            '({type}).'.format(
                class_name=self.__class__.__name__,
                class_object=self.class_object.__name__,
                object=builtins.repr(self.object),
                wrapped_function=function_name,
                value=convert_to_unicode(self.return_value),
                type=builtins.type(self.return_value).__name__))
    return (
        'Object of "{class_name}" with wrapped function '
        '"{wrapped_function}" and return value "{value}" '
        '({type}).'.format(
            class_name=self.__class__.__name__,
            wrapped_function=function_name,
            value=convert_to_unicode(self.return_value),
            type=builtins.type(self.return_value).__name__))
def __init__(self, dbUserName, dbPasswd, dbAddress, dbPort=3306, dbName=None,
             charset='utf8', *args, **kwarg):
    self.tab_list = {}
    self.db_name = dbName
    self.__type_map = db_mapper.type_map
    self.__mysql_format = "mysql+pymysql://{dbUserName}:{dbPasswd}@{dbAddress}:{dbPort}/{dbName}?charset={charset}"
    self.__mysql_path = self.__mysql_format.format(
        dbUserName=dbUserName, dbPasswd=dbPasswd, dbAddress=dbAddress,
        dbPort=dbPort, dbName=dbName, charset=charset)
    self.engine = create_engine(self.__mysql_path, echo=False)
    self.conn = self.engine.connect()
    self.metadata = MetaData(self.engine)
    self.__Base = declarative_base(metadata=self.metadata)
    self.__Base.metadata.reflect(self.engine)
    for table_name in self.__Base.metadata.tables:
        print dbName, table_name
        try:
            setattr(
                self, table_name,
                __builtin__.type(
                    str(table_name), (self.__Base,),
                    {'__table__': self.__Base.metadata.tables[table_name]}))
            self.tab_list[table_name] = getattr(self, table_name)
        except Exception as e:
            print Exception, ":", e
            if 'could not assemble any primary key' in e.message:
                try:
                    setattr(
                        self, table_name,
                        __builtin__.type(
                            str(table_name), (self.__Base,), {
                                '__table__':
                                    self.__Base.metadata.tables[table_name],
                                '__mapper_args__': {
                                    'primary_key': [
                                        self.__Base.metadata.tables[table_name].c.id,
                                    ]
                                }
                            }))
                    self.tab_list[table_name] = getattr(self, table_name)
                    print "add primary key id"
                except Exception as e:
                    print Exception, ":", e
    self.__Session = sessionmaker(bind=self.engine)
    self.__session = self.__Session()
def __init__(self, name, value, type=None, propagated=None, overridable=None,
             tosubclass=None, toinstance=None, translatable=None):
    self.name = name
    self.type = type
    self.propagated = propagated
    self.overridable = overridable
    self.tosubclass = tosubclass
    self.toinstance = toinstance
    self.translatable = translatable

    # Determine type of value if not specified
    import __builtin__
    if type is None:
        # Can't work out what is going on if type and value are
        # both not set.
        if value is None:
            raise TypeError('Null qualifier "%s" must have a type' % name)
        if __builtin__.type(value) == list:
            # Determine type for list value
            if len(value) == 0:
                raise TypeError(
                    'Empty qualifier array "%s" must have a type' % name)
            self.type = cim_types.cimtype(value[0])
        else:
            # Determine type for regular value
            self.type = cim_types.cimtype(value)

    # Don't let anyone set integer or float values.  You must use
    # a subclass from the cim_type module.
    if __builtin__.type(value) in (int, float, long):
        raise TypeError('Must use a CIM type for numeric qualifiers.')

    self.value = value
def __exit__(self, exc_type, exc_value, traceback):
    r"""
        Keep or revert parser state.
    """
    if not isinstance(self, ParserSkeleton):
        raise RuntimeError(type(self))  # expecting ParserSkeleton
    if not len(self.stack) > 0:
        raise RuntimeError(len(self.stack))  # expecting a non-empty stack
    _state = self.stack.pop(-1)
    if exc_type is None and exc_value is None and traceback is None:
        # all ok, forget previous state
        pass
    else:
        # has exception, revert to previous position and propagate exception
        self.state = _state
def maybe(self, func, *args, **kwargs):
    r"""
        Call ``func`` with the provided arguments.
        Return ``None`` on ``AssertionError``.
    """
    if not isinstance(self, ParserSkeleton):
        raise RuntimeError(type(self))  # expecting ParserSkeleton
    if not getattr(self, func.__name__) == func:
        raise ValueError(repr(func))  # must belong to self
    try:
        with self:  # revert state on error
            return func(*args, **kwargs)
    except AssertionError:
        return None
def __load_job_handler(self, url, handler):
    m = re.match('load_job/(.*)', url)
    jobname = urllib2.unquote(m.group(1).encode('utf-8')).encode('utf-8')
    if jobname in self.__job_list and self.__job_list[jobname] is not None:
        job = self.__job_list[jobname]
    else:
        job = graphlab.deploy.jobs[jobname]
        self.__job_list[jobname] = job
        # link this loaded job to the child view, so job loading is only needed once
        self.__child_views[jobname]._set_job(job)
    if not job:
        handler.write({"job": None})
        return
    # TODO: need to clean up get job type and get status, same code in job
    jobtype = __builtin__.type(job).__name__
    job_status = job.get_status(_silent=True)
    job_info = {}
    job_info['name'] = job.name
    job_info['status'] = job_status
    job_info['starttime'] = str(job.get_start_time())
    job_info['type'] = jobtype
    job_info['filename'] = jobname  # add file name
    # get the rest of the completed info
    if job.get_status(_silent=True) == 'Completed':
        job_info['endtime'] = str(job.get_end_time())
    handler.write({"job": job_info})
def instance_of(type, excluded_types=(), displayname=None):
    if displayname is not None:
        def displayname_(cls):
            return displayname
    else:
        def displayname_(cls):
            message = 'instance of {!r}'.format(type)
            if excluded_types:
                message += \
                    ' but not of {{{}}}' \
                    .format(', '.join(repr(type_) for type_ in excluded_types))
            return message
    displayname_.__name__ = 'displayname'
    displayname_ = classmethod(displayname_)

    @classmethod
    def _excluded_types(cls):
        return excluded_types

    @classmethod
    def _wrapped_type(cls):
        return type

    classname = type.__name__ + '_Instance'
    if excluded_types:
        classname += '_Excluding_' + '_'.join(type_.__name__
                                              for type_ in excluded_types)
    class_attrs = {'_excluded_types': _excluded_types,
                   '_wrapped_type': _wrapped_type}
    if displayname_ is not None:
        class_attrs['displayname'] = displayname_
    return __builtin__.type(classname, (InstanceOfType,), class_attrs)
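# Illustrative sketch (toy names, not part of instance_of() above) of the
# three-argument __builtin__.type(name, bases, dict) call that instance_of()
# relies on to create a class dynamically in Python 2.
import __builtin__

class Base(object):
    pass

attrs = {'greet': classmethod(lambda cls: 'hello from ' + cls.__name__)}
Dynamic = __builtin__.type('Dynamic', (Base,), attrs)
assert issubclass(Dynamic, Base)
assert Dynamic.greet() == 'hello from Dynamic'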
def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _theZ=None, _theT=None, _theC=None, _roi=None, _locked=None,
             _g=None, _transform=None, _vectorEffect=None, _visibility=None,
             _fillColor=None, _fillRule=None, _strokeColor=None,
             _strokeDashArray=None, _strokeDashOffset=None,
             _strokeLineCap=None, _strokeLineJoin=None,
             _strokeMiterLimit=None, _strokeWidth=None, _fontFamily=None,
             _fontSize=None, _fontStretch=None, _fontStyle=None,
             _fontVariant=None, _fontWeight=None, _cx=None, _cy=None,
             _textValue=None):
    if __builtin__.type(self) == _M_omero.model.Point:
        raise RuntimeError('omero.model.Point is an abstract class')
    _M_omero.model.Shape.__init__(
        self, _id, _details, _loaded, _version, _theZ, _theT, _theC, _roi,
        _locked, _g, _transform, _vectorEffect, _visibility, _fillColor,
        _fillRule, _strokeColor, _strokeDashArray, _strokeDashOffset,
        _strokeLineCap, _strokeLineJoin, _strokeMiterLimit, _strokeWidth,
        _fontFamily, _fontSize, _fontStretch, _fontStyle, _fontVariant,
        _fontWeight)
    self._cx = _cx
    self._cy = _cy
    self._textValue = _textValue

def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _theZ=None, _theT=None, _theC=None, _roi=None, _locked=None,
             _g=None, _transform=None, _vectorEffect=None, _visibility=None,
             _fillColor=None, _fillRule=None, _strokeColor=None,
             _strokeDashArray=None, _strokeDashOffset=None,
             _strokeLineCap=None, _strokeLineJoin=None,
             _strokeMiterLimit=None, _strokeWidth=None, _fontFamily=None,
             _fontSize=None, _fontStretch=None, _fontStyle=None,
             _fontVariant=None, _fontWeight=None):
    if __builtin__.type(self) == _M_omero.model.Shape:
        raise RuntimeError('omero.model.Shape is an abstract class')
    _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
    self._version = _version
    self._theZ = _theZ
    self._theT = _theT
    self._theC = _theC
    self._roi = _roi
    self._locked = _locked
    self._g = _g
    self._transform = _transform
    self._vectorEffect = _vectorEffect
    self._visibility = _visibility
    self._fillColor = _fillColor
    self._fillRule = _fillRule
    self._strokeColor = _strokeColor
    self._strokeDashArray = _strokeDashArray
    self._strokeDashOffset = _strokeDashOffset
    self._strokeLineCap = _strokeLineCap
    self._strokeLineJoin = _strokeLineJoin
    self._strokeMiterLimit = _strokeMiterLimit
    self._strokeWidth = _strokeWidth
    self._fontFamily = _fontFamily
    self._fontSize = _fontSize
    self._fontStretch = _fontStretch
    self._fontStyle = _fontStyle
    self._fontVariant = _fontVariant
    self._fontWeight = _fontWeight
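# Minimal sketch (hypothetical classes, not the generated omero.model code) of
# the abstract-class guard used by the constructors above: instantiating the
# base class directly raises, while concrete subclasses pass the
# __builtin__.type(self) check.
import __builtin__

class AbstractShape(object):
    def __init__(self):
        if __builtin__.type(self) == AbstractShape:
            raise RuntimeError('AbstractShape is an abstract class')

class ConcretePoint(AbstractShape):
    pass

ConcretePoint()          # fine
try:
    AbstractShape()      # raises RuntimeError
except RuntimeError:
    pass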
def _is_right_type(cls, given_type, expected_type):
    # #
    '''
        Check whether a given type is the expected type or a subclass of \
        the expected type.

        Also handles the quirk that in Python a boolean is a subtype of an
        integer.

        Examples:

        >>> CheckObject._is_right_type(bool, int)
        False

        >>> CheckObject._is_right_type(list, tuple)
        False

        >>> CheckObject._is_right_type(list, list)
        True

        >>> from collections import Iterable
        >>> CheckObject._is_right_type(list, Iterable)
        True
    '''
    return (
        given_type is expected_type or expected_type is Null or
        expected_type is builtins.type(None) or
        builtins.issubclass(given_type, expected_type) and not (
            given_type is builtins.bool and expected_type is builtins.int))
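# Standalone illustration of the bool/int quirk that _is_right_type() guards
# against: in Python, bool is a subclass of int, so a plain issubclass check
# would accept booleans where integers are expected.
assert issubclass(bool, int)
assert isinstance(True, int)
# _is_right_type(bool, int) above therefore returns False explicitly.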
def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _vid=None, _label=None, _creationDate=None, _action=None,
             _lastUpdate=None, _barcode=None, _status=None,
             _numberOfSlots=None, _rows=None, _columns=None, _type=None,
             _arrayClass=None, _assayType=None):
    if __builtin__.type(self) == _M_omero.model.IlluminaArrayOfArrays:
        raise RuntimeError('omero.model.IlluminaArrayOfArrays is an abstract class')
    _M_omero.model.TiterPlate.__init__(
        self, _id, _details, _loaded, _version, _vid, _label, _creationDate,
        _action, _lastUpdate, _barcode, _status, _numberOfSlots, _rows,
        _columns)
    self._type = _type
    self._arrayClass = _arrayClass
    self._assayType = _assayType
def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _globalMin=None, _globalMax=None):
    if __builtin__.type(self) == _M_omero.model.StatsInfo:
        raise RuntimeError('omero.model.StatsInfo is an abstract class')
    _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
    self._version = _version
    self._globalMin = _globalMin
    self._globalMax = _globalMax
def get(self, name, type="model"): """ Get an object from the pool :param name: the object name :param type: the type :return: the instance """ if type == "*": for type in self.classes.keys(): if name in self._pool[self.database_name][type]: break try: return self._pool[self.database_name][type][name] except KeyError: if type == "report": from trytond.report import Report # Keyword argument 'type' conflicts with builtin function cls = __builtin__.type(str(name), (Report,), {}) obj = object.__new__(cls) obj._name = name self.add(obj, type) obj.__init__() return obj raise
def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _vid=None, _label=None, _creationDate=None, _action=None,
             _lastUpdate=None, _barcode=None, _status=None, _flowCell=None,
             _slot=None, _laneUK=None):
    if __builtin__.type(self) == _M_omero.model.Lane:
        raise RuntimeError('omero.model.Lane is an abstract class')
    _M_omero.model.Container.__init__(
        self, _id, _details, _loaded, _version, _vid, _label, _creationDate,
        _action, _lastUpdate, _barcode, _status)
    self._flowCell = _flowCell
    self._slot = _slot
    self._laneUK = _laneUK
def get(self, section_id, default_value=_sentinel, type=None):
    """
    get(section_id, [default_value], type=None)

    Return the value pointed at by **section_id**. If the key does not exist,
    **default_value** is returned. If *default_value* is left at its default,
    an empty :class:`Conf` instance is returned.

    If *type* is given, the value at *section_id* must exist and be an
    instance of *type*, or a :exc:`TypeError` is raised. When combined with
    *default_value*, the default is returned when there is no value instead
    of raising the :exc:`TypeError`, but the :exc:`TypeError` is still raised
    when the value is not an instance of *type*.
    """
    try:
        value = self[section_id]
    except KeyError:
        if default_value is not _sentinel:
            return default_value
        if type is not None:
            raise TypeError(
                u'The key {0} is required but does not exist'.format(
                    section_id))
        return EmptyConf()
    if type and not isinstance(value, type):
        raise TypeError(
            '{key} does not have the required type "{required}" but is a '
            '"{actual}"'.format(key=section_id, required=type,
                                actual=__builtin__.type(value).__name__))
    return value
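# Standalone sketch (illustrative names) of the sentinel-default pattern used
# by get() above, which distinguishes "no default given" from an explicit
# default of None.
_sentinel = object()

def lookup(mapping, key, default=_sentinel):
    try:
        return mapping[key]
    except KeyError:
        if default is not _sentinel:
            return default
        raise

assert lookup({'a': 1}, 'a') == 1
assert lookup({}, 'a', default=None) is None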
def test_CreateAlbum(self):
    log("test_CreateAlbum: Start...")
    try:
        log("test_CreateAlbum: Start Sign in process.")
        op.launch_RT_before_running_case()
        log("test_CreateAlbum: Signed in successfully.")
        log("test_CreateAlbum: Create Album.")
        op.create_album()
        log("test_CreateAlbum: Created Album and added item to Album successfully.")
        log("test_CreateAlbum: Verify the Album from CloudAPI.")
        wait_for_upload_complete("test")
        log("test_CreateAlbum: The Album is created successfully.")
    except Exception as ex:
        log("test_CreateAlbum: failed with exception: %s"
            % __builtin__.type(ex).__name__)
        log(traceback.format_exc())
        raise
    except:
        log("test_CreateAlbum: Unknown exception occurred. Details:")
        log(traceback.format_exc())
        raise
    else:
        log("test_CreateAlbum: ended successfully without an exception.")
def get(self, key, default=None, type=str):
    # required to restore the shadowed __builtin__.type()
    import __builtin__
    if default is not None:
        _type = __builtin__.type(default)
    else:
        _type = type
    val = self.__getitem__(key)
    return self.get_value(val, default, _type)
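# Small sketch of the shadowed-builtin workaround used above: when a parameter
# named `type` hides the builtin, __builtin__.type still reaches the original
# (names here are illustrative only).
import __builtin__

def describe(value, type=None):
    # the `type` parameter shadows the builtin inside this function
    _type = type if type is not None else __builtin__.type(value)
    return _type.__name__

assert describe(3) == 'int'
assert describe(3, type=float) == 'float'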
def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _parent=None, _child=None):
    if __builtin__.type(self) == _M_omero.model.ProjectAnnotationLink:
        raise RuntimeError('omero.model.ProjectAnnotationLink is an abstract class')
    _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
    self._version = _version
    self._parent = _parent
    self._child = _child
def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _renderingDef=None, _xstart=None, _ystart=None, _xend=None,
             _yend=None):
    if __builtin__.type(self) == _M_omero.model.ContrastStretchingContext:
        raise RuntimeError('omero.model.ContrastStretchingContext is an abstract class')
    _M_omero.model.CodomainMapContext.__init__(
        self, _id, _details, _loaded, _version, _renderingDef)
    self._xstart = _xstart
    self._ystart = _ystart
    self._xend = _xend
    self._yend = _yend

def __init__(self, _id=None, _details=None, _loaded=False, _entityId=None,
             _entityType=None, _lsid=None, _uuid=None):
    if __builtin__.type(self) == _M_omero.model.ExternalInfo:
        raise RuntimeError('omero.model.ExternalInfo is an abstract class')
    _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
    self._entityId = _entityId
    self._entityType = _entityType
    self._lsid = _lsid
    self._uuid = _uuid

def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _parent=None, _child=None, _owner=None):
    if __builtin__.type(self) == _M_omero.model.GroupExperimenterMap:
        raise RuntimeError('omero.model.GroupExperimenterMap is an abstract class')
    _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
    self._version = _version
    self._parent = _parent
    self._child = _child
    self._owner = _owner

def __init__(self, _id=None, _details=None, _loaded=False, _entityId=None,
             _entityType=None, _action=None, _event=None):
    if __builtin__.type(self) == _M_omero.model.EventLog:
        raise RuntimeError('omero.model.EventLog is an abstract class')
    _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
    self._entityId = _entityId
    self._entityType = _entityType
    self._action = _action
    self._event = _event

def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _vid=None, _label=None, _conf=None):
    if __builtin__.type(self) == _M_omero.model.ActionSetup:
        raise RuntimeError('omero.model.ActionSetup is an abstract class')
    _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
    self._version = _version
    self._vid = _vid
    self._label = _label
    self._conf = _conf
def bigendian(ptype):
    '''Will convert an integer_t to bigendian form'''
    if not issubclass(ptype, type):
        raise error.TypeError(ptype, 'bigendian')
    import __builtin__
    d = dict(ptype.__dict__)
    d['byteorder'] = config.byteorder.bigendian
    return __builtin__.type(ptype.__name__, ptype.__bases__, d)
def littleendian(ptype):
    '''Will convert a pfloat_t to littleendian form'''
    import __builtin__
    if not issubclass(ptype, type) or ptype is type:
        raise error.TypeError(ptype, 'littleendian')
    d = dict(ptype.__dict__)
    d['byteorder'] = config.byteorder.littleendian
    return __builtin__.type(ptype.__name__, ptype.__bases__, d)
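# Hedged sketch (standalone toy classes, not ptypes itself) of the rebuild
# trick shared by bigendian()/littleendian() above: copy the class dict,
# change one attribute, and re-create the type with the same name and bases.
import __builtin__

class ptype_base(object):
    pass

class integer_like(ptype_base):
    byteorder = 'little'

def with_byteorder(cls, order):
    d = dict(cls.__dict__)
    d['byteorder'] = order
    return __builtin__.type(cls.__name__, cls.__bases__, d)

big = with_byteorder(integer_like, 'big')
assert big.byteorder == 'big'
assert integer_like.byteorder == 'little'   # the original class is untouched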
def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _node=None, _uuid=None, _owner=None, _timeToIdle=None,
             _timeToLive=None, _started=None, _closed=None, _message=None,
             _defaultEventType=None, _userAgent=None, _eventsSeq=None,
             _eventsLoaded=False, _annotationLinksSeq=None,
             _annotationLinksLoaded=False, _annotationLinksCountPerOwner=None,
             _group=None, _itemCount=None, _active=None, _data=None):
    if __builtin__.type(self) == _M_omero.model.Share:
        raise RuntimeError('omero.model.Share is an abstract class')
    _M_omero.model.Session.__init__(
        self, _id, _details, _loaded, _version, _node, _uuid, _owner,
        _timeToIdle, _timeToLive, _started, _closed, _message,
        _defaultEventType, _userAgent, _eventsSeq, _eventsLoaded,
        _annotationLinksSeq, _annotationLinksLoaded,
        _annotationLinksCountPerOwner)
    self._group = _group
    self._itemCount = _itemCount
    self._active = _active
    self._data = _data
def value(self, section, key, default=None, type=str):
    # required to restore the shadowed __builtin__.type()
    import __builtin__
    if default is not None:
        _type = __builtin__.type(default)
    else:
        _type = type
    val = self._config_parser.get(section, key)
    return self.get_value(val, default, _type)
def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _cdStart=None, _cdEnd=None, _bitResolution=None):
    if __builtin__.type(self) == _M_omero.model.QuantumDef:
        raise RuntimeError('omero.model.QuantumDef is an abstract class')
    _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
    self._version = _version
    self._cdStart = _cdStart
    self._cdEnd = _cdEnd
    self._bitResolution = _bitResolution
def typecheck(item, type, recursive=False):
    # `type` can be for example: "Block", Block, ["list", ["Block"]], list
    # `recursive=False` means don't recurse on pandoc types, but we still
    # check the native Python objects, which do not typecheck in __init__.
    # TODO: handle tuples properly: tuples exist as lists in the json AND
    #       we also store them as lists (ex: Attr) to keep the mutability.
    #       Only maps have "real" tuples? Check that. Rethink the mutability?
    #       Mutability matters, but the ability to infer the "natural" Python
    #       type from the Haskell spec also matters ... Think about Attr and
    #       Map together wrt the use of tuples (these are the only
    #       occurrences afaict).
    types = globals()
    if isinstance(type, str):
        root_type = type = types[type]
    if isinstance(type, __builtin__.type) and not issubclass(type, PandocType):
        assert type in (list, tuple, map, int, bool, unicode, float)
        return isinstance(item, type)  # relax for tuples.
    if isinstance(type, list):
        root_type = types[type[0]]
    if issubclass(root_type, PandocType):
        if not isinstance(item, root_type):
            error = "{0} is not of type {1}"
            raise TypeError(error.format(item, root_type))
        root_type = __builtin__.type(item)
        if recursive:
            if len(list(item)) != len(root_type.args_type):
                error = "invalid number of arguments, {0} instead of {1}"
                raise TypeError(error.format(len(list(item)),
                                             len(root_type.args_type)))
            else:
                for _type, child in zip(root_type.args_type, list(item)):
                    typecheck(child, _type, recursive=recursive)
    else:
        assert root_type in (list, tuple, map)
        if not isinstance(item, root_type):  # relax for tuples.
            error = "{0} is not of type {1}"
            raise TypeError(error.format(item, root_type))
        if root_type is list:
            for child in list(item):
                child_type = type[1][0]
                typecheck(child, child_type, recursive=recursive)
        elif root_type is tuple:
            child_types = type[1]
            if len(child_types) != len(list(item)):
                error = "invalid number of arguments, {0} instead of {1}"
                raise TypeError(error.format(len(child_types),
                                             len(list(item))))
            else:
                for _type, child in zip(child_types, list(item)):
                    typecheck(child, _type, recursive=recursive)
        elif root_type is map:
            map_as_list = list(Sequence.iter(item))
            kv_type = type[1]
            _type = ["list", ["tuple", kv_type]]
            typecheck(map_as_list, _type, recursive=recursive)
def list(ap='.', r=False, type='', t='', d=False, dir=False, f=False, file=False):
    '''Params: bool r = recursion; str (type, t) made of '(d,f,a,r)'; by default return all'''
    # print ap
    if dir: d = True
    if file: f = True
    if t and not type: type = t
    if 'd' in type: d = True
    if 'f' in type: f = True
    if 'a' in type: d = True; f = True
    if 'r' in type: r = True
    if d or dir or f or file:
        pass
    else:
        d = f = True  # default: return all
    if py.type(ap) != py.type('') or py.len(ap) < 1:
        ap = '.'
    # if len(ap) == 2 and ap.endswith(':'): ap += '/'
    if not U.inMuti(ap, '/', '\\', f=str.endswith):
        ap += '/'
    # print ap
    # U.repl()
    ########## below, rls holds the result
    rls = []
    try:
        r3 = py.list(_os.walk(ap).next())
    except Exception as ew:
        # print ap; raise ew
        return []
    if ap == './':
        ap = ''
    # U.repl()
    r3[1] = [ap + i for i in r3[1]]
    r3[2] = [ap + i for i in r3[2]]
    if d:
        rls.extend(r3[1])
    # if r:
    #     for i in r3[1]: rls.extend(list(i, r=r, d=d, f=f))
    if f:
        rls.extend(r3[2])
    return rls
def get(self, expr="/proc/meminfo"): """ This process works as yangcli's GET function. A lot of information can be got from the running netconf agent. If an xpath-based expression is also set, the results can be filtered. The results are not printed out in a file, it's only printed to stdout """ reply = self.__connection.get(filter=('xpath', expr)).data_xml print(type(reply))
def __init__(self, _id=None, _details=None, _loaded=False, _version=None,
             _attenuation=None, _wavelength=None, _lightSource=None,
             _microbeamManipulation=None):
    if __builtin__.type(self) == _M_omero.model.LightSettings:
        raise RuntimeError('omero.model.LightSettings is an abstract class')
    _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
    self._version = _version
    self._attenuation = _attenuation
    self._wavelength = _wavelength
    self._lightSource = _lightSource
    self._microbeamManipulation = _microbeamManipulation
def count(fields):
    if sumTitle != "All Mutations":
        # if (not any([x == typeValue for x in fields])):
        if __builtin__.type(typeValue) is list:
            if not any([x == fields[typeField[0]] for x in typeValue]):
                return
        else:
            if not fields[typeField[0]] == typeValue:
                return
        if subtypeField[0] != []:
            if (subtypeComparison == "equal") or (subtypeComparison == "greater"):
                subtypeFieldValue = int(fields[subtypeField[0]])
            else:
                subtypeFieldValue = fields[subtypeField[0]]
            if not compare(subtypeFieldValue, subtypeValue, subtypeComparison):
                return
    # if (True):
    # if (type == "10"):
    # if ((type != "10") & (fields[typeField] != "N")) {
    #     print lnum, type, subtype, subtypeColumn, subtypeField[0]
    #     typeField, subtypeField
    # }
    if (not variants[0].has_key(fields[VARIANT_TYPE_FIELD])
            and fields[COMPLEX_ID_FIELD] == ""):
        (variants[0])[fields[VARIANT_TYPE_FIELD]] = 1
    else:
        if fields[COMPLEX_ID_FIELD] == "":
            (variants[0])[fields[VARIANT_TYPE_FIELD]] = \
                (variants[0])[fields[VARIANT_TYPE_FIELD]] + 1
    if fields[CLUSTER_ID_FIELD] != "":
        (clusters[0])[fields[CLUSTER_ID_FIELD]] = fields[CLUSTER_COORD_FIELD]
    if fields[COMPLEX_ID_FIELD] != "":
        (complex[0])[fields[COMPLEX_ID_FIELD]] = fields[COMPLEX_ID_FIELD]
        # cat("c", fields[COMPLEX_ID_FIELD], "c")
    # 04/15/14 change to all non-complex rows
    if fields[COMPLEX_ID_FIELD] == "":
        mutations[0] = mutations[0] + 1
    if (fields[COMPLEX_ID_FIELD] == "") and (fields[VARIANT_TYPE_FIELD] == "SNP"):
        # 04/15/14 change to all non-complex rows
        # mutations <- mutations+1
        # for (title in findTitles[as.logical(as.numeric(fields[fieldNames %in% findTitles]))]) {
        for title in [findTitles[x] for x in
                      which([bool(x) for x in
                             [int(x) for x in
                              [fields[x] for x in
                               which([x in findTitles for x in fieldNames])]]])]:
            mutationCount = (mutList[0])[title][fields[TUMOR_SEQ_ALLELE2_FIELD]]
            (mutList[0])[title][fields[TUMOR_SEQ_ALLELE2_FIELD]] = mutationCount + 1
        # baseCounts <- as.numeric(fields[fieldNames %in% countTitles])
        baseCounts = [int(x) for x in [fields[x] for x in uniqueFieldNumbers]]
        for i in range(len(baseCounts)):
            (baseCountList[0])[uniqueFieldNames[i]] = \
                (baseCountList[0])[uniqueFieldNames[i]] + baseCounts[i]
    else:
        sys.stdout.write("X")
def runBuildCommand(arguments=None, currentWorkingDirectory=None,
                    callerArguments=None, init_environment=None):
    if init_environment is None:
        init_environment = {}
    buildcommand = ['make']
    if hasattr(callerArguments, 'buildcommand') and callerArguments.buildcommand:
        buildcommand = callerArguments.buildcommand.split()
    if arguments:
        buildcommand.extend(arguments if __builtin__.type(arguments) is list
                            else arguments.split())
    return runCommand(buildcommand, currentWorkingDirectory, callerArguments,
                      init_environment=init_environment)
def select(msg="", title="Sikuli Selection", options=(), default=None): if len(options) == 0: return "" if default: if not __builtin__.type(default) is types.StringType: try: default = options[default] except: default = None return Sikulix.popSelect(msg, title, options, default)
def test_should_verify_repeat_field_added_to_questionnaire(self):
    xls_parser_response = XlsFormParser(self.REPEAT, u"My questionnairé").parse()
    mangroveService = MangroveService(self.mock_request,
                                      xls_parser_response=xls_parser_response)
    mangroveService.create_project()
    questionnaire_code = mangroveService.questionnaire_code
    mgr = mangroveService.manager
    from_model = get_form_model_by_code(mgr, questionnaire_code)
    self.assertNotEqual([], [f for f in from_model.fields
                             if type(f) is FieldSet and f.fields])
def wait(self, target, timeout=None):
    ttype = __builtin__.type(target)
    if ttype is types.IntType or ttype is types.FloatType:
        time.sleep(target)
        return
    if timeout is None:
        ret = JRegion.wait(self, target)
    else:
        ret = JRegion.wait(self, target, timeout)
    return ret
def buscarmesasajax(request):
    if request.method == 'GET':
        q = request.GET['q']
        print(type(q))
        listado = Mesa.objects.filter(id=q).order_by('sector')[:30]
        print("listado mesas ajax")
        print(listado)
        return render_to_response(
            'Salon/modificarmesa/busquedaresultados.html',
            {'listado': listado},
            context_instance=RequestContext(request))
def format(obj, level=0):
    type = __builtin__.type(obj)
    if type is NoneType:
        return red('None')
    if type is TypeType:
        pass
    if type is BooleanType:
        return green(str(obj))
    if type in [StringType, UnicodeType]:
        return yellow(str(obj))
    if type in [IntType, LongType, FloatType, ComplexType]:
        return bold_blue(str(obj))
    if type in (TupleType, ListType):
        open, close = ('(', ')') if type is TupleType else ('[', ']')
        if len(obj) == 0:
            return open + close
        s = []
        i = 0
        width = str(len(str(len(obj))))
        for e in obj:
            s.append(('%s[%' + width + 'd] %s') %
                     (indent(level + 1), i, format(e, level + 1)))
            i += 1
        return open + "\n" + \
               ",\n".join(s) + \
               "\n" + indent(level) + close
    if type is DictType:
        if len(obj) == 0:
            return '{}'
        width = str(max([flen(format(k)) for k in obj.keys()]))
        s = []
        for k in obj.keys():
            v = obj[k]
            s.append(('%s%' + width + 's: %s') %
                     (indent(level + 1), format(k), format(v, level + 1)))
        return '{' + "\n" + \
               ",\n".join(s) + \
               "\n" + indent(level) + '}'
    if type is LambdaType:
        return str(obj)
    return str(obj)
def capture(*args):
    scr = ScreenUnion()
    if len(args) == 0:
        simg = scr.userCapture()
        if simg:
            return simg.getFilename()
        else:
            return None
    elif len(args) == 1:
        if (__builtin__.type(args[0]) is types.StringType
                or __builtin__.type(args[0]) is types.UnicodeType):
            simg = scr.userCapture(args[0])
            if simg:
                return simg.getFilename()
            else:
                return None
        else:
            return scr.capture(args[0]).getFilename()
    elif len(args) == 4:
        return scr.capture(args[0], args[1], args[2], args[3]).getFilename()
    else:
        return None
def Field(module, str_enum):
    class_module = __builtin__.type(module, (object,), {})
    str_enum = str_enum.replace(" ", "")
    str_enum = str_enum.replace("\n", "")
    idx = 0
    for name in str_enum.split(","):
        if '=' in name:
            name, val = name.rsplit('=', 1)
            if val.isalnum():
                idx = eval(val)
        setattr(class_module, name.strip(), idx)
        idx += 1
    return class_module
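# Usage sketch for the Field() enum factory above (enum names and values are
# illustrative): entries without '=' continue counting from the previous
# value, like a C enum.
Color = Field('Color', """
    RED = 1,
    GREEN,
    BLUE = 10,
    CYAN
""")
assert Color.RED == 1
assert Color.GREEN == 2
assert Color.BLUE == 10
assert Color.CYAN == 11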
def paste(self, *args):
    if len(args) == 1:
        target = None
        s = args[0]
    elif len(args) == 2:
        target = args[0]
        s = args[1]
    t_str = __builtin__.type(s)
    if t_str is types.UnicodeType:
        pass  # do nothing
    elif t_str is types.StringType:
        s = java.lang.String(s, "utf-8")
    return JRegion.paste(self, target, s)
def runInstallCommand(arguments=['install'], currentWorkingDirectory=None,
                      callerArguments=None, init_environment=None):
    if init_environment is None:
        init_environment = {}
    if hasattr(callerArguments, 'installcommand') and callerArguments.installcommand:
        installcommand = callerArguments.installcommand.split()
    else:
        installcommand = ['make', '-j1']
        # had the case that the -j1 on the make command was ignored if there
        # is a MAKEFLAGS variable
        if os.name != 'nt':
            init_environment["MAKEFLAGS"] = "-j1"
    if arguments:
        installcommand.extend(arguments if __builtin__.type(arguments) is list
                              else arguments.split())
    return runCommand(installcommand, currentWorkingDirectory, callerArguments,
                      init_environment=init_environment)
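# Tiny standalone sketch of the list-or-string argument handling shared by
# runBuildCommand() and runInstallCommand() above (function name is
# illustrative only).
import __builtin__

def normalize_arguments(arguments):
    return arguments if __builtin__.type(arguments) is list else arguments.split()

assert normalize_arguments('install -j1') == ['install', '-j1']
assert normalize_arguments(['install']) == ['install']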
def __init__(self, name, type=None, value=None):
    if value is None:
        self.name = str(name)
        self.value = name
    else:
        assert isinstance(name, str)
        self.name = name
        self.value = value
    if type is None:
        self.type = self.__types[__builtin__.type(self.value)]
    else:
        self.type = str(type)
    self.op = "cnst"
    self.args = (value,)
def setbyteorder(endianness):
    import __builtin__
    if endianness in (config.byteorder.bigendian, config.byteorder.littleendian):
        for k, v in globals().iteritems():
            if (v is not integer_t and isinstance(v, __builtin__.type)
                    and issubclass(v, integer_t)
                    and getattr(v, 'byteorder',
                                config.defaults.integer.order) != endianness):
                d = dict(v.__dict__)
                d['byteorder'] = endianness
                # re-instantiate types
                globals()[k] = __builtin__.type(v.__name__, v.__bases__, d)
                continue
        return
    elif getattr(endianness, '__name__', '').startswith('big'):
        return setbyteorder(config.byteorder.bigendian)
    elif getattr(endianness, '__name__', '').startswith('little'):
        return setbyteorder(config.byteorder.littleendian)
    raise ValueError("Unknown integer endianness {!r}".format(endianness))
def buscarmesasajaxResultados(request):
    if request.method == 'GET':
        q = request.GET['q']
        print(type(q))
        if q != "":
            mesas = Mesa.objects.filter(id=q).order_by('sector')
        else:
            mesas_lista = Mesa.objects.all().order_by("sector")
            paginator = Paginator(mesas_lista, PAGINADO_PRODUCTOS)
            mesas = paginator.page(1)
        return render_to_response(
            'Salon/modificarmesa/busquedaresultados_items.html',
            {'mesas': mesas},
            context_instance=RequestContext(request))
def isErrorReply(self, data):
    """
    """
    if ioHub.isIterable(data) and len(data) > 0:
        if ioHub.isIterable(data[0]):
            return False
        else:
            if (type(data[0]) in (str, unicode)) and data[0].find('ERROR') >= 0:
                return ioHubClientException(data)
            return False
    else:
        raise ioHubClientException(
            'Response from ioHub should always be iterable and have a length > 0')
def onChange(self, arg1, arg2=None):
    t_arg1 = __builtin__.type(arg1)
    if t_arg1 is types.IntType:
        min_size = arg1
        handler = arg2
    else:
        min_size = None
        handler = arg1

    class AnonyObserver(SikuliEventAdapter):
        def targetChanged(self, event):
            handler(event)

    if min_size is not None:
        return JRegion.onChange(self, min_size, AnonyObserver())
    return JRegion.onChange(self, AnonyObserver())
def __call__(cls, *lstArgs, **dictArgs):
    """ Creates an instance (OWL individual) """
    # create the instance of cls in memory
    instance = cls.__new__(cls)
    # doesn't seem to be necessary
    # instance.__init__()
    # set attributes
    # gives the illusion of instantiating multiple classes
    # this is a dirty hack, but perhaps all that's possible given
    # Python's semantics
    # print type(instance)
    instance._types = []
    if len(lstArgs) == 2:
        instance._types = lstArgs[1]
    instance._types.append(__builtin__.type(instance))
    # store the type name for use in Ontology.py
    if len(lstArgs) == 2:
        instance._type_names = []
        for t in lstArgs[1]:
            instance._type_names.append(t.name)
    # quote is not strictly needed here, but it's added to be
    # consistent with the uri of 'cls' in 'new'
    instance.uri = quote(URIRef(lstArgs[0]))
    instance.name = getLocalName(instance.uri)
    # instance.uri = instance.uri.replace('#', '/')
    # instance.getTypeNames = instancemethod(getTypeNames, instance, cls)
    instance.getURI = instancemethod(getURI, instance, cls)
    # print 'created a ' + cls.name
    cls._instances.append(instance)
    return instance
def __init__(self, timeout=0, compress=False, host='', port=0,
             protocolMajor=0, protocolMinor=0, encodingMajor=0,
             encodingMinor=0, mcastInterface='', mcastTtl=0):
    if __builtin__.type(self) == _M_Ice.UDPEndpointInfo:
        raise RuntimeError('Ice.UDPEndpointInfo is an abstract class')
    _M_Ice.IPEndpointInfo.__init__(self, timeout, compress, host, port)
    self.protocolMajor = protocolMajor
    self.protocolMinor = protocolMinor
    self.encodingMajor = encodingMajor
    self.encodingMinor = encodingMinor
    self.mcastInterface = mcastInterface
    self.mcastTtl = mcastTtl
def create_schema(self, tableName, *args, **kwarg):
    try:
        cur_table = Table(
            tableName, self.metadata,
            Column('Tid', self.__type_map['int'],
                   primary_key=True, autoincrement=True))
        for key in kwarg:
            cur_table.append_column(
                Column(key, self.__type_map[kwarg[key]]))
        cur_table.create()
        setattr(
            self, tableName,
            __builtin__.type(str(tableName), (self.__Base,),
                             {'__table__': cur_table}))
        self.tab_list[tableName] = getattr(self, tableName)
    except Exception as e:
        print Exception, ":", e
    finally:
        return
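# Hedged sketch (in-memory SQLite, illustrative table) of the dynamic mapping
# trick used by create_schema() above: build a declarative class at runtime
# with the three-argument type() call and an existing Table object.
import __builtin__
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

engine = create_engine('sqlite://')
metadata = MetaData()
Base = declarative_base(metadata=metadata)

users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(50)))
User = __builtin__.type(str('users'), (Base,), {'__table__': users})
metadata.create_all(engine)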