def initialize(self):
    self.log.trace()
    self.conf['command'] = os.path.basename(self.conf['path'])
    if not os.access(self.conf['path'], os.X_OK):
        raise getmailConfigurationError('%s not executable'
                                        % self.conf['path'])
    if type(self.conf['arguments']) != tuple:
        raise getmailConfigurationError(
            'incorrect arguments format; see documentation (%s)'
            % self.conf['arguments'])
    try:
        self.exitcodes_keep = [int(i) for i in self.conf['exitcodes_keep']
                               if 0 <= int(i) <= 255]
        self.exitcodes_drop = [int(i) for i in self.conf['exitcodes_drop']
                               if 0 <= int(i) <= 255]
        if not self.exitcodes_keep:
            raise getmailConfigurationError('exitcodes_keep set empty')
        if sets.ImmutableSet(self.exitcodes_keep).intersection(
                sets.ImmutableSet(self.exitcodes_drop)):
            raise getmailConfigurationError('exitcode sets intersect')
    except ValueError, o:
        raise getmailConfigurationError('invalid exit code specified (%s)' % o)
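# A minimal sketch (not from getmail itself) of the deprecated Python 2
# `sets` module API that this and the following snippets rely on:
# sets.ImmutableSet is the hashable precursor of the frozenset builtin and
# supports the same intersection/union/difference methods.
import sets

keep = sets.ImmutableSet([0, 1, 2])
drop = sets.ImmutableSet([2, 3])
assert keep.intersection(drop) == sets.Set([2])  # the overlap check used above
assert keep.union(drop) == sets.Set([0, 1, 2, 3])
# ImmutableSet is hashable, so it can serve as a dict key:
d = {sets.ImmutableSet(['a', 'b']): 'value'}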
def write_oldmailfile(self, forget_deleted=True):
    '''Write oldmail info to oldmail file.'''
    self.log.trace()
    if (self.__oldmail_written or not self.__initialized
            or not self.gotmsglist):
        return
    wrote = 0
    try:
        f = updatefile(self.oldmail_filename)
        msgids = sets.ImmutableSet(self.__delivered.keys()).union(
            sets.ImmutableSet(self.oldmail.keys()))
        for msgid in msgids:
            self.log.debug('msgid %s ...' % msgid)
            if forget_deleted and msgid in self.deleted:
                # Already deleted, don't remember this one
                self.log.debug(' was deleted, skipping' + os.linesep)
                continue
            # not deleted, remember this one's time stamp
            t = self.oldmail.get(msgid, self.timestamp)
            self.log.debug(' timestamp %s' % t + os.linesep)
            f.write('%s\0%i%s' % (msgid, t, os.linesep))
            wrote += 1
        f.close()
        self.log.moreinfo('wrote %i uids for %s' % (wrote, self) + os.linesep)
    except IOError, o:
        self.log.error('failed writing oldmail file for %s (%s)'
                       % (self, o) + os.linesep)
        f.abort()
def check_py_token_dbs_same_names():
    """Compare render.py DB_TOKENS key name sets among db types"""
    all_same = True
    token_name_set = None
    token_name_set_src = ''
    for dbtype in render.DB_TOKENS.keys():
        if dbtype in render.DBTYPES_FALLTHROUGH_ONLY:
            tokens = render.DB_TOKENS[dbtype].keys()
            if (len(tokens) != 1) or ('fallthrough' not in tokens):
                all_same = False
                print(progname
                      + ": render.py DB_TOKEN expected only 'fallthrough' for dbtype "
                      + dbtype)
        elif token_name_set is None:
            token_name_set = sets.ImmutableSet(render.DB_TOKENS[dbtype].keys())
            token_name_set_src = dbtype
        else:
            diffr = (token_name_set
                     ^ (sets.ImmutableSet(render.DB_TOKENS[dbtype].keys())))
            if len(diffr):
                all_same = False
                print(progname + ": render.py DB_TOKEN sets differ: "
                      + token_name_set_src + " vs dbtype " + dbtype
                      + ": " + ", ".join(diffr))
    return all_same
def _parse_templates(self, rootelem):
    indices = []
    templates = []
    template_directory_names = set([])
    formatted_dir_names = set([])
    for templateselem in rootelem.findall('./templates'):
        indices = etce.utils.nodestr_to_nodelist(
            templateselem.get('indices'))
        indices_set = set(indices)
        templates_global_overlaylists = \
            OverlayListChainFactory().make(
                templateselem.findall("./overlaylist"), indices)
        for elem in list(templateselem):
            # ignore comments
            if isinstance(elem, lxml.etree._Comment):
                continue
            template_indices = indices
            template_indices_str = elem.attrib.get('indices')
            if template_indices_str:
                template_indices = etce.utils.nodestr_to_nodelist(
                    template_indices_str)
                if not set(template_indices).issubset(indices_set):
                    message = 'indices for template element "%s" are not ' \
                              'a subset of parent templatefiles indices. ' \
                              'Quitting.' % elem.attrib['name']
                    raise RuntimeError(message)
            if elem.tag == 'directory':
                templates.append(
                    TemplateDirectoryBuilder(elem,
                                             template_indices,
                                             self._global_overlays,
                                             templates_global_overlaylists))
            elif elem.tag == 'file':
                templates.append(
                    TemplateFileBuilder(elem,
                                        template_indices,
                                        self._global_overlays,
                                        templates_global_overlaylists))
    for t in templates:
        formatted_dir_names.update(t.formatted_hostnames)
        if isinstance(t, TemplateDirectoryBuilder):
            template_directory_names.update([t.template_directory_name])
    return (indices,
            templates,
            sets.ImmutableSet(template_directory_names),
            sets.ImmutableSet(formatted_dir_names))
def __build_frequent_patterns(self, prefix_path, item):
    """
    Build all combinations of elements from the given prefix path and item
    """
    if len(prefix_path) == 0:
        return []
    frequent_patterns = []
    prefix_path.append((item, 0))
    for length in xrange(1, len(prefix_path)):
        i = 0
        while (i + length) <= len(prefix_path):
            x = i + 1
            while (x + length) <= len(prefix_path):
                # anchor element i plus the run of `length` elements at x;
                # `pattern` avoids shadowing the built-in `set`
                pattern = [prefix_path[i][0]]
                z = x
                while (z - x) < length:
                    pattern.append(prefix_path[z][0])
                    z += 1
                frequent_patterns.append(sets.ImmutableSet(pattern))
                x += 1
            i += 1
    return frequent_patterns
def test_amf3_encode(self):
    x = sets.ImmutableSet(['1', '2', '3'])
    self.assertTrue(check_buffer(
        pyamf.encode(x, encoding=pyamf.AMF3).getvalue(),
        ('\t\x07\x01', ('\x06\x031', '\x06\x033', '\x06\x032'))))
def checkconf(self):
    self.log.trace()
    if self.__confchecked:
        return
    for item in self._confitems:
        # New class-based configuration item
        self.log.trace('checking %s\n' % item.name)
        self.conf[item.name] = item.validate(self.conf)
    unknown_params = sets.ImmutableSet(self.conf.keys()).difference(
        sets.ImmutableSet([item['name'] for item in self._confitems]))
    for param in sorted(list(unknown_params), key=str.lower):
        self.log.warning('Warning: ignoring unknown parameter "%s" '
                         '(value: %s)\n' % (param, self.conf[param]))
    self.__confchecked = True
    self.log.trace('done\n')
def cosine(repr1, repr2):
    """
    Compute the Cosine similarity between repr1 and repr2
    (http://en.wikipedia.org/wiki/Cosine_distance).
    Each repr is given as a dict: term=>value.
    Note: We could also use scipy.sparse operations, which are more concise,
    but this would require constructing a term=>id map. See common.idmap in
    my Python common package.
    """
    import math
    norm1 = 0.
    for t in repr1:
        norm1 += repr1[t] * repr1[t]
    norm1 = math.sqrt(norm1)
    norm2 = 0.
    for t in repr2:
        norm2 += repr2[t] * repr2[t]
    norm2 = math.sqrt(norm2)
    # from common.mydict import sort as dictsort
    # print dictsort(repr1)[:10]
    # print dictsort(repr2)[:10]
    import sets
    allterms = sets.ImmutableSet(repr1.keys() + repr2.keys())
    sim = 0.
    for t in allterms:
        # .get(t, 0.) guards against terms present in only one repr, which
        # would otherwise raise KeyError
        sim += (1. * repr1.get(t, 0.) * repr2.get(t, 0.) / (norm1 * norm2))
    return sim
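# A minimal usage sketch for cosine() above, with hypothetical term=>weight
# dicts; a vector compared with itself scores 1.0, and an extra zero-weight
# term does not change the result.
repr_a = {'cat': 1.0, 'dog': 2.0}
repr_b = {'cat': 1.0, 'dog': 2.0, 'fish': 0.0}
assert abs(cosine(repr_a, repr_a) - 1.0) < 1e-9
assert abs(cosine(repr_a, repr_b) - 1.0) < 1e-9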
def test_amf0_encode(self):
    x = sets.ImmutableSet(['1', '2', '3'])
    self.assertTrue(check_buffer(
        pyamf.encode(x, encoding=pyamf.AMF0).getvalue(),
        ('\n\x00\x00\x00\x03',
         ('\x02\x00\x011', '\x02\x00\x013', '\x02\x00\x012'))))
def right_options_move(self):
    """Not all and not necessarily the best right options, but if there is
    a right option <= 0, then the returned set will also contain such an
    option"""
    d = {}
    if len(self.gebiete) > 0:
        class strategy_failed(Exception):
            pass
        try:
            fuzzy = 0
            for (l, r) in self.gebiete:
                if (r % 2 == 0) and (l != r):
                    raise strategy_failed
                if (l % 2 == 1) or (l == r):
                    fuzzy = fuzzy + 1
            if fuzzy % 2 == 1:
                for i in range(len(self.gebiete)):
                    (l, r) = self.gebiete[i]
                    if (l % 2 == 1) and (r % 2 == 1):
                        if r == 1:
                            j = 0
                        else:
                            j = 1
                        return [(self.take_column(i, j), (i, j))]
                    else:
                        if (l == r):  # even case
                            return [(self.take_column(i, j), (i, j))]
            else:
                for i in range(len(self.gebiete)):
                    (l, r) = self.gebiete[i]
                    if (l % 2 == 0) and (r % 2 == 1):
                        if r == 1:
                            j = 0
                        else:
                            j = 1
                        return [(self.take_column(i, j), (i, j))]
            # shouldn't fail here in simplified games
            raise strategy_failed
        except strategy_failed:
            pass
    for i in range(len(self.gebiete)):
        (l, r) = self.gebiete[i]
        if r % 2 == 0:
            m = r / 2
        else:
            m = r / 2 + 1
        for j in range(m):
            if (l % 2 == 1) and (r % 2 == 1):
                if (r > 1) and (j != 1):
                    continue
            d[sets.ImmutableSet(self.take_column(i, j).__char())] = (i, j)
    return [(kuchen(k), d[k]) for k in d.keys()]
def Freeze(self):
    """Makes the |self| object immutable. Calling a setter on |self|'s
    properties will raise an exception."""
    assert self._finder_options
    assert self._test_class
    self._frozen = True
    self._test_cases_ids_to_run = sets.ImmutableSet(
        self._test_cases_ids_to_run)
    self._client_configs = tuple(self._client_configs)
def parse(lines):
    maze = list(lines)
    pickups = set([])
    for y, line in enumerate(maze):
        for x, c in enumerate(line):
            if c == '0':
                start = (y, x)
            elif c in '123456789':
                pickups.add((y, x))
    return Node(maze, start, sets.ImmutableSet(pickups), start)
def check_py_token_dbs_same_names():
    """Compare render.py DB_TOKENS key name sets among db types"""
    all_same = True
    token_name_set = None
    token_name_set_src = ''
    for dbtype in render.DB_TOKENS.keys():
        if token_name_set is None:
            token_name_set = sets.ImmutableSet(render.DB_TOKENS[dbtype].keys())
            token_name_set_src = dbtype
        else:
            diffr = (token_name_set
                     ^ (sets.ImmutableSet(render.DB_TOKENS[dbtype].keys())))
            if len(diffr):
                all_same = False
                print(progname + ": render.py DB_TOKEN sets differ: "
                      + token_name_set_src + " vs dbtype " + dbtype
                      + ": " + ", ".join(diffr))
    return all_same
def parse_file():
    if len(sys.argv) < 2:
        print "Error: no input"
        exit()
    f = open(sys.argv[1], 'r')
    node_count = int(f.readline())
    paths = []
    for ln in f.readlines():
        paths.append(sets.ImmutableSet(map(int, ln.split())))
    return node_count, paths
def addGroupMember(self, member):
    if member is None:
        return None
    if self.getGroupMembers() is None:
        # sets.Set is the mutable variant (roughly Java's HashSet);
        # sets.ImmutableSet has no add() method, so it cannot be used here
        self.groupMembers = sets.Set()
    ## if member == self:
    ##     raise APIException("An obsGroup cannot have itself as a mentor. obsGroup: " + self \
    ##                        + " obsMember attempting to add: " + member)
    ## (APIException is defined in another Java class file; not sure if Python has this)
    member.setObsGroup(self)
    self.groupMembers.add(member)
def __init__(self, name, value, ignore_vals=None, multiple=True):
    self.name = name
    self.multiple = multiple
    if self.multiple:
        if value is None:
            value = []
        else:
            value = list(value)
    self.value = value
    self.search_pat = ''
    self.errorstr = ''
    if ignore_vals is None:
        ignore_vals = sets.ImmutableSet()
    self.ignore_vals = ignore_vals
def parse_file():
    if len(sys.argv) < 2:
        print "Error: no input"
        exit()
    f = open(sys.argv[1], 'r')
    node_count = int(f.readline())
    scores = dict()
    paths = []
    for ln in f.readlines():
        nums = map(int, ln.split())
        p = sets.ImmutableSet(nums[1:])
        scores[p] = int(nums[0])
        paths.append(p)
    return node_count, paths, scores
def process_file(ontology):
    content = file(ontologies_dir + ontology, 'r').read()
    matches = re.findall('[A-Z][A-Za-z]+\(', content)
    matches = matches + re.findall(
        '(Data[A-Za-z]+\(|Object[a-zA-Z]+\(|HasKey)', content)
    matches = [a[:-1] for a in matches]
    axioms = sets.ImmutableSet(matches)
    key = hash(axioms)
    categories[key] = axioms
    if ontologies.has_key(key):
        ontologies[key].append(ontology)
    else:
        ontologies[key] = [ontology]
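# Side note, as a sketch (names suffixed _demo are hypothetical): because
# ImmutableSet is hashable it could be used directly as the dict key; keying
# on hash(axioms), as above, would silently merge two distinct axiom sets
# whose hashes happen to collide.
axioms_demo = sets.ImmutableSet(['SubClassOf', 'ObjectIntersectionOf'])
ontologies_demo = {axioms_demo: ['example.owl']}  # collision-safe keying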
def nexts(self):
    (y, x) = self.pos
    for dy in [-1, 0, 1]:
        for dx in [-1, 0, 1]:
            if abs(dy + dx) == 1:  # orthogonal neighbours only
                new_pos = (y + dy, x + dx)
                if new_pos in self.pickups:
                    new_pickups = sets.ImmutableSet({
                        pickup for pickup in self.pickups
                        if pickup != new_pos
                    })
                else:
                    new_pickups = self.pickups
                yield (1, Node(self.maze, new_pos, new_pickups, self.origin))
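# A sketch of why the pickups set is frozen: ImmutableSet is hashable, so a
# (position, remaining-pickups) pair can be stored in a visited set during a
# breadth-first search over Node states.  shortest_route is a hypothetical
# driver built on the parse() and Node.nexts() definitions above.
import collections

def shortest_route(lines):
    start = parse(lines)
    seen = set([(start.pos, start.pickups)])
    queue = collections.deque([(0, start)])
    while queue:
        dist, node = queue.popleft()
        if not node.pickups:  # every pickup collected
            return dist
        for cost, succ in node.nexts():
            key = (succ.pos, succ.pickups)  # hashable thanks to ImmutableSet
            if key not in seen:
                seen.add(key)
                queue.append((dist + cost, succ))
    return None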
def filter_bugs(infile, outfile):
    '''
    Picks out specific bugs from the silva database.
    '''
    f_in = open(infile, "r")
    f_out = open(outfile, "w")
    regex = r';([A-Za-z]+) ([A-Za-z]+).*\n'
    bugs_list = ["Fusobacterium nucleatum",
                 "Yersinia pestis",
                 "Trichodesmium erythraeum",
                 "Treponema pallidum",
                 "Streptococcus sanguinis",
                 "Prevotella melaninogenica"]
    bugs = sets.ImmutableSet(bugs_list)
    print(bugs)
    fKeep = False
    for line in f_in:
        if line[0] == '>':
            # Only keep those that are in our set
            match = re.search(regex, line)
            if match:
                strBugName = match.group(1) + " " + match.group(2)
                if strBugName in bugs:
                    fKeep = True
                    print(line)
                else:
                    fKeep = False
            else:
                fKeep = False
        if fKeep:
            f_out.write(line)
            # write to own separate file
            with open("_".join(strBugName.split()) + ".fna", "a") as f:
                f.write(line)
    f_in.close()
    f_out.close()
a['SetType'] = _set = set()
a['FrozenSetType'] = frozenset()
# built-in exceptions (CH 6)
a['ExceptionType'] = _exception = _function2()[0]
# string services (CH 7)
a['SREPatternType'] = _srepattern = re.compile('')
# data types (CH 8)
a['ArrayType'] = array.array("f")
a['DequeType'] = collections.deque([0])
a['DefaultDictType'] = collections.defaultdict(_function, _dict)
a['TZInfoType'] = datetime.tzinfo()
a['DateTimeType'] = datetime.datetime.today()
a['CalendarType'] = calendar.Calendar()
if not PY3:
    a['SetsType'] = sets.Set()
    a['ImmutableSetType'] = sets.ImmutableSet()
    a['MutexType'] = mutex.mutex()
# numeric and mathematical types (CH 9)
a['DecimalType'] = decimal.Decimal(1)
a['CountType'] = itertools.count(0)
# data compression and archiving (CH 12)
a['TarInfoType'] = tarfile.TarInfo()
# generic operating system services (CH 15)
a['LoggerType'] = logging.getLogger()
a['FormatterType'] = logging.Formatter()  # pickle ok
a['FilterType'] = logging.Filter()  # pickle ok
a['LogRecordType'] = logging.makeLogRecord(_dict)  # pickle ok
a['OptionParserType'] = _oparser = optparse.OptionParser()  # pickle ok
a['OptionGroupType'] = optparse.OptionGroup(_oparser, "foo")  # pickle ok
a['OptionType'] = optparse.Option('--foo')  # pickle ok
if HAS_CTYPES:
        getattr(child.__class__, self.key).impl.remove(
            child._state, instance, initiator, passive=True)


class ClassState(object):
    """tracks state information at the class level."""

    def __init__(self):
        self.mappers = {}
        self.attrs = {}
        self.has_mutable_scalars = False


import sets
_empty_set = sets.ImmutableSet()


class InstanceState(object):
    """tracks state information at the instance level."""

    def __init__(self, obj):
        self.class_ = obj.__class__
        self.obj = weakref.ref(obj, self.__cleanup)
        self.dict = obj.__dict__
        self.committed_state = {}
        self.modified = False
        self.callables = {}
        self.parents = {}
        self.pending = {}
        self.appenders = {}
        self.instance_dict = None
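# A short sketch of the design choice above: ImmutableSet has no mutating
# methods, so one module-level _empty_set can be safely aliased by every
# InstanceState as a default value, with no risk of state leaking between
# instances the way a shared mutable set() would.
shared_a = _empty_set
shared_b = _empty_set
assert shared_a is shared_b and len(shared_a) == 0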
def check_java_token_values_vs_py(dbh_java_fullpath):
    """Compare render.py DB_TOKENS values against those in SOCDBHelper.upgradeSchema"""
    global java_all_ok
    java_all_ok = True
    line_num = 0
    state = ""  # parser 'state machine' shorthand for next part of the comparison area

    def print_err(msg):
        global java_all_ok
        print(progname + ".check_java_token_values_vs_py: " + dbh_java_fullpath
              + " line " + str(line_num)
              + ": Parse error within COMPARISON AREA, see py source; state="
              + state + ": " + msg)
        java_all_ok = False

    try:
        token_names = None
        token_dbtype_vals = {}  # key = dbtype or 'default', val = dict with tokennames & values
        with open(dbh_java_fullpath) as f:
            # Read lines until we see "TOKEN DECLARATION LIST".
            # At that point parse the next line for token list.
            # Then, read lines until we see "BEGIN COMPARISON AREA".
            # At that point read and "parse"; ignore comment-only lines.
            # When we see "END COMPARISON AREA" (hopefully at expected time), stop reading.
            saw_decl_line = False
            saw_begin_line = False
            saw_all_expected = False
            curr_case_dbtype = None  # while parsing switch cases; 'default' can be a value here
            dbtypes_fallthrough_to_next = []
                # while parsing switch cases:
                # "// fallthrough" db types to note when reaching next non-fallthrough
                # (not otherwise used yet: if 2 DB types share all tokens,
                # they can share the same generated template)
            while java_all_ok:
                f_line = f.readline()
                if not f_line:
                    break  # EOF: readline() returns '', never None, at end of file
                f_line = f_line.strip()
                line_num += 1
                if not len(f_line):
                    continue
                if not saw_begin_line:
                    if state == 'decl':
                        if f_line.startswith("//"):
                            continue
                        # assumes 2 or more tokens are declared, all on same line
                        if f_line.startswith("private static String "):
                            m = re.search(r"String\s+(\w+(,\s*\w+)+)\s*;", f_line)
                            if m:
                                token_names = sets.Set(
                                    [tokname.strip() for tokname in m.group(1).split(',')])
                                state = ''  # will read until BEGIN COMPARISON AREA
                            else:
                                print_err("failed regex match: private static String ...")
                        else:
                            print_err("expected: private static String")
                    elif f_line == "// TOKEN DECLARATION LIST -- test_token_consistency.py":
                        saw_decl_line = True
                        state = 'decl'
                    elif f_line == "// BEGIN COMPARISON AREA -- test_token_consistency.py":
                        saw_begin_line = True
                        state = 'switch'
                        if not saw_decl_line:
                            print(progname
                                  + ': Missing "TOKEN DECLARATION LIST" before "BEGIN COMPARISON AREA" (line '
                                  + str(line_num) + ') in ' + dbh_java_fullpath)
                            java_all_ok = False
                elif f_line == "// END COMPARISON AREA -- test_token_consistency.py":
                    if not saw_all_expected:
                        print(progname + ': "END COMPARISON AREA" too early (line '
                              + str(line_num) + ' state ' + state + ') in '
                              + dbh_java_fullpath)
                        java_all_ok = False
                    else:
                        break  # <--- Normal read-loop termination ---
                else:
                    if f_line.startswith("//") and (f_line != '// fallthrough'):
                        continue
                    if state == 'switch':
                        if re.search(r"^switch\w*\(dbType\)$", f_line):
                            state = '{'
                        else:
                            print_err("failed regex match")
                    elif state == '{':
                        if f_line == '{':
                            state = 'case'  # expects case:, default:, or '}'
                    elif state == 'case':
                        if f_line == '}':
                            state = 'end'
                            saw_all_expected = True
                        elif f_line == 'default:':
                            state = 'caseline'
                            curr_case_dbtype = 'default'
                            token_dbtype_vals[curr_case_dbtype] = {}
                        else:
                            m = re.search(r"^case\s+DBTYPE_(\w+)\s*:", f_line)
                            if m:
                                state = 'caseline'
                                curr_case_dbtype = m.group(1).lower()
                                if curr_case_dbtype == 'postgresql':
                                    curr_case_dbtype = 'postgres'
                                token_dbtype_vals[curr_case_dbtype] = {}
                            else:
                                print_err("failed regex match: case DBTYPE_...")
                    elif state == 'caseline':
                        if f_line == 'break;':
                            # done parsing this case
                            if len(dbtypes_fallthrough_to_next):
                                for fall_dbtype in dbtypes_fallthrough_to_next:
                                    token_dbtype_vals[fall_dbtype]['fallthrough'] \
                                        = curr_case_dbtype
                                dbtypes_fallthrough_to_next = []
                            state = 'case'
                        elif f_line == '// fallthrough':
                            dbtypes_fallthrough_to_next.append(curr_case_dbtype)
                            state = 'case'
                        else:
                            m = re.search(r'^(\w+)\s*=\s*"([^"]*)";\s*$', f_line)
                            if m:
                                token_dbtype_vals[curr_case_dbtype][m.group(1)] = m.group(2)
                            else:
                                print_err("failed regex match: var assign | break | // fallthrough")
                    elif state == 'end':
                        print_err("expected: END COMPARISON AREA")

        if not saw_begin_line:
            print(progname + ': Missing "BEGIN COMPARISON AREA" in ' + dbh_java_fullpath)
            java_all_ok = False
        if not java_all_ok:
            return False  # parse error(s)

        # Sanity-check dbtypes found in java
        if 'default' in token_dbtype_vals:
            if len(token_dbtype_vals) < 3:
                print(progname
                      + ': switch(dbType) should have at least 2 dbTypes + default: '
                      + dbh_java_fullpath + " near line " + str(line_num))
                java_all_ok = False
        else:
            print(progname + ': switch(dbType) missing default case: '
                  + dbh_java_fullpath + " near line " + str(line_num))
            java_all_ok = False

        # Check if all dbtypes (including default) have the same set of token_names
        for dbtype in token_dbtype_vals.keys():
            if dbtype in render.DBTYPES_FALLTHROUGH_ONLY:
                tokens = token_dbtype_vals[dbtype].keys()
                if (len(tokens) != 1) or ('fallthrough' not in tokens):
                    java_all_ok = False
                    print(progname
                          + ": SOCDBHelper.upgradeSchema token sets: Expected fallthrough for dbtype "
                          + dbtype)
                continue
            diffr = (token_names ^ (sets.ImmutableSet(token_dbtype_vals[dbtype].keys())))
            if len(diffr):
                java_all_ok = False
                print(progname
                      + ": SOCDBHelper.upgradeSchema token sets differ: String declaration vs dbtype "
                      + dbtype + ": " + ", ".join(diffr))

        # Check that dbtypes here (besides default) are same as render.DB_TOKENS
        dbtypes_set = sets.Set(token_dbtype_vals.keys())
        if 'default' in dbtypes_set:
            # should be there, prevent error if not;
            # presence is sanity-checked in above code
            dbtypes_set.remove('default')
        diffr = (dbtypes_set ^ (sets.ImmutableSet(render.DB_TOKENS.keys())))
        if len(diffr):
            java_all_ok = False
            print(progname
                  + ": SOCDBHelper.upgradeSchema db types differ vs render.DB_TOKENS: "
                  + ", ".join(diffr))

        # For java token names, check token values vs render.DB_TOKENS for non-default dbtypes
        if java_all_ok:
            for dbtype in dbtypes_set:
                if dbtype in render.DBTYPES_FALLTHROUGH_ONLY:
                    tok_list = ['fallthrough']
                else:
                    tok_list = token_names
                for token_name in tok_list:
                    if render.DB_TOKENS[dbtype][token_name] \
                            != token_dbtype_vals[dbtype][token_name]:
                        if java_all_ok:
                            print(progname
                                  + ": SOCDBHelper.upgradeSchema token value differs from render.DB_TOKENS:")
                            java_all_ok = False
                        print("- DBTYPE_" + dbtype.upper() + ": token " + token_name)

        return java_all_ok

    except IOError as e:
        print(progname + ": Error reading " + dbh_java_fullpath + ": " + str(e))
        return False
colored_buttons = sets.ImmutableSet([
    Buttons.mute, Buttons.solo, Buttons.stop_clip, Buttons.automate,
    Buttons.record, Buttons.play,
    Buttons.top_display_0, Buttons.top_display_1, Buttons.top_display_2,
    Buttons.top_display_3, Buttons.top_display_4, Buttons.top_display_5,
    Buttons.top_display_6, Buttons.top_display_7,
    Buttons.bottom_display_0, Buttons.bottom_display_1,
    Buttons.bottom_display_2, Buttons.bottom_display_3,
    Buttons.bottom_display_4, Buttons.bottom_display_5,
    Buttons.bottom_display_6, Buttons.bottom_display_7,
    Buttons.length_1_32t, Buttons.length_1_32, Buttons.length_1_16t,
    Buttons.length_1_16, Buttons.length_1_8t, Buttons.length_1_8,
    Buttons.length_1_4t, Buttons.length_1_4
])
class Bus(object):
    """A DALI bus."""

    _all_addresses = sets.ImmutableSet(xrange(64))

    def __init__(self, name=None, interface=None):
        self._devices = {}
        self._bus_scanned = False  # Have we scanned the bus for devices?
        self.name = name
        self._interface = interface

    def get_interface(self):
        if not self._interface:
            raise NotConnected
        return self._interface

    def add_device(self, device):
        if device.bus and device.bus != self:
            raise DeviceAlreadyBound()
        if device.address in self._devices:
            raise DuplicateDevice()
        if not isinstance(device.address, int) or device.address < 0 \
                or device.address > 63:
            raise BadDevice("device address is invalid")
        self._devices[device.address] = device
        device.bus = self

    def unused_addresses(self):
        """Return all short addresses that are not in use."""
        used_addresses = sets.ImmutableSet(self._devices.keys())
        return list(self._all_addresses - used_addresses)

    def scan(self):
        """Scan the bus for devices and ensure there are device objects
        for each discovered device.
        """
        i = self.get_interface()
        for sa in xrange(0, 64):
            if sa in self._devices:
                continue
            response = i.send(gear.QueryControlGearPresent(address.Short(sa)))
            if response.value:
                device.Device(address=sa, bus=self)
        self._bus_scanned = True

    def set_search_addr(self, addr):
        i = self.get_interface()
        i.send(gear.SetSearchAddrH((addr >> 16) & 0xff))
        i.send(gear.SetSearchAddrM((addr >> 8) & 0xff))
        i.send(gear.SetSearchAddrL(addr & 0xff))

    def find_next(self, low, high):
        """Find the ballast with the lowest random address.  The caller
        guarantees that there are no ballasts with an address lower
        than 'low'.

        If found, returns the random address.  SearchAddr will be set to
        this address in all ballasts.  The ballast is not withdrawn.

        If not found, returns None.
        """
        i = self.get_interface()
        self.set_search_addr(high)
        if low == high:
            response = i.send(gear.Compare())
            if response.value is True:
                return low
            return None
        response = i.send(gear.Compare())
        if response.value is True:
            midpoint = (low + high) / 2
            return self.find_next(low, midpoint) \
                or self.find_next(midpoint + 1, high)

    def assign_short_addresses(self):
        """Search for devices on the bus with no short address allocated,
        and allocate each one a short address from the set of unused
        addresses.
        """
        if not self._bus_scanned:
            self.scan()
        addrs = self.unused_addresses()
        i = self.get_interface()
        i.send(gear.Terminate())
        i.send(gear.Initialise(broadcast=False, address=None))
        i.send(gear.Randomise())
        # Randomise may take up to 100ms
        time.sleep(0.1)
        low = 0
        high = 0xffffff
        while low is not None:
            low = self.find_next(low, high)
            if low is not None:
                if addrs:
                    new_addr = addrs.pop(0)
                    i.send(gear.ProgramShortAddress(new_addr))
                    r = i.send(gear.VerifyShortAddress(new_addr))
                    if r.value is not True:
                        raise ProgramShortAddressFailure(new_addr)
                    i.send(gear.Withdraw())
                    device.Device(address=new_addr, bus=self)
                else:
                    i.send(gear.Terminate())
                    raise NoFreeAddress
                low = low + 1
        i.send(gear.Terminate())
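# A minimal usage sketch: with no devices registered and no interface
# attached, unused_addresses() already works, since it only needs the
# ImmutableSet difference between the class-level address pool and the
# per-instance allocation dict.
bus = Bus(name='demo')
assert len(bus.unused_addresses()) == 64  # all 64 short addresses free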
def test_python_immutable_set(self):
    self.w(sets.ImmutableSet(('a', 'b')), u'["a","b"]')
# Only need to list IDs that do not start with "xml", "XML", etc.
RESERVED_NAMES = [
    '__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__',
    '__getitem__', '__hash__', '__init__', '__iter__', '__module__',
    '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__',
    '__str__', '__unicode__', '__weakref__', '_childNodes', '_docIndex',
    '_localName', '_namespaceURI', '_parentNode', '_prefix', '_rootNode',
    'childNodes', 'docIndex', 'localName', 'namespaceURI', 'next_elem',
    'nodeName', 'nodeType', 'ownerDocument', 'parentNode', 'prefix',
    'rootNode', 'locals', 'None'
]

RESERVED_NAMES.extend(keyword.kwlist)
RESERVED_NAMES = sets.ImmutableSet(RESERVED_NAMES)

# Phases to which rules should be added.
# Usually there should only be one MAKE_INSTANCE phase rule, and this is
# usually the default rule.
# PRE_INSTANCE rules are usually for preventing certain events from
# creating objects.
# POST_INSTANCE rules are usually for decorating or modifying created objects.
PRE_INSTANCE, MAKE_INSTANCE, POST_INSTANCE = 1, 2, 3


# All local names in this class must be prefixed with "xml" so that they
# don't conflict with the generated class name for the element binding object
def create_element(xmlnaming_rule, xmlqname, xmlns=None, xmlename=None):
    xmlprefix, xmllocal = SplitQName(xmlqname)
    if not xmlename:
        xmlename = xmlnaming_rule.xml_to_python(xmllocal, xmlns)
    xmlns_class = g_namespaces.setdefault(xmlns, namespace(xmlns, xmlprefix))
    if codecs.mulaw is not None:
        format_to_codec[PT_PCMU] = MulawCodec()
    if codecs.alaw is not None:
        format_to_codec[PT_PCMA] = AlawCodec()
    if codecs.gsm is not None:
        format_to_codec[PT_GSM] = GSMCodec()
    if codecs.speex is not None:
        format_to_codec[PT_SPEEX] = SpeexCodec()
    #if codecs.dvi4 is not None:
    #    format_to_codec[PT_DVI4] = DVI4Codec()
    #if codecs.ilbc is not None:
    #    format_to_codec[PT_ILBC] = ILBCCodec()
    return format_to_codec


known_formats = (sets.ImmutableSet(make_codec_set().keys())
                 - sets.ImmutableSet([PT_CN, PT_xCN]))


class Codecker:
    def __init__(self, format):
        self.format_to_codec = make_codec_set()
        if not format in known_formats:
            raise ValueError("Can't handle codec %r" % format)
        self.format = format
        self.handler = None

    def set_handler(self, handler):
# Copyright (C) 2005, 2006, 2007 Michael Bayer [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Defines ANSI SQL operations.

Contains default implementations for the abstract objects in the sql module.
"""

from sqlalchemy import schema, sql, engine, util, sql_util, exceptions
from sqlalchemy.engine import default
import string, re, sets, weakref

ANSI_FUNCS = sets.ImmutableSet([
    'CURRENT_DATE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER',
    'LOCALTIME', 'LOCALTIMESTAMP', 'SESSION_USER', 'USER'
])

RESERVED_WORDS = util.Set([
    'all', 'analyse', 'analyze', 'and', 'any', 'array', 'as', 'asc',
    'asymmetric', 'authorization', 'between', 'binary', 'both', 'case',
    'cast', 'check', 'collate', 'column', 'constraint', 'create', 'cross',
    'current_date', 'current_role', 'current_time', 'current_timestamp',
    'current_user', 'default', 'deferrable', 'desc', 'distinct', 'do',
    'else', 'end', 'except', 'false', 'for', 'foreign', 'freeze', 'from',
    'full', 'grant', 'group', 'having', 'ilike', 'in', 'initially', 'inner',
    'intersect', 'into', 'is', 'isnull', 'join', 'leading', 'left', 'like',
    'limit', 'localtime', 'localtimestamp', 'natural', 'new', 'not',
    'notnull', 'null', 'off', 'offset', 'old', 'on', 'only', 'or', 'order',
    'outer', 'overlaps', 'placing', 'primary', 'references', 'right',
    'select', 'session_user', 'similar', 'some', 'symmetric', 'table',
    'then', 'to',