def analysis(sliceno):
    """Run the analysis pass for one slice.

    If options.numeric_comma is set, first switch the backend to a locale
    that uses ',' as the decimal separator.  If options.filter_bad is set,
    run analysis_lap twice: a first pass that records bad lines in a
    per-slice badmap file, then (if any were found) a second pass with
    those lines excluded.

    Returns (bad_count, final_bad_count, default_count, minmax).
    """
    if options.numeric_comma:
        # Locales known to use a decimal comma; the first one the backend
        # accepts (plain or with a .UTF-8 suffix) wins.
        try_locales = [
            'da_DK', 'nb_NO', 'nn_NO', 'sv_SE', 'fi_FI', 'en_ZA', 'es_ES',
            'es_MX', 'fr_FR', 'ru_RU', 'de_DE', 'nl_NL', 'it_IT',
        ]
        for localename in try_locales:
            localename = localename.encode('ascii')
            # NOTE(review): a falsy return from backend.numeric_comma appears
            # to mean success -- the for-else below raises only when no
            # locale was accepted.  Confirm against the backend's contract.
            if not backend.numeric_comma(localename):
                break
            if not backend.numeric_comma(localename + b'.UTF-8'):
                break
        else:
            raise Exception(
                "Failed to enable numeric_comma, please install at least one of the following locales: "
                + " ".join(try_locales))
    if options.filter_bad:
        # First lap detects bad lines and records them in the badmap file...
        badmap_fh = open('badmap%d' % (sliceno, ), 'w+b')
        bad_count, default_count, minmax, link_candidates = analysis_lap(
            sliceno, badmap_fh, True)
        if sum(itervalues(bad_count)):
            # ...second lap re-runs with those lines skipped.
            final_bad_count, default_count, minmax, link_candidates = analysis_lap(
                sliceno, badmap_fh, False)
            final_bad_count = max(itervalues(final_bad_count))
        else:
            final_bad_count = 0
        badmap_fh.close()
    else:
        bad_count, default_count, minmax, link_candidates = analysis_lap(
            sliceno, None, False)
        final_bad_count = 0
    for src, dst in link_candidates:
        symlink(src, dst)
    return bad_count, final_bad_count, default_count, minmax
def collect(key, value, path=''):
    """Record the type spec for *value* under its option path in ``res``.

    Dict values are recursed into (children keyed as '*'); leaf values are
    converted with value2spec and asserted consistent with any spec already
    recorded for the same path.
    """
    path = "%s/%s" % (path, key,)
    if not isinstance(value, dict):
        leaf_spec = value2spec(value)
        assert res.get(path, leaf_spec) == leaf_spec, 'Method %s has incompatible types in options%s' % (method, path,)
        res[path] = leaf_spec
        return
    for child in itervalues(value):
        collect('*', child, path)
def chk(key, value):
    """Add *key* to ``res`` if *value* (or anything nested inside it) marks
    the option as required (OptionString, RequiredOption, or an OptionEnum
    that does not allow None)."""
    if value is OptionString or isinstance(value, RequiredOption):
        res.add(key)
        return
    if isinstance(value, OptionEnum):
        # An enum whose valid set excludes None must be set explicitly.
        if None not in value._valid:
            res.add(key)
    elif isinstance(value, dict):
        for nested in itervalues(value):
            chk(key, nested)
    elif isinstance(value, (list, tuple, set,)):
        for nested in value:
            chk(key, nested)
def _update_finish(self, dict_of_hashes, verbose=False): """Filters in-use database on valid hashes. Always call after (a sequence of) update_workspace calls. """ # discard cached setup.json from any gone jobs # (so we reload it if they reappear, and also so we don't see them here) for j in set(_paramsdict) - self._fsjid: del _paramsdict[j] discarded_due_to_hash_list = [] # Keep lists of jobs per method, only with valid hashes self.db_by_method = defaultdict(list) for setup in itervalues(_paramsdict): if setup.hash in dict_of_hashes.get(setup.method, ()): job = _mkjob(setup) self.db_by_method[job.method].append(job) else: discarded_due_to_hash_list.append(setup.jobid) # Newest first for l in itervalues(self.db_by_method): l.sort(key=attrgetter('time'), reverse=True) if verbose: if discarded_due_to_hash_list: print("DATABASE: discarding due to unknown hash: %s" % ', '.join(discarded_due_to_hash_list)) print("DATABASE: Full database contains %d items" % (sum(len(v) for v in itervalues(self.db_by_method)),))
def _receiver(self):
    """Background receive loop.

    Reads framed replies from self.sock -- a 5-byte header (one opcode
    byte + little-endian uint32 payload length) followed by a pickled
    (cookie, data) pair -- and delivers each payload to the waiter queue
    registered under its cookie.  Any receive/unpickle/lookup error ends
    the loop, after which every remaining waiter is unblocked with None
    so nobody hangs forever.

    NOTE(review): pickle.loads on socket data -- presumably the peer is
    trusted (local runner); verify before exposing this socket wider.
    """
    while True:
        try:
            op, length = struct.unpack('<cI', recvall(self.sock, 5))
            data = recvall(self.sock, length)
            cookie, data = pickle.loads(data)
            q = self._waiters.pop(cookie)
            q.put(data)
        except Exception:
            break
    # All is lost, unblock anyone waiting
    for q in itervalues(self._waiters):
        try:
            # Best effort: a full queue just means that waiter is not
            # blocked on get(), so skipping it is safe.
            q.put(None, block=False)
        except QueueFull:
            pass
def new_runners(config):
    """Kill any existing runner processes and start a fresh set.

    One runner is started for the current interpreter, plus one for every
    ``py<N>`` key in *config*.  The 'py' alias always points at the runner
    for the interpreter we are running under.  Returns the runners dict.
    """
    from dispatch import run
    # Drop the alias first so the shared runner is only killed once.
    if 'py' in runners:
        del runners['py']
    for old_runner in itervalues(runners):
        old_runner.kill()
    runners.clear()
    current_py = 'py3' if PY3 else 'py2'
    interpreters = {current_py: sys.executable}
    for key, value in iteritems(config):
        if re.match(r"py\d+$", key):
            interpreters[key] = value
    for key, executable in iteritems(interpreters):
        parent_sock, child_sock = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
        argv = [executable, './runner.py', str(child_sock.fileno())]
        child_pid = run(argv, [parent_sock.fileno()], [child_sock.fileno()], False)
        child_sock.close()
        runners[key] = Runner(pid=child_pid, sock=parent_sock)
    runners['py'] = runners[current_py]
    return runners
def make_license_header(target, source, env):
    """Generate the editor license header from COPYRIGHT and LICENSE files.

    source[0] is a Debian-style machine-readable copyright file, source[1]
    the license text; target[0] is the C header to write.  The header gets
    the full license text plus per-project copyright/file tables.

    Fix: the C identifiers in the generated array initializers were
    corrupted by HTML-entity decoding ("&COPY" -> "\N{COPYRIGHT SIGN}"),
    producing invalid C; restored "&COPYRIGHT_INFO_DATA[" and
    "&COPYRIGHT_PROJECT_PARTS[".
    """
    src_copyright = source[0]
    src_license = source[1]
    dst = target[0]

    class LicenseReader:
        def __init__(self, license_file):
            self._license_file = license_file
            self.line_num = 0
            self.current = self.next_line()

        def next_line(self):
            # Advance to the next non-comment line.
            line = self._license_file.readline()
            self.line_num += 1
            while line.startswith("#"):
                line = self._license_file.readline()
                self.line_num += 1
            self.current = line
            return line

        def next_tag(self):
            # Return (tag, lines) for a "Tag: value" entry, gathering
            # indented continuation lines; ("", []) if no tag here.
            if not ":" in self.current:
                return ("", [])
            tag, line = self.current.split(":", 1)
            lines = [line.strip()]
            while self.next_line() and self.current.startswith(" "):
                lines.append(self.current.strip())
            return (tag, lines)

    from collections import OrderedDict
    projects = OrderedDict()
    license_list = []

    with open_utf8(src_copyright, "r") as copyright_file:
        reader = LicenseReader(copyright_file)
        part = {}
        while reader.current:
            tag, content = reader.next_tag()
            if tag in ("Files", "Copyright", "License"):
                part[tag] = content[:]
            elif tag == "Comment":
                # attach part to named project
                projects[content[0]] = projects.get(content[0], []) + [part]
            if not tag or not reader.current:
                # end of a paragraph start a new part
                if "License" in part and not "Files" in part:
                    # no Files tag in this one, so assume standalone license
                    license_list.append(part["License"])
                part = {}
                reader.next_line()

    # Flatten all Files/Copyright lines into one data array; each part
    # remembers its start indices into it.
    data_list = []
    for project in itervalues(projects):
        for part in project:
            part["file_index"] = len(data_list)
            data_list += part["Files"]
            part["copyright_index"] = len(data_list)
            data_list += part["Copyright"]

    with open_utf8(dst, "w") as f:
        f.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
        f.write("#ifndef _EDITOR_LICENSE_H\n")
        f.write("#define _EDITOR_LICENSE_H\n")
        f.write("const char *const GODOT_LICENSE_TEXT =")
        with open_utf8(src_license, "r") as license_file:
            for line in license_file:
                escaped_string = escape_string(line.strip())
                f.write('\n\t\t"' + escaped_string + '\\n"')
        f.write(";\n\n")

        f.write("struct ComponentCopyrightPart {\n"
                "\tconst char *license;\n"
                "\tconst char *const *files;\n"
                "\tconst char *const *copyright_statements;\n"
                "\tint file_count;\n"
                "\tint copyright_count;\n"
                "};\n\n")
        f.write("struct ComponentCopyright {\n"
                "\tconst char *name;\n"
                "\tconst ComponentCopyrightPart *parts;\n"
                "\tint part_count;\n"
                "};\n\n")

        f.write("const char *const COPYRIGHT_INFO_DATA[] = {\n")
        for line in data_list:
            f.write('\t"' + escape_string(line) + '",\n')
        f.write("};\n\n")

        f.write("const ComponentCopyrightPart COPYRIGHT_PROJECT_PARTS[] = {\n")
        part_index = 0
        part_indexes = {}
        for project_name, project in iteritems(projects):
            part_indexes[project_name] = part_index
            for part in project:
                # Fixed: was corrupted to "(c)RIGHT_INFO_DATA[" by
                # HTML-entity mangling of "&COPY".
                f.write('\t{ "' + escape_string(part["License"][0]) + '", '
                        + "&COPYRIGHT_INFO_DATA[" + str(part["file_index"]) + "], "
                        + "&COPYRIGHT_INFO_DATA[" + str(part["copyright_index"]) + "], "
                        + str(len(part["Files"])) + ", "
                        + str(len(part["Copyright"])) + " },\n")
                part_index += 1
        f.write("};\n\n")

        f.write("const int COPYRIGHT_INFO_COUNT = " + str(len(projects)) + ";\n")
        f.write("const ComponentCopyright COPYRIGHT_INFO[] = {\n")
        for project_name, project in iteritems(projects):
            f.write('\t{ "' + escape_string(project_name) + '", '
                    + "&COPYRIGHT_PROJECT_PARTS[" + str(part_indexes[project_name]) + "], "
                    + str(len(project)) + " },\n")
        f.write("};\n\n")

        f.write("const int LICENSE_COUNT = " + str(len(license_list)) + ";\n")
        f.write("const char *const LICENSE_NAMES[] = {\n")
        for l in license_list:
            f.write('\t"' + escape_string(l[0]) + '",\n')
        f.write("};\n\n")

        f.write("const char *const LICENSE_BODIES[] = {\n\n")
        for l in license_list:
            for line in l[1:]:
                if line == ".":
                    # A lone "." marks a blank line in the copyright format.
                    f.write('\t"\\n"\n')
                else:
                    f.write('\t"' + escape_string(line) + '\\n"\n')
            f.write('\t"",\n\n')
        f.write("};\n\n")
        f.write("#endif\n")
def make_license_header(target, source, env):
    """Generate the editor license header from COPYRIGHT and LICENSE files.

    source[0] is a Debian-style machine-readable copyright file, source[1]
    the license text; target[0] is the C header to write.  The header gets
    the full license text plus per-project copyright/file tables.

    Fix: the C identifiers in the generated array initializers were
    corrupted by HTML-entity decoding ("&COPY" -> "\N{COPYRIGHT SIGN}"),
    producing invalid C; restored "&COPYRIGHT_INFO_DATA[" and
    "&COPYRIGHT_PROJECT_PARTS[".
    """
    src_copyright = source[0]
    src_license = source[1]
    dst = target[0]

    class LicenseReader:
        def __init__(self, license_file):
            self._license_file = license_file
            self.line_num = 0
            self.current = self.next_line()

        def next_line(self):
            # Advance to the next non-comment line.
            line = self._license_file.readline()
            self.line_num += 1
            while line.startswith("#"):
                line = self._license_file.readline()
                self.line_num += 1
            self.current = line
            return line

        def next_tag(self):
            # Return (tag, lines) for a "Tag: value" entry, gathering
            # indented continuation lines; ('', []) if no tag here.
            if not ':' in self.current:
                return ('', [])
            tag, line = self.current.split(":", 1)
            lines = [line.strip()]
            while self.next_line() and self.current.startswith(" "):
                lines.append(self.current.strip())
            return (tag, lines)

    from collections import OrderedDict
    projects = OrderedDict()
    license_list = []

    with open_utf8(src_copyright, "r") as copyright_file:
        reader = LicenseReader(copyright_file)
        part = {}
        while reader.current:
            tag, content = reader.next_tag()
            if tag in ("Files", "Copyright", "License"):
                part[tag] = content[:]
            elif tag == "Comment":
                # attach part to named project
                projects[content[0]] = projects.get(content[0], []) + [part]
            if not tag or not reader.current:
                # end of a paragraph start a new part
                if "License" in part and not "Files" in part:
                    # no Files tag in this one, so assume standalone license
                    license_list.append(part["License"])
                part = {}
                reader.next_line()

    # Flatten all Files/Copyright lines into one data array; each part
    # remembers its start indices into it.
    data_list = []
    for project in itervalues(projects):
        for part in project:
            part["file_index"] = len(data_list)
            data_list += part["Files"]
            part["copyright_index"] = len(data_list)
            data_list += part["Copyright"]

    with open_utf8(dst, "w") as f:
        f.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
        f.write("#ifndef _EDITOR_LICENSE_H\n")
        f.write("#define _EDITOR_LICENSE_H\n")
        f.write("const char *const GODOT_LICENSE_TEXT =")
        with open_utf8(src_license, "r") as license_file:
            for line in license_file:
                escaped_string = escape_string(line.strip())
                f.write("\n\t\t\"" + escaped_string + "\\n\"")
        f.write(";\n\n")

        f.write("struct ComponentCopyrightPart {\n"
                "\tconst char *license;\n"
                "\tconst char *const *files;\n"
                "\tconst char *const *copyright_statements;\n"
                "\tint file_count;\n"
                "\tint copyright_count;\n"
                "};\n\n")
        f.write("struct ComponentCopyright {\n"
                "\tconst char *name;\n"
                "\tconst ComponentCopyrightPart *parts;\n"
                "\tint part_count;\n"
                "};\n\n")

        f.write("const char *const COPYRIGHT_INFO_DATA[] = {\n")
        for line in data_list:
            f.write("\t\"" + escape_string(line) + "\",\n")
        f.write("};\n\n")

        f.write("const ComponentCopyrightPart COPYRIGHT_PROJECT_PARTS[] = {\n")
        part_index = 0
        part_indexes = {}
        for project_name, project in iteritems(projects):
            part_indexes[project_name] = part_index
            for part in project:
                # Fixed: was corrupted to "(c)RIGHT_INFO_DATA[" by
                # HTML-entity mangling of "&COPY".
                f.write("\t{ \"" + escape_string(part["License"][0]) + "\", "
                        + "&COPYRIGHT_INFO_DATA[" + str(part["file_index"]) + "], "
                        + "&COPYRIGHT_INFO_DATA[" + str(part["copyright_index"]) + "], "
                        + str(len(part["Files"])) + ", "
                        + str(len(part["Copyright"])) + " },\n")
                part_index += 1
        f.write("};\n\n")

        f.write("const int COPYRIGHT_INFO_COUNT = " + str(len(projects)) + ";\n")
        f.write("const ComponentCopyright COPYRIGHT_INFO[] = {\n")
        for project_name, project in iteritems(projects):
            f.write("\t{ \"" + escape_string(project_name) + "\", "
                    + "&COPYRIGHT_PROJECT_PARTS[" + str(part_indexes[project_name]) + "], "
                    + str(len(project)) + " },\n")
        f.write("};\n\n")

        f.write("const int LICENSE_COUNT = " + str(len(license_list)) + ";\n")
        f.write("const char *const LICENSE_NAMES[] = {\n")
        for l in license_list:
            f.write("\t\"" + escape_string(l[0]) + "\",\n")
        f.write("};\n\n")

        f.write("const char *const LICENSE_BODIES[] = {\n\n")
        for l in license_list:
            for line in l[1:]:
                if line == ".":
                    # A lone "." marks a blank line in the copyright format.
                    f.write("\t\"\\n\"\n")
                else:
                    f.write("\t\"" + escape_string(line) + "\\n\"\n")
            f.write("\t\"\",\n\n")
        f.write("};\n\n")
        f.write("#endif\n")
def values(self, show_time=False):
    """Yield stored entries; by default only each entry's first element,
    or the whole entry when show_time is true."""
    for entry in itervalues(super(TimerDict, self)):
        yield entry if show_time else entry[0]
def get_item_by_uid(self, uid):
    """Return the first entry in self.tree whose 'uid' field equals *uid*,
    or None when no entry matches."""
    return next(
        (node for node in itervalues(self.tree) if node['uid'] == uid),
        None,
    )
def convert(default_v, v):
    """Coerce option value *v* to the shape/type of its default *default_v*.

    Closure over k (option name) and method (method name), used only for
    error messages.  Tries a series of cases in order -- RequiredOption /
    None handling, containers, enums, strings/numbers, bools, dates,
    JobWithFile-like tuples -- and raises OptionException when *v* cannot
    be made compatible.
    """
    if isinstance(default_v, RequiredOption):
        if v is None and not default_v.none_ok:
            raise OptionException(
                'Option %s on method %s requires a non-None value (%r)' % (
                    k, method, default_v.value,))
        # Unwrap to the underlying default for the rest of the checks.
        default_v = default_v.value
    if default_v is None or v is None:
        if default_v is OptionString:
            raise OptionException(
                'Option %s on method %s requires a non-empty string value' % (
                    k, method,))
        if hasattr(default_v, '_valid') and v not in default_v._valid:
            raise OptionException(
                'Option %s on method %s requires a value in %s' % (
                    k, method, default_v._valid,))
        if isinstance(default_v, OptionDefault):
            v = default_v.default
        # None on either side short-circuits all further conversion.
        return v
    if isinstance(default_v, OptionDefault):
        default_v = default_v.value
    if isinstance(default_v, dict) and isinstance(v, dict):
        if default_v:
            # All values in the default dict must share a type; each value
            # in v is converted against that sample.
            sample_v = first_value(default_v)
            for chk_v in itervalues(default_v):
                assert isinstance(chk_v, type(sample_v))
            # NOTE: k and v here deliberately shadow the closure's k/v.
            return {k: convert(sample_v, v) for k, v in iteritems(v)}
        else:
            return v
    if isinstance(default_v, (
            list,
            set,
            tuple,
    )) and isinstance(v, str_types + (
            list,
            set,
            tuple,
    )):
        # A string is split on ',' into elements before conversion.
        if isinstance(v, str_types):
            v = (e.strip() for e in v.split(','))
        if default_v:
            sample_v = first_value(default_v)
            for chk_v in default_v:
                assert isinstance(chk_v, type(sample_v))
            v = (convert(sample_v, e) for e in v)
        # Result takes the container type of the default.
        return type(default_v)(v)
    if isinstance(default_v, (
            OptionEnum,
            OptionEnumValue,
    )):
        # Empty string normalises to None; otherwise the value must be in
        # _valid or match one of the enum's declared prefixes.
        if not (v or None) in default_v._valid:
            ok = False
            for cand_prefix in default_v._prefixes:
                if v.startswith(cand_prefix):
                    ok = True
                    break
            if not ok:
                raise OptionException(
                    '%r not a permitted value for option %s on method %s (%s)'
                    % (v, k, method, default_v._valid))
        return v or None
    if isinstance(default_v, str_types + num_types) and isinstance(
            v, str_types + num_types):
        if default_v is OptionString:
            v = str(v)
            if not v:
                raise OptionException(
                    'Option %s on method %s requires a non-empty string value'
                    % (
                        k,
                        method,
                    ))
            return v
        if isinstance(default_v, unicode) and isinstance(v, bytes):
            # py2 compatibility: bytes promoted to unicode as UTF-8.
            return v.decode('utf-8')
        return type(default_v)(v)
    if (isinstance(default_v, type)
            and isinstance(v, typefuzz(default_v))) or isinstance(
                v, typefuzz(type(default_v))):
        # Already (fuzzily) the right type -- accept as-is.
        return v
    if isinstance(default_v, bool) and isinstance(v, (str, int)):
        lv = str(v).lower()
        if lv in (
                'true',
                '1',
                't',
                'yes',
                'on',
        ):
            return True
        if lv in (
                'false',
                '0',
                'f',
                'no',
                'off',
                '',
        ):
            return False
    if isinstance(default_v, _date_types):
        # A concrete date/time default stands for its type.
        default_v = type(default_v)
    if default_v in _date_types:
        try:
            return typing_conv[default_v.__name__](v)
        except Exception:
            raise OptionException(
                'Failed to convert option %s %r to %s on method %s' % (
                    k,
                    v,
                    default_v,
                    method,
                ))
    if isinstance(v, str_types) and not v:
        # Empty string becomes the default type's zero value.
        return type(default_v)()
    if isinstance(default_v, type):  # JobWithFile or similar
        default_v = default_v()
    if isinstance(default_v, JobWithFile):
        # v must be a (possibly short) sequence; missing tail fields are
        # filled from the namedtuple constructor's defaults.
        defaults = type(default_v).__new__.__defaults__
        if not isinstance(v, (
                list,
                tuple,
        )) or len(v) > len(defaults):
            raise OptionException(
                'Option %s (%r) on method %s is not %s compatible' %
                (k, v, method, type(default_v)))
        v = tuple(v) + defaults[len(
            v):]  # so all of default_v gets convert()ed.
        v = [convert(dv, vv) for dv, vv in zip(default_v, v)]
        return type(default_v)(*v)
    raise OptionException(
        'Failed to convert option %s of %s to %s on method %s' % (
            k,
            type(v),
            type(default_v),
            method,
        ))