Example #1
def _mkjob(setup):
	params_with_defaults = {}
	# Fill in defaults for all methods, update with actual options
	def optfilter(d):
		res = {}
		for k, v in iteritems(d):
			if isinstance(v, OptionEnum):
				v = None
			elif isinstance(v, OptionDefault):
				v = v.default
			res[k] = v
		return res
	for method, params in iteritems(setup.params):
		if method in _control.Methods.params:
			d = {k: optfilter(v) for k, v in iteritems(_control.Methods.params[method].defaults)}
		else:
			d = {}
		for k, v in iteritems(d):
			v.update(params[k])
		params_with_defaults[method] = d
	optset = _control.Methods.params2optset(params_with_defaults)
	job = Job(
		id     = setup.jobid,
		method = setup.method,
		params = setup.params[setup.method],
		optset = optset,
		hash   = setup.hash,
		time   = setup.starttime,
		total  = setup.profile.total,
	)
	return job
def options2typing(method, options):
    from extras import JobWithFile
    res = {}

    def value2spec(value):
        if isinstance(value, list):
            if not value:
                return
            fmt = '[%s]'
            value = value[0]
        else:
            fmt = '%s'
        typ = None
        if value is JobWithFile or isinstance(value, JobWithFile):
            typ = 'JobWithFile'
        elif isinstance(value, set):
            typ = 'set'
        elif value in (
                datetime,
                date,
                time,
                timedelta,
        ):
            typ = value.__name__
        elif isinstance(value, (
                datetime,
                date,
                time,
                timedelta,
        )):
            typ = type(value).__name__
        if typ:
            return fmt % (typ, )

    def collect(key, value, path=''):
        path = "%s/%s" % (
            path,
            key,
        )
        if isinstance(value, dict):
            for v in itervalues(value):
                collect('*', v, path)
            return
        spec = value2spec(value)
        assert res.get(
            path,
            spec) == spec, 'Method %s has incompatible types in options%s' % (
                method,
                path,
            )
        res[path] = spec

    for k, v in iteritems(options):
        collect(k, v)
    # reverse by key len, so something inside a dict always comes before
    # the dict itself. (We don't currently have any dict-like types, but we
    # might later.)
    return sorted(([k[1:], v] for k, v in iteritems(res) if v),
                  key=lambda i: -len(i[0]))
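Note: every snippet on this page relies on iteritems (and the related itervalues / iterkeys) imported from a Python 2/3 compatibility layer such as six or a project-local compat module. A minimal sketch of such a shim, with assumed names not taken from any of the projects above:

import sys

PY2 = sys.version_info[0] == 2

if PY2:
    # Python 2: the dict methods already return lazy iterators
    def iteritems(d, **kw):
        return d.iteritems(**kw)
    def itervalues(d, **kw):
        return d.itervalues(**kw)
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)
else:
    # Python 3: wrap the view objects in iter() for a uniform interface
    def iteritems(d, **kw):
        return iter(d.items(**kw))
    def itervalues(d, **kw):
        return iter(d.values(**kw))
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))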
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--ccache', action='store_true', default=False)
    parser.add_argument('--host-only', action='store_true', default=False)
    parser.add_argument('--android', action='store_true', default=False)
    parser.add_argument('--build', metavar='TARGET')
    args = parser.parse_args()

    configs = {}
    if not args.host_only:
        if args.android:
            for config_name, gn_args in iteritems(ANDROID_BUILD_CONFIGS):
                for arch in ANDROID_ARCHS:
                    full_config_name = '%s_%s' % (config_name, arch)
                    configs[full_config_name] = gn_args + ('target_cpu="%s"' %
                                                           arch, )
        for config_name, gn_args in iteritems(LINUX_BUILD_CONFIGS):
            if dict(a.split('=') for a in gn_args).get('is_clang',
                                                       None) == 'true':
                continue
            for arch in LINUX_ARCHS:
                full_config_name = '%s_%s' % (config_name, arch)
                configs[full_config_name] = gn_args + (
                    'target_cpu="%s"' % arch, 'target_os="linux"')

    system = platform.system().lower()
    if system == 'linux':
        configs.update(LINUX_BUILD_CONFIGS)
    elif system == 'darwin':
        configs.update(MAC_BUILD_CONFIGS)
    else:
        assert False, 'Unsupported system %r' % system

    if args.ccache:
        for config_name, gn_args in iteritems(configs):
            configs[config_name] = gn_args + ('cc_wrapper="ccache"', )

    out_base_dir = os.path.join(ROOT_DIR, 'out')
    if not os.path.isdir(out_base_dir):
        os.mkdir(out_base_dir)

    gn = os.path.join(ROOT_DIR, 'tools', 'gn')

    for config_name, gn_args in iteritems(configs):
        print('\n\033[32mBuilding %-20s[%s]\033[0m' %
              (config_name, ','.join(gn_args)))
        out_dir = os.path.join(ROOT_DIR, 'out', config_name)
        if not os.path.isdir(out_dir):
            os.mkdir(out_dir)
        gn_cmd = (gn, 'gen', out_dir, '--args=%s' % (' '.join(gn_args)),
                  '--check')
        print(' '.join(quote(c) for c in gn_cmd))
        subprocess.check_call(gn_cmd, cwd=ROOT_DIR)
        if args.build:
            ninja = os.path.join(ROOT_DIR, 'tools', 'ninja')
            ninja_cmd = (ninja, '-C', '.', args.build)
            subprocess.check_call(ninja_cmd, cwd=out_dir)
Example #4
	def params2optset(self, params):
		optset = set()
		for optmethod, method_params in iteritems(params):
			for group, d in iteritems(method_params):
				filled_in = dict(self.params[optmethod].defaults[group])
				filled_in.update(d)
				for optname, optval in iteritems(filled_in):
					optset.add('%s %s-%s %s' % (optmethod, group, optname, _reprify(optval),))
		return optset
			def fixup(d):
				if isinstance(d, defaultdict) and not picklable(d.default_factory):
					if not d:
						return {}
					v = next(itervalues(d))
					if isinstance(v, defaultdict) and not picklable(v.default_factory):
						return {k: fixup(v) for k, v in iteritems(d)}
					else:
						return dict(d)
				else:
					return d
Example #6
def patternProperties(validator, patternProperties, instance, schema):
    if not validator.is_type(instance, "object"):
        return

    for pattern, subschema in iteritems(patternProperties):
        for k, v in iteritems(instance):
            if re.search(pattern, k):
                for error in validator.descend(
                        v, subschema, path=k, schema_path=pattern
                ):
                    yield error
Example #7
    def __init__(self, package_list, configfilename, daemon_config):
        super(SubMethods, self).__init__(package_list, configfilename)
        t0 = time()
        self.runners = new_runners(daemon_config)
        per_runner = defaultdict(list)
        for key, val in iteritems(self.db):
            package = val['package']
            per_runner[val['version']].append((package, key))
        warnings = []
        failed = []
        self.hash = {}
        self.params = {}
        self.typing = {}
        for version, data in iteritems(per_runner):
            runner = self.runners.get(version)
            if not runner:
                msg = '%%s.%%s (unconfigured version %s)' % (version)
                failed.extend(msg % t for t in sorted(data))
                continue
            w, f, h, p = runner.load_methods(data)
            warnings.extend(w)
            failed.extend(f)
            self.hash.update(h)
            self.params.update(p)
        for key, params in iteritems(self.params):
            self.typing[key] = options2typing(key, params.options)
            params.defaults = params2defaults(params)
            params.required = options2required(params.options)

        def prt(a, prefix):
            maxlen = (max(len(e) for e in a) + len(prefix))
            line = '=' * maxlen
            print()
            print(line)
            for e in sorted(a):
                msg = prefix + e
                print(msg + ' ' * (maxlen - len(msg)))
            print(line)
            print()

        if warnings:
            prt(warnings, 'WARNING: ')
        if failed:
            print('\033[47;31;1m')
            prt(failed, 'FAILED to import ')
            print('\033[m')
            raise MethodLoadException(failed)
        print("Updated %d methods on %d runners in %.1f seconds" % (
            len(self.hash),
            len(per_runner),
            time() - t0,
        ))
Example #8
 def get_reqlist(self):
     for method, data in self.tree.items():
         full_params = {}
         for submethod, given_params in iteritems(data['params']):
             params = {
                 k: dict(v)
                 for k, v in iteritems(
                     self.methods.params[submethod].defaults)
             }
             for k, v in iteritems(given_params):
                 params[k].update(v)
             full_params[submethod] = params
         yield method, data['uid'], self.methods.params2optset(full_params)
Example #9
def patternProperties(validator, patternProperties, instance, schema):
    if not validator.is_type(instance, "object"):
        return

    for pattern, subschema in iteritems(patternProperties):
        for k, v in iteritems(instance):
            if re.search(pattern, k):
                for error in validator.descend(
                        v,
                        subschema,
                        path=k,
                        schema_path=pattern,
                ):
                    yield error
Example #10
        def iter_errors(self, instance, _schema=None):
            if _schema is None:
                _schema = self.schema

            scope = _schema.get(u"id")
            if scope:
                self.resolver.push_scope(scope)
            try:
                ref = _schema.get(u"$ref")
                if ref is not None:
                    validators = [(u"$ref", ref)]
                else:
                    validators = iteritems(_schema)

                for k, v in validators:
                    validator = self.VALIDATORS.get(k)
                    if validator is None:
                        continue

                    errors = validator(self, v, instance, _schema) or ()
                    for error in errors:
                        # set details if not already set by the called fn
                        error._set(
                            validator=k,
                            validator_value=v,
                            instance=instance,
                            schema=_schema,
                        )
                        if k != u"$ref":
                            error.schema_path.appendleft(k)
                        yield error
            finally:
                if scope:
                    self.resolver.pop_scope()
Example #11
    def __init__(
        self,
        base_uri,
        referrer,
        store=(),
        cache_remote=True,
        handlers=(),
        urljoin_cache=None,
        remote_cache=None,
    ):
        if urljoin_cache is None:
            urljoin_cache = lru_cache(1024)(urljoin)
        if remote_cache is None:
            remote_cache = lru_cache(1024)(self.resolve_from_url)

        self.referrer = referrer
        self.cache_remote = cache_remote
        self.handlers = dict(handlers)

        self._scopes_stack = [base_uri]
        self.store = _utils.URIDict(
            (id, validator.META_SCHEMA)
            for id, validator in iteritems(meta_schemas)
        )
        self.store.update(store)
        self.store[base_uri] = referrer

        self._urljoin_cache = urljoin_cache
        self._remote_cache = remote_cache
Example #12
 def __init__(self, package_list, configfilename):
     self.package_list = package_list
     self.db = {}
     for package in self.package_list:
         try:
             package_mod = import_module(package)
             if not hasattr(package_mod, "__file__"):
                 raise ImportError("no __file__")
         except ImportError:
             raise Exception(
                 "Failed to import %s, maybe missing __init__.py?" %
                 (package, ))
         confname = os.path.join(os.path.dirname(package_mod.__file__),
                                 configfilename)
         tmp = read_method_conf(confname)
         for x in tmp:
             if x in self.db:
                 print(
                     "METHOD:  ERROR, method \"%s\" defined both in \"%s\" and \"%s\"!"
                     % (x, package, self.db[x]['package']))
                 exit(1)
         for x in tmp.values():
             x['package'] = os.path.basename(package)
         self.db.update(tmp)
     # build dependency tree for all methods
     self.deptree = {}
     for method in self.db:
         self.deptree[method] = self._build_dep_tree(method, tree={})
     self.link = {k: v.get('link') for k, v in iteritems(self.db)}
 def __repr__(self):
   return json.dumps({
       k: (list(sorted(v)) if isinstance(v, set) else v)
       for (k, v) in iteritems(self.__dict__)
   },
                     indent=4,
                     sort_keys=True)
def _reprify(o):
    if isinstance(o, OptionDefault):
        o = o.default
    if isinstance(o, (bytes, str, int, float, long, bool, NoneType)):
        return repr(o)
    if isinstance(o, unicode):
        # not reachable in PY3, the above "str" matches
        return repr(o.encode('utf-8'))
    if isinstance(o, set):
        return '[%s]' % (', '.join(map(_reprify, _sorted_set(o))), )
    if isinstance(o, (list, tuple)):
        return '[%s]' % (', '.join(map(_reprify, o)), )
    if isinstance(o, dict):
        return '{%s}' % (', '.join('%s: %s' % (
            _reprify(k),
            _reprify(v),
        ) for k, v in sorted(iteritems(o))), )
    if isinstance(o, (
            datetime,
            date,
            time,
            timedelta,
    )):
        return str(o)
    raise Exception('Unhandled %s in dependency resolution' % (type(o), ))
Example #15
        def iter_errors(self, instance, _schema=None):
            if _schema is None:
                _schema = self.schema

            with self.resolver.in_scope(_schema.get("id", "")):
                ref = _schema.get("$ref")
                if ref is not None:
                    validators = [("$ref", ref)]
                else:
                    validators = iteritems(_schema)

                for k, v in validators:
                    validator = self.VALIDATORS.get(k)
                    if validator is None:
                        continue

                    errors = validator(self, v, instance, _schema) or ()
                    for error in errors:
                        # set details if not already set by the called fn
                        error._set(
                            validator=k,
                            validator_value=v,
                            instance=instance,
                            schema=_schema,
                        )
                        if k != "$ref":
                            error.schema_path.appendleft(k)
                        yield error
Example #16
def params2defaults(params):
	d = DotDict()
	for key in ('datasets', 'jobids',):
		r = {}
		for v in params[key]:
			if isinstance(v, list):
				r[v[0]] = []
			else:
				r[v] = None
		d[key] = r
	def fixup(item):
		if isinstance(item, dict):
			d = {k: fixup(v) for k, v in iteritems(item)}
			if len(d) == 1 and first_value(d) is None and first_value(item) is not None:
				return {}
			return d
		if isinstance(item, (list, tuple, set,)):
			l = [fixup(v) for v in item]
			if l == [None] and list(item) != [None]:
				l = []
			return type(item)(l)
		if isinstance(item, type):
			return None
		assert isinstance(item, (bytes, unicode, int, float, long, bool, OptionEnum, NoneType, datetime.datetime, datetime.date, datetime.time, datetime.timedelta)), type(item)
		return item
	def fixup0(item):
		if isinstance(item, RequiredOption):
			item = item.value
		if isinstance(item, OptionDefault):
			item = item.default
		return fixup(item)
	d.options = {k: fixup0(v) for k, v in iteritems(params.options)}
	return d
Example #17
def list_threads():
    """List all available threads in most efficient way."""
    def order(item):
        """Order Threads by latest comment or start time."""
        thread = item[1]
        timestamp = thread['timestamp']
        return thread.get('last_comment', {}).get('timestamp') or timestamp

    # Read Threads from Links and Content databases
    with content.pipeline() as pipe:
        uids = []
        for thread_uid in links.lrange(build_key(THREADS_KEY), 0, -1):
            pipe.hgetall(build_key(THREAD_KEY, thread_uid))
            uids.append(thread_uid)
        threads = dict(zip(uids, pipe.execute()))

    # Make another multi request for threads' counters and last comments where
    # possible
    comments_request = OrderedDict()

    for thread_uid, thread in iteritems(threads):
        last_comment_uid = thread.get('last_comment_uid')
        if not last_comment_uid:
            continue
        comments_request[thread_uid] = thread['last_comment_uid']

    # We assume that the last comment and the comments counter are available
    # only for threads that have comments
    if comments_request:
        with links.pipeline() as pipe:
            for thread_uid in iterkeys(comments_request):
                pipe.get(build_key(THREAD_COUNTER_KEY, thread_uid))
            response = zip(iterkeys(comments_request), pipe.execute())

        for thread_uid, counter in response:
            threads[thread_uid]['comments_counter'] = counter

        with content.pipeline() as pipe:
            for thread_uid, comment_uid in iteritems(comments_request):
                key = build_key(COMMENT_KEY, thread_uid, comment_uid)
                pipe.hgetall(key)
            response = zip(iterkeys(comments_request), pipe.execute())

        for thread_uid, comment in response:
            threads[thread_uid]['last_comment'] = comment

    return OrderedDict(sorted(iteritems(threads), key=order, reverse=True))
Example #18
 def update(self, other_dict):
     if not isinstance(other_dict, dict):
         raise TypeError('%r is not a dict.' % other_dict)
     now = datetime.datetime.now()
     tmp = {}
     for k, v in iteritems(other_dict):
         tmp[k] = [v, now]
     super(TimerDict, self).update(tmp)
Example #19
    def total_errors(self):
        """
        The total number of errors in the entire tree, including children.

        """

        child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
        return len(self.errors) + child_errors
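For context, total_errors here is the property jsonschema exposes on its ErrorTree; a hedged usage sketch follows, with the schema and instance invented purely for illustration:

from jsonschema import Draft4Validator, ErrorTree

schema = {"type": "object", "properties": {"n": {"type": "integer"}}}
validator = Draft4Validator(schema)

# Collect the validator's errors into a tree and count everything,
# including errors attached to child properties.
tree = ErrorTree(validator.iter_errors({"n": "not a number"}))
print(tree.total_errors)   # expected: 1
print(tree["n"].errors)    # per-property errors keyed by validator name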
Example #20
def _job_candidates_options(candidates):
    for jobid, remset in iteritems(candidates):
        setup = job_params(jobid)
        optdiff = defaultdict(dict)
        for thing in remset:
            section, name = thing.split('-', 1)
            optdiff[section][name] = setup[section][name]
        yield jobid, optdiff
Example #21
    def total_errors(self):
        """
        The total number of errors in the entire tree, including children.

        """

        child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
        return len(self.errors) + child_errors
Example #22
	def fmt(tree, start_indent=0):
		for pid, d in sorted(iteritems(tree), key=lambda i: (i[1].stack or ((0,),))[0][0]):
			last[0] = d
			indent = start_indent
			for msg, t, _ in d.stack:
				res.append((pid, indent, msg, t))
				indent += 1
			fmt(d.children, indent)
Example #23
def _unicode_as_utf8bytes(obj):
	if isinstance(obj, unicode):
		return obj.encode('utf-8')
	elif isinstance(obj, dict):
		return DotDict((_unicode_as_utf8bytes(k), _unicode_as_utf8bytes(v)) for k, v in iteritems(obj))
	elif isinstance(obj, list):
		return [_unicode_as_utf8bytes(v) for v in obj]
	else:
		return obj
Example #24
	def typefix(e):
		if isinstance(e, dict):
			return dict_type((typefix(k), typefix(v)) for k, v in iteritems(e))
		elif isinstance(e, (list, tuple, set,)):
			return [typefix(v) for v in e]
		elif PY2 and isinstance(e, bytes):
			return uni(e)
		else:
			return e
Example #25
	def optfilter(d):
		res = {}
		for k, v in iteritems(d):
			if isinstance(v, OptionEnum):
				v = None
			elif isinstance(v, OptionDefault):
				v = v.default
			res[k] = v
		return res
Example #26
		def upd(aggregate, part, level):
			if level == depth:
				aggregate.update(part)
			else:
				for k, v in iteritems(part):
					if k in aggregate:
						upd(aggregate[k], v, level + 1)
					else:
						aggregate[k] = v
Example #27
 def __new__(cls, jobid, name=None):
     if isinstance(jobid, (tuple, list)):
         jobid = _dsid(jobid)
     elif isinstance(jobid, dict):
         assert not name, "Don't pass both a separate name and jobid as {job: dataset}"
         assert len(jobid) == 1, "Only pass a single {job: dataset}"
         jobid, dsname = next(iteritems(jobid))
         if not jobid:
             return None
         jobid = job_params(jobid, default_empty=True).datasets.get(dsname)
         if not jobid:
             return None
     if '/' in jobid:
         assert not name, "Don't pass both a separate name and jobid as jid/name"
         jobid, name = jobid.split('/', 1)
     assert jobid, "If you really meant to use yourself as a dataset, pass params.jobid explicitly."
     name = uni(name or 'default')
     assert '/' not in name
     if name == 'default':
         suffix = ''
     else:
         suffix = '/' + name
     if jobid is _new_dataset_marker:
         from g import JOBID
         fullname = JOBID + suffix
     else:
         fullname = jobid + suffix
     obj = unicode.__new__(cls, fullname)
     obj.name = uni(name or 'default')
     if jobid is _new_dataset_marker:
         obj._data = DotDict({
             'version': (
                 2,
                 2,
             ),
             'filename': None,
             'hashlabel': None,
             'caption': '',
             'columns': {},
             'parent': None,
             'previous': None,
             'lines': [],
         })
         obj.jobid = None
     else:
         obj.jobid = jobid
         obj._data = DotDict(_ds_load(obj))
         assert obj._data.version[0] == 2 and obj._data.version[
             1] >= 2, "%s/%s: Unsupported dataset pickle version %r" % (
                 jobid,
                 name,
                 obj._data.version,
             )
         obj._data.columns = dict(obj._data.columns)
     return obj
Example #28
def new_runners(config):
    from dispatch import run
    if 'py' in runners:
        del runners['py']
    for runner in itervalues(runners):
        runner.kill()
    runners.clear()
    py_v = 'py3' if PY3 else 'py2'
    todo = {py_v: sys.executable}
    for k, v in iteritems(config):
        if re.match(r"py\d+$", k):
            todo[k] = v
    for k, py_exe in iteritems(todo):
        sock_p, sock_c = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
        cmd = [py_exe, './runner.py', str(sock_c.fileno())]
        pid = run(cmd, [sock_p.fileno()], [sock_c.fileno()], False)
        sock_c.close()
        runners[k] = Runner(pid=pid, sock=sock_p)
    runners['py'] = runners[py_v]
    return runners
Example #29
 def _fix_jobids(self, key):
     for method, data in iteritems(self.tree):
         method_params = data['params'][method]
         data = method_params[key]
         method_wants = self.methods.params[method][key]
         res = {}
         for jobid_name in method_wants:
             if isinstance(jobid_name, str_types):
                 value = data.get(jobid_name)
                 assert value is None or isinstance(
                     value,
                     str), 'Input %s on %s not a string as required' % (
                         jobid_name,
                         method,
                     )
             elif isinstance(jobid_name, list):
                 if len(jobid_name) != 1 or not isinstance(
                         jobid_name[0], str_types):
                     raise OptionException('Bad %s item on %s: %s' % (
                         key,
                         method,
                         repr(jobid_name),
                     ))
                 jobid_name = jobid_name[0]
                 value = data.get(jobid_name)
                 if value:
                     if isinstance(value, str_types):
                         value = [e.strip() for e in value.split(',')]
                 else:
                     value = []
                 assert isinstance(
                     value, list
                 ), 'Input %s on %s not a list or string as required' % (
                     jobid_name,
                     method,
                 )
             else:
                 raise OptionException(
                     '%s item of unknown type %s on %s: %s' % (
                         key,
                         type(jobid_name),
                         method,
                         repr(jobid_name),
                     ))
             res[jobid_name] = value
         method_params[key] = res
         spill = set(data) - set(res)
         if spill:
             raise OptionException('Unknown %s on %s: %s' % (
                 key,
                 method,
                 ', '.join(sorted(spill)),
             ))
Example #30
def job_params(jobid=None, default_empty=False):
	if default_empty and not jobid:
		return DotDict(
			options=DotDict(),
			datasets=DotDict(),
			jobids=DotDict(),
		)
	d = json_load('setup.json', jobid)
	for method, tl in iteritems(d.get('_typing', {})):
		_apply_typing(d.params[method].options, tl)
	d.update(d.params[d.method])
	return d
Example #31
def properties_draft4(validator, properties, instance, schema):
    if not validator.is_type(instance, "object"):
        return

    for property, subschema in iteritems(properties):
        if property in instance:
            for error in validator.descend(
                instance[property],
                subschema,
                path=property,
                schema_path=property,
            ):
                yield error
Example #32
def _test():
    from gzwrite import typed_writer
    for key, data in iteritems(convfuncs):
        key = key.split(":")[0]
        typed_writer(typerename.get(key, key))
        assert data.size in (
            0,
            1,
            4,
            8,
        ), (key, data)
        if isinstance(data.conv_code_str, list):
            for v in data.conv_code_str:
                assert isinstance(v, (str, NoneType)), (key, data)
        else:
            assert isinstance(data.conv_code_str, (str, NoneType)), (key, data)
        if data.conv_code_str and data.size:
            assert typerename.get(key, key) in minmaxfuncs
        assert data.pyfunc is None or callable(data.pyfunc), (key, data)
    for key, mm in iteritems(minmaxfuncs):
        for v in mm:
            assert isinstance(v, str), key
Example #33
def properties_draft4(validator, properties, instance, schema):
    if not validator.is_type(instance, "object"):
        return

    for property, subschema in iteritems(properties):
        if property in instance:
            for error in validator.descend(
                    instance[property],
                    subschema,
                    path=property,
                    schema_path=property,
            ):
                yield error
Example #34
 def wrapper(*argv, **argd):
     t_ansi    = GuessStringType.t_ansi
     t_unicode = GuessStringType.t_unicode
     v_types   = [ type(item) for item in argv ]
     v_types.extend( [ type(value) for (key, value) in compat.iteritems(argd) ] )
     if t_unicode in v_types:
         argv = list(argv)
         for index in compat.xrange(len(argv)):
             if v_types[index] == t_unicode:
                 argv[index] = t_ansi(argv[index])
         for key, value in argd.items():
             if type(value) == t_unicode:
                 argd[key] = t_ansi(value)
     return fn(*argv, **argd)
Example #35
    def __init__(self, methods, setup):
        tree = methods.new_deptree(setup.method)
        self.methods = methods
        self.top_method = setup.method
        self.tree = tree
        self.add_flags({
            'make': False,
            'link': False,
        })
        seen = set()
        for method, data in iteritems(self.tree):
            seen.add(method)
            data['params'] = {method: setup.params[method]}
        unmatched = {
            method: params
            for method, params in iteritems(setup.params) if method not in seen
        }
        if unmatched:
            from extras import json_encode
            print("DepTree Warning:  Unmatched options remain:",
                  json_encode(unmatched, as_str=True))

        def collect(method):
            # All methods that `method` depends on
            for child in tree[method]['dep']:
                yield child
                for method in collect(child):
                    yield method

        # This probably updates some with the same data several times,
        # but this is cheap (key: dictref updates, nothing more.)
        for method, data in iteritems(self.tree):
            for submethod in set(collect(method)):
                data['params'].update(tree[submethod]['params'])
        self._fix_options(False)
        self._fix_jobids('jobids')
        self._fix_jobids('datasets')
Example #36
    def __init__(self, mapping=None, max_lifespan=3600*24*3):
        self.max_lifespan = 0
        self.set_lifespan(max_lifespan)

        now = datetime.datetime.now()
        tmp = {}
        if isinstance(mapping, dict):
            for k, v in iteritems(mapping):
                tmp[k] = [v, now]
            super(TimerDict, self).__init__(tmp)
        # todo: ...
        else:
            for k, v in mapping or ():
                tmp[k] = [v, now]
            super(TimerDict, self).__init__(tmp)
Example #37
    def __init__(
        self, base_uri, referrer, store=(), cache_remote=True, handlers=(),
    ):
        self.base_uri = base_uri
        self.resolution_scope = base_uri
        # This attribute is not used; it is kept for backwards compatibility
        self.referrer = referrer
        self.cache_remote = cache_remote
        self.handlers = dict(handlers)

        self.store = _utils.URIDict(
            (id, validator.META_SCHEMA)
            for id, validator in iteritems(meta_schemas)
        )
        self.store.update(store)
        self.store[base_uri] = referrer
Example #38
    def mySpawn(sh, escape, cmd, args, env):

        newargs = ' '.join(args[1:])
        cmdline = cmd + " " + newargs

        rv = 0
        env = {str(key): str(value) for key, value in iteritems(env)}
        if len(cmdline) > 32000 and cmd.endswith("ar"):
            cmdline = cmd + " " + args[1] + " " + args[2] + " "
            for i in range(3, len(args)):
                rv = mySubProcess(cmdline + args[i], env)
                if rv:
                    break
        else:
            rv = mySubProcess(cmdline, env)

        return rv
Example #39
def main():
    """Wipe all data from Links and Content storages."""
    start_time = time.time()
    print('Start deleting available Threads')

    with app.app_context():
        # Storage requires app in global context
        import storage

        for thread_uid, thread in iteritems(storage.list_threads()):
            storage.delete_thread(thread_uid)
            print('    Thread deleted! UID #{0}, subject: {1}'.
                  format(thread_uid, thread['subject']))

        storage.links.delete(build_key(THREADS_KEY))

    print('All Threads deleted. Done in {0:.4f}s'.
          format(time.time() - start_time))
    return False
Example #40
def dependencies(validator, dependencies, instance, schema):
    if not validator.is_type(instance, "object"):
        return

    for property, dependency in iteritems(dependencies):
        if property not in instance:
            continue

        if validator.is_type(dependency, "object"):
            for error in validator.descend(
                    instance, dependency, schema_path=property
            ):
                yield error
        else:
            dependencies = _utils.ensure_list(dependency)
            for dependency in dependencies:
                if dependency not in instance:
                    yield ValidationError(
                        "%r is a dependency of %r" % (dependency, property)
                    )
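A hedged illustration of the "dependencies" keyword this validator implements (schema and instance are made up): with {"dependencies": {"a": ["b"]}}, property "b" becomes required whenever "a" is present.

from jsonschema import Draft4Validator

schema = {"dependencies": {"a": ["b"]}}
# "a" is present but "b" is not, so one dependency error is reported.
errors = list(Draft4Validator(schema).iter_errors({"a": 1}))
print(errors[0].message)   # expected: 'b' is a dependency of 'a'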
Example #41
def properties_draft3(validator, properties, instance, schema):
    if not validator.is_type(instance, "object"):
        return

    for property, subschema in iteritems(properties):
        if property in instance:
            for error in validator.descend(
                instance[property],
                subschema,
                path=property,
                schema_path=property,
            ):
                yield error
        elif subschema.get("required", False):
            error = ValidationError("%r is a required property" % property)
            error._set(
                validator="required",
                validator_value=subschema["required"],
                instance=instance,
                schema=schema,
            )
            error.path.appendleft(property)
            error.schema_path.extend([property, "required"])
            yield error
Example #42
 def items(self, show_time=False):
     for k, v in iteritems(super(TimerDict, self)):
         if show_time:
             yield k, v
         else:
             yield k, v[0]
Example #43
                             5: "wlan_interface_state_associating",
                             6: "wlan_interface_state_discovering",
                             7: "wlan_interface_state_authenticating"}

# The DOT11_MAC_ADDRESS types are used to define an IEEE media access control
# (MAC) address.
DOT11_MAC_ADDRESS = c_ubyte * 6

# The DOT11_BSS_TYPE enumerated type defines a basic service set (BSS) network
# type.
DOT11_BSS_TYPE = c_uint
DOT11_BSS_TYPE_DICT_KV = {1: "dot11_BSS_type_infrastructure",
                          2: "dot11_BSS_type_independent",
                          3: "dot11_BSS_type_any"}
DOT11_BSS_TYPE_DICT_VK = {v: k for k, v in iteritems(DOT11_BSS_TYPE_DICT_KV)}

# The DOT11_PHY_TYPE enumeration defines an 802.11 PHY and media type.
DOT11_PHY_TYPE = c_uint
DOT11_PHY_TYPE_DICT = {0: "dot11_phy_type_unknown",
                       1: "dot11_phy_type_fhss",
                       2: "dot11_phy_type_dsss",
                       3: "dot11_phy_type_irbaseband",
                       4: "dot11_phy_type_ofdm",
                       5: "dot11_phy_type_hrdsss",
                       6: "dot11_phy_type_erp",
                       7: "dot11_phy_type_ht",
                       0x80000000: "dot11_phy_type_IHV_start",
                       0xffffffff: "dot11_phy_type_IHV_end"}

# The DOT11_AUTH_ALGORITHM enumerated type defines a wireless LAN
Example #44
 def _set(self, **kwargs):
     for k, v in iteritems(kwargs):
         if getattr(self, k) is _unset:
             setattr(self, k, v)
Example #45
def make_license_header(target, source, env):
    src_copyright = source[0]
    src_license = source[1]
    dst = target[0]

    class LicenseReader:
        def __init__(self, license_file):
            self._license_file = license_file
            self.line_num = 0
            self.current = self.next_line()

        def next_line(self):
            line = self._license_file.readline()
            self.line_num += 1
            while line.startswith("#"):
                line = self._license_file.readline()
                self.line_num += 1
            self.current = line
            return line

        def next_tag(self):
            if ':' not in self.current:
                return ('', [])
            tag, line = self.current.split(":", 1)
            lines = [line.strip()]
            while self.next_line() and self.current.startswith(" "):
                lines.append(self.current.strip())
            return (tag, lines)

    from collections import OrderedDict
    projects = OrderedDict()
    license_list = []

    with open_utf8(src_copyright, "r") as copyright_file:
        reader = LicenseReader(copyright_file)
        part = {}
        while reader.current:
            tag, content = reader.next_tag()
            if tag in ("Files", "Copyright", "License"):
                part[tag] = content[:]
            elif tag == "Comment":
                # attach part to named project
                projects[content[0]] = projects.get(content[0], []) + [part]

            if not tag or not reader.current:
                # end of a paragraph, start a new part
                if "License" in part and "Files" not in part:
                    # no Files tag in this one, so assume standalone license
                    license_list.append(part["License"])
                part = {}
                reader.next_line()

    data_list = []
    for project in itervalues(projects):
        for part in project:
            part["file_index"] = len(data_list)
            data_list += part["Files"]
            part["copyright_index"] = len(data_list)
            data_list += part["Copyright"]

    with open_utf8(dst, "w") as f:

        f.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
        f.write("#ifndef _EDITOR_LICENSE_H\n")
        f.write("#define _EDITOR_LICENSE_H\n")
        f.write("const char *const GODOT_LICENSE_TEXT =")

        with open_utf8(src_license, "r") as license_file:
            for line in license_file:
                escaped_string = escape_string(line.strip())
                f.write("\n\t\t\"" + escaped_string + "\\n\"")
        f.write(";\n\n")

        f.write("struct ComponentCopyrightPart {\n"
                "\tconst char *license;\n"
                "\tconst char *const *files;\n"
                "\tconst char *const *copyright_statements;\n"
                "\tint file_count;\n"
                "\tint copyright_count;\n"
                "};\n\n")

        f.write("struct ComponentCopyright {\n"
                "\tconst char *name;\n"
                "\tconst ComponentCopyrightPart *parts;\n"
                "\tint part_count;\n"
                "};\n\n")

        f.write("const char *const COPYRIGHT_INFO_DATA[] = {\n")
        for line in data_list:
            f.write("\t\"" + escape_string(line) + "\",\n")
        f.write("};\n\n")

        f.write("const ComponentCopyrightPart COPYRIGHT_PROJECT_PARTS[] = {\n")
        part_index = 0
        part_indexes = {}
        for project_name, project in iteritems(projects):
            part_indexes[project_name] = part_index
            for part in project:
                f.write("\t{ \"" + escape_string(part["License"][0]) + "\", "
                        + "&COPYRIGHT_INFO_DATA[" + str(part["file_index"]) + "], "
                        + "&COPYRIGHT_INFO_DATA[" + str(part["copyright_index"]) + "], "
                        + str(len(part["Files"])) + ", "
                        + str(len(part["Copyright"])) + " },\n")
                part_index += 1
        f.write("};\n\n")

        f.write("const int COPYRIGHT_INFO_COUNT = " + str(len(projects)) + ";\n")

        f.write("const ComponentCopyright COPYRIGHT_INFO[] = {\n")
        for project_name, project in iteritems(projects):
            f.write("\t{ \"" + escape_string(project_name) + "\", "
                    + "&COPYRIGHT_PROJECT_PARTS[" + str(part_indexes[project_name]) + "], "
                    + str(len(project)) + " },\n")
        f.write("};\n\n")

        f.write("const int LICENSE_COUNT = " + str(len(license_list)) + ";\n")

        f.write("const char *const LICENSE_NAMES[] = {\n")
        for l in license_list:
            f.write("\t\"" + escape_string(l[0]) + "\",\n")
        f.write("};\n\n")

        f.write("const char *const LICENSE_BODIES[] = {\n\n")
        for l in license_list:
            for line in l[1:]:
                if line == ".":
                    f.write("\t\"\\n\"\n")
                else:
                    f.write("\t\"" + escape_string(line) + "\\n\"\n")
            f.write("\t\"\",\n\n")
        f.write("};\n\n")

        f.write("#endif\n")