Example #1
def parse_user_config(alias_d, colour_d):
    from accelerator.compat import open
    from os import environ
    from os.path import join
    cfgdir = environ.get('XDG_CONFIG_HOME')
    if not cfgdir:
        home = environ.get('HOME')
        if not home:
            return None
        cfgdir = join(home, '.config')
    fn = join(cfgdir, 'accelerator', 'config')
    try:
        fh = open(fn, 'r', encoding='utf-8')
    except IOError:
        return None
    with fh:
        from configparser import ConfigParser
        cfg = ConfigParser()
        cfg.optionxform = str  # case sensitive (don't downcase aliases)
        cfg.read_file(fh)
        if 'alias' in cfg:
            alias_d.update(cfg['alias'])
        if 'colour' in cfg:
            # _unesc_re/_unesc are module-level helpers that unescape
            # escape sequences in the colour values.
            colour_d.update({
                k: [_unesc_re.sub(_unesc, e) for e in v.split()]
                for k, v in cfg['colour'].items()
            })
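
A minimal usage sketch for this variant (hypothetical caller; both dicts are filled in place):

aliases = {}
colours = {}
parse_user_config(aliases, colours)
print(aliases.get('grep', 'no alias configured'))
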
Example #2
def open_input(self, filename, mode='r', encoding=None, errors=None):
    assert 'r' in mode, "Don't write to input files"
    if 'b' not in mode and encoding is None:
        encoding = 'utf-8'
    return open(self.input_filename(filename),
                mode,
                encoding=encoding,
                errors=errors)
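
A hedged usage sketch, assuming job is an object exposing this method together with a matching input_filename() helper:

with job.open_input('rows.csv') as fh:        # text mode: defaults to UTF-8
    header = fh.readline()
with job.open_input('blob.bin', 'rb') as fh:  # binary mode: encoding stays None
    magic = fh.read(4)
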
Example #3
def open(self,
         filename,
         mode='r',
         sliceno=None,
         encoding=None,
         errors=None):
    assert 'r' in mode, "Don't write to other jobs"
    return open(self.filename(filename, sliceno),
                mode,
                encoding=encoding,
                errors=errors)
Example #4
def open(self,
         filename,
         mode='r',
         sliceno=None,
         encoding=None,
         errors=None):
    assert 'r' in mode, "Don't write to other jobs"
    if 'b' not in mode and encoding is None:
        encoding = 'utf-8'
    return open(self.filename(filename, sliceno),
                mode,
                encoding=encoding,
                errors=errors)
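
Examples #3 and #4 differ only in that #4 defaults text mode to UTF-8 instead of the locale encoding; a sketch with a hypothetical job-like object j:

with j.open('result.txt') as fh:             # #4 opens this with encoding='utf-8'
    text = fh.read()
with j.open('part', 'rb', sliceno=3) as fh:  # binary: both variants leave encoding as None
    raw = fh.read()
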
Example #5
def __exit__(self, type, value, tb):
    # We don't care if an exception occurred, we still want to save
    # the report.
    # But if saving the report produces an exception we want to
    # ignore that and re-raise the original exception (or raise
    # our own exception if no original exception exists).
    try:
        if tb is None:
            self.line()
        with open('report.txt', 'w', encoding='utf-8') as F:
            F.write(uni(self.s))  # uni: compat helper that coerces to text
        if self.stdout:
            print(self.s)
    except Exception:
        # This logic looks backwards, but it isn't
        if tb is None:
            raise
    finally:
        self._closed = True
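
A usage sketch, assuming this __exit__ sits on a hypothetical report-building context manager (a class with a string buffer s, a line() method and a stdout flag):

with Report(stdout=True) as r:
    r.line('all done')
# report.txt is written even when the body raises, and a failure while
# saving it never masks the body's exception.
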
Example #6
def output(self, what=None):
    if isinstance(what, int):
        fns = [str(what)]
    else:
        assert what in (None, 'prepare', 'analysis',
                        'synthesis'), 'Unknown output %r' % (what, )
        if what in (None, 'analysis'):
            fns = [str(sliceno) for sliceno in range(self.params.slices)]
            if what is None:
                fns = ['prepare'] + fns + ['synthesis']
        else:
            fns = [what]
    res = []
    for fn in fns:
        fn = self.filename('OUTPUT/' + fn)
        if os.path.exists(fn):
            with open(fn,
                      'rt',
                      encoding='utf-8',
                      errors='backslashreplace') as fh:
                res.append(fh.read())
    return ''.join(res)
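
A hedged usage sketch (assumes j is a job-like object exposing this method and a params.slices attribute):

print(j.output())             # prepare + every analysis slice + synthesis
print(j.output('synthesis'))  # just the synthesis output
print(j.output(2))            # just analysis slice 2
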
Example #7
def parse_user_config():
    from accelerator.compat import open
    from os import environ
    from os.path import join
    cfgdir = environ.get('XDG_CONFIG_HOME')
    if not cfgdir:
        home = environ.get('HOME')
        if not home:
            return None
        cfgdir = join(home, '.config')
    fn = join(cfgdir, 'accelerator', 'config')
    try:
        fh = open(fn, 'r', encoding='utf-8')
    except IOError:
        return None
    with fh:
        from configparser import ConfigParser
        cfg = ConfigParser()
        cfg.optionxform = str  # case sensitive (don't downcase aliases)
        cfg.read_file(fh)
        if 'alias' in cfg:
            return cfg['alias']
    return None
Example #8
def _open(fn, _mode):
    # nested helper: ignore the passed mode, use the mode/encoding/errors
    # captured from the enclosing scope
    return open(fn, mode, encoding=encoding, errors=errors)
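
Example #8 only makes sense inside an enclosing function; a minimal self-contained sketch of the same closure pattern:

def make_opener(mode, encoding=None, errors=None):
    # Returns an opener that ignores whatever mode it is handed and uses ours.
    def _open(fn, _mode):
        return open(fn, mode, encoding=encoding, errors=errors)
    return _open

opener = make_opener('r', encoding='utf-8')
with opener('example.txt', 'rb') as fh:  # 'rb' is ignored; reads UTF-8 text
    print(fh.read())
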
Example #9
def load_config(filename):
    import os
    from functools import partial
    from accelerator.shell import UserError
    # resolve_listen, interpolate, fixup_listen and DotDict come from the
    # surrounding module.

    key = None
    multivalued = {'workdirs', 'method packages', 'interpreters'}
    required = {'slices', 'logfile', 'workdirs', 'method packages'}
    known = {
        'target workdir', 'listen', 'urd', 'result directory',
        'source directory', 'project directory'
    } | required | multivalued
    cfg = {key: [] for key in multivalued}
    cfg['listen'] = '.socket.dir/daemon', None

    class _E(Exception):
        pass

    def parse_pair(thing, val):
        a = val.split()
        if len(a) != 2 or not a[1].startswith('/'):
            raise _E("Invalid %s specification %r (expected 'name /path')" % (
                thing,
                val,
            ))
        return a

    def check_interpreter(val):
        if val[0] == 'DEFAULT':
            raise _E("Don't override DEFAULT interpreter")
        if not os.path.isfile(val[1]):
            raise _E('%r does not exist' % (val, ))

    parsers = dict(
        slices=int,
        workdirs=partial(parse_pair, 'workdir'),
        interpreters=partial(parse_pair, 'interpreter'),
        listen=resolve_listen,
        urd=resolve_listen,
    )
    # NB: the key must match the config key 'interpreters', or the checker
    # never runs.
    checkers = dict(interpreters=check_interpreter, )

    try:
        with open(filename, 'r', encoding='utf-8') as fh:
            for lineno, line in enumerate(fh, 1):
                line = line.split('#', 1)[0].rstrip()
                if not line.strip():
                    continue
                if line == line.strip():
                    if ':' not in line:
                        raise _E('Expected a ":"')
                    key, val = line.split(':', 1)
                    if key not in known:
                        raise _E('Unknown key %r' % (key, ))
                else:
                    if not key:
                        raise _E('First line indented')
                    val = line
                val = interpolate(val).strip()
                if val:
                    if key in parsers:
                        val = parsers[key](val)
                    if key in checkers:
                        checkers[key](val)
                    if key in multivalued:
                        cfg[key].append(val)
                    else:
                        if key in cfg:
                            raise _E("%r doesn't take multiple values" %
                                     (key, ))
                        cfg[key] = val
        lineno = None

        missing = set()
        for req in required:
            if not cfg[req]:
                missing.add(req)
        if missing:
            raise _E('Missing required keys %r' % (missing, ))

        # Reformat result a bit so the new format doesn't require code changes all over the place.
        rename = {
            'target workdir': 'target_workdir',
            'method packages': 'method_directories',
            'source directory': 'source_directory',
            'result directory': 'result_directory',
            'project directory': 'project_directory',
        }
        res = DotDict({rename.get(k, k): v for k, v in cfg.items()})
        if 'target_workdir' not in res:
            res.target_workdir = res.workdirs[0][0]
        if 'project_directory' not in res:
            res.project_directory = os.path.dirname(filename)
        res.workdirs = dict(res.workdirs)
        if res.target_workdir not in res.workdirs:
            raise _E('target workdir %r not in defined workdirs %r' % (
                res.target_workdir,
                set(res.workdirs),
            ))
        res.interpreters = dict(res.interpreters)
        res.listen, res.url = fixup_listen(res.project_directory, res.listen)
        if res.get('urd'):
            res.urd_listen, res.urd = fixup_listen(res.project_directory,
                                                   res.urd, True)
        else:
            res.urd_listen, res.urd = None, None
    except _E as e:
        if lineno is None:
            prefix = 'Error in %s:\n' % (filename, )
        else:
            prefix = 'Error on line %d of %s:\n' % (
                lineno,
                filename,
            )
        raise UserError(prefix + e.args[0])

    return res
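
A sketch of a config file this parser accepts (hypothetical values; an indented line continues the key above it):

slices: 4
logfile: daemon.log
workdirs:
        dev /tmp/wd-dev
method packages:
        dev.methods

After cfg = load_config('accelerator.conf'), cfg.target_workdir falls back to the first workdir name ('dev') and cfg.method_directories holds the package list.
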
Example #10
def load_config(filename):
    import os
    import shlex
    from accelerator.error import UserError
    # resolve_listen, interpolate, fixup_listen and DotDict come from the
    # surrounding module.

    multivalued = {'workdirs', 'method packages', 'interpreters'}
    required = {'slices', 'workdirs', 'method packages'}
    known = {
        'target workdir', 'listen', 'urd', 'board listen', 'result directory',
        'input directory', 'project directory'
    } | required | multivalued
    cfg = {key: [] for key in multivalued}

    def fixpath(fn, realpath=True):
        # convert relative path to absolute wrt location of config file
        p = os.path.join(project_directory, fn)
        if realpath:
            p = os.path.realpath(p)
        else:
            p = os.path.normpath(p)
        return p

    class _E(Exception):
        pass

    def parse_workdir(val):
        return val[0], fixpath(val[1])

    def parse_interpreter(val):
        return val[0], fixpath(val[1], False)

    def check_interpreter(val):
        if val[0] == 'DEFAULT':
            raise _E("Don't override DEFAULT interpreter")
        if not os.path.isfile(val[1]):
            raise _E('%r does not exist' % (val, ))

    def check_workdirs(val):
        name, path = val
        if name in (v[0] for v in cfg['workdirs']):
            raise _E('Workdir %s redefined' % (name, ))
        if path in (v[1] for v in cfg['workdirs']):
            raise _E('Workdir path %r re-used' % (path, ))

    parsers = {
        'slices': (['count'], int),
        'workdirs': (['name', 'path'], parse_workdir),
        'interpreters': (['name', 'path'], parse_interpreter),
        'listen': (['path or [host]:port'], resolve_listen),
        'urd': (['path or [host]:port'], resolve_listen),
        'board listen': (['path or [host]:port'], resolve_listen),
        'input directory': (['path'], fixpath),
        'result directory': (['path'], fixpath),
    }
    checkers = dict(
        interpreters=check_interpreter,  # key must match the config key
        workdirs=check_workdirs,
    )

    with open(filename, 'r', encoding='utf-8') as fh:
        lines = list(enumerate(fh, 1))

    def parse(handle):
        key = None
        for n, line in lines:
            lineno[0] = n
            line_stripped = line.strip()
            if not line_stripped or line_stripped[0] == '#':
                continue
            if line == line.lstrip():
                if ':' not in line:
                    raise _E('Expected a ":"')
                key, val = line.split(':', 1)
                if key not in known:
                    raise _E('Unknown key %r' % (key, ))
            else:
                if not key:
                    raise _E('First line indented')
                val = line
            val = shlex.split(interpolate(val), posix=True, comments=True)
            if val:
                handle(key, val)

    def just_project_directory(key, val):
        if key == 'project directory':
            if len(val) != 1:
                raise _E(
                    "%s takes a single value path (maybe you meant to quote it?)"
                    % (key, ))
            project_directory[0] = val[0]

    def everything(key, val):
        if key in parsers:
            args, p = parsers[key]
            if len(val) != len(args):
                if len(args) == 1:
                    raise _E(
                        "%s takes a single value %s (maybe you meant to quote it?)"
                        % (key, args[0]))
                else:
                    raise _E("%s takes %d values (expected %s, got %r)" %
                             (key, len(args), ' '.join(args), val))
            if len(args) == 1:
                val = val[0]
            val = p(val)
        elif len(val) == 1:
            val = val[0]
        else:
            raise _E("%s takes a single value (maybe you meant to quote it?)" %
                     (key, ))
        if key in checkers:
            checkers[key](val)
        if key in multivalued:
            cfg[key].append(val)
        else:
            if key in cfg:
                raise _E("%r doesn't take multiple values" % (key, ))
            cfg[key] = val

    try:
        project_directory = [os.path.dirname(filename)]
        lineno = [None]
        parse(just_project_directory)
        lineno = [None]
        project_directory = os.path.realpath(project_directory[0])
        parse(everything)
        lineno = [None]

        missing = set()
        for req in required:
            if not cfg[req]:
                missing.add(req)
        if missing:
            raise _E('Missing required keys %r' % (missing, ))

        # Reformat result a bit so the new format doesn't require code changes all over the place.
        rename = {
            'target workdir': 'target_workdir',
            'method packages': 'method_directories',
            'input directory': 'input_directory',
            'result directory': 'result_directory',
            'project directory': 'project_directory',
            'board listen': 'board_listen',
        }
        res = DotDict({rename.get(k, k): v for k, v in cfg.items()})
        if 'listen' not in res:
            res.listen = '.socket.dir/server', None
        if 'target_workdir' not in res:
            res.target_workdir = res.workdirs[0][0]
        if 'project_directory' not in res:
            res.project_directory = os.path.dirname(filename)
        res.project_directory = os.path.realpath(res.project_directory)
        res.workdirs = dict(res.workdirs)
        if res.target_workdir not in res.workdirs:
            raise _E('target workdir %r not in defined workdirs %r' % (
                res.target_workdir,
                set(res.workdirs),
            ))
        res.interpreters = dict(res.interpreters)
        res.listen, res.url = fixup_listen(res.project_directory, res.listen)
        if res.get('urd'):
            res.urd_listen, res.urd = fixup_listen(res.project_directory,
                                                   res.urd, True)
        else:
            res.urd_listen, res.urd = None, None
        res.board_listen, _ = fixup_listen(
            res.project_directory,
            res.get('board_listen', ('.socket.dir/board', None)))
    except _E as e:
        if lineno[0] is None:
            prefix = 'Error in %s:\n' % (filename, )
        else:
            prefix = 'Error on line %d of %s:\n' % (
                lineno[0],
                filename,
            )
        raise UserError(prefix + e.args[0])

    return res
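
Example #10 tokenizes values with shlex, so values containing spaces must be quoted; a sketch (hypothetical paths):

input directory: "/data/my input files"
workdirs:
        dev "/tmp/work dirs/dev"

Unquoted, each path would split into several tokens and trigger the "maybe you meant to quote it?" error.
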
Example #11
def execute_process(workdir, jobid, slices, result_directory, common_directory, source_directory, index=None, workdirs=None, daemon_url=None, subjob_cookie=None, parent_pid=0):
	# g, status, iowrapper, dataset, blob, WORKDIRS and the other bare names
	# used below come from this module's imports (accelerator internals).
	WORKDIRS.update(workdirs)

	g.job = jobid
	setproctitle('launch')
	path = os.path.join(workdir, jobid)
	try:
		os.chdir(path)
	except Exception:
		print("Cannot cd to workdir", path)
		exit(1)

	g.params = params = job_params()
	method_ref = import_module(params.package+'.a_'+params.method)
	g.sliceno = -1

	g.job = CurrentJob(jobid, params, result_directory, source_directory)
	g.slices = slices

	g.options          = params.options
	g.datasets         = params.datasets
	g.jobids           = params.jobids

	method_ref.options  = params.options
	method_ref.datasets = params.datasets
	method_ref.jobids   = params.jobids

	g.daemon_url       = daemon_url
	g.running          = 'launch'
	status._start('%s %s' % (jobid, params.method,), parent_pid)

	def dummy():
		pass

	prepare_func   = getattr(method_ref, 'prepare'  , dummy)
	analysis_func  = getattr(method_ref, 'analysis' , dummy)
	synthesis_func = getattr(method_ref, 'synthesis', dummy)

	synthesis_needs_analysis = 'analysis_res' in getarglist(synthesis_func)

	fd2pid, names, masters, slaves = iowrapper.setup(slices, prepare_func is not dummy, analysis_func is not dummy)
	def switch_output():
		fd = slaves.pop()
		os.dup2(fd, 1)
		os.dup2(fd, 2)
		os.close(fd)
	iowrapper.run_reader(fd2pid, names, masters, slaves)
	for fd in masters:
		os.close(fd)

	# A chain must be finished from the back, so sort on that.
	sortnum_cache = {}
	def dw_sortnum(name):
		if name not in sortnum_cache:
			dw = dataset._datasetwriters[name]
			if dw.previous and dw.previous.startswith(jobid + '/'):
				pname = dw.previous.split('/')[1]
				num = dw_sortnum(pname) + 1
			else:
				num = 0
			sortnum_cache[name] = num
		return sortnum_cache[name]

	prof = {}
	if prepare_func is dummy:
		prof['prepare'] = 0 # prepare was skipped; record 0 so the key exists
	else:
		t = time()
		switch_output()
		g.running = 'prepare'
		g.subjob_cookie = subjob_cookie
		setproctitle(g.running)
		with status.status(g.running):
			g.prepare_res = method_ref.prepare(**args_for(method_ref.prepare))
			to_finish = [dw.name for dw in dataset._datasetwriters.values() if dw._started]
			if to_finish:
				with status.status("Finishing datasets"):
					for name in sorted(to_finish, key=dw_sortnum):
						dataset._datasetwriters[name].finish()
		c_fflush()
		prof['prepare'] = time() - t
	switch_output()
	setproctitle('launch')
	from accelerator.extras import saved_files
	if analysis_func is dummy:
		prof['per_slice'] = []
		prof['analysis'] = 0
	else:
		t = time()
		g.running = 'analysis'
		g.subjob_cookie = None # subjobs are not allowed from analysis
		with status.status('Waiting for all slices to finish analysis') as update:
			g.update_top_status = update
			prof['per_slice'], files, g.analysis_res = fork_analysis(slices, analysis_func, args_for(analysis_func), synthesis_needs_analysis, slaves)
			del g.update_top_status
		prof['analysis'] = time() - t
		saved_files.update(files)
	t = time()
	g.running = 'synthesis'
	g.subjob_cookie = subjob_cookie
	setproctitle(g.running)
	with status.status(g.running):
		synthesis_res = synthesis_func(**args_for(synthesis_func))
		if synthesis_res is not None:
			blob.save(synthesis_res, temp=False)
		if dataset._datasetwriters:
			with status.status("Finishing datasets"):
				for name in sorted(dataset._datasetwriters, key=dw_sortnum):
					dataset._datasetwriters[name].finish()
	if dataset._datasets_written:
		with open('datasets.txt', 'w', encoding='utf-8') as fh:
			for name in dataset._datasets_written:
				fh.write(name)
				fh.write(u'\n')
	c_fflush()
	t = time() - t
	prof['synthesis'] = t

	from accelerator.subjobs import _record
	return None, (prof, saved_files, _record)
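
A hedged sketch of the method-module shape this launcher drives (hypothetical method file; any of the three functions may be omitted, in which case the dummy is used):

options = {}

def prepare():
	return 'shared state'  # exposed to later phases as prepare_res

def analysis(sliceno, prepare_res):
	return sliceno  # per-slice results are collected into analysis_res

def synthesis(analysis_res):
	return sum(analysis_res)  # a non-None return is saved via blob.save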