Example #1
def load_cfg(fn):
	global cfg

	import sys
	from accelerator.configfile import load_config
	from accelerator.job import WORKDIRS

	cfg = load_config(fn)
	for k, v in cfg.workdirs.items():
		if WORKDIRS.get(k, v) != v:
			print("WARNING: %s overrides workdir %s" % (fn, k,), file=sys.stderr)
		WORKDIRS[k] = v
	return cfg
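A minimal usage sketch for load_cfg(), assuming an Accelerator configuration file at the hypothetical path below. Besides returning the parsed config, the function populates the shared WORKDIRS mapping as a side effect, which is what the override warning refers to.

from accelerator.job import WORKDIRS

cfg = load_cfg('/path/to/accelerator.conf')  # hypothetical config path
for name, path in WORKDIRS.items():
    print('%s -> %s' % (name, path))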
Example #2
	def __init__(self, server_url, verbose=False, flags=None, subjob_cookie=None, infoprints=False, print_full_jobpath=False):
		self.url = server_url
		self.subjob_cookie = subjob_cookie
		self.history = []
		self.verbose = verbose
		self.monitor = None
		self.flags = flags or []
		self.job_method = None
		self.last_error_time = None
		# Workspaces should be per Automata
		from accelerator.job import WORKDIRS
		WORKDIRS.update(self.list_workdirs())
		self.update_method_info()
		self.clear_record()
		# Only do this when run from shell.
		if infoprints:
			from accelerator.workarounds import SignalWrapper
			siginfo = SignalWrapper(['SIGINFO', 'SIGUSR1'])
			self.siginfo_check = siginfo.check
		else:
			self.siginfo_check = lambda: False
		self.print_full_jobpath = print_full_jobpath
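A hedged usage sketch, assuming the constructor above belongs to accelerator.build.Automata (the comment above refers to the instance as an Automata) and that an Accelerator server is reachable at the hypothetical URL below.

from accelerator.build import Automata  # assumption: the class this __init__ belongs to

a = Automata('http://localhost:8888', verbose=True, infoprints=True)  # hypothetical server URL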
Example #3
    def __init__(self, config, options, server_url):
        """
        Setup objects:

          Methods

          WorkSpaces

        """
        self.config = config
        self.debug = options.debug
        self.server_url = server_url
        self._update_methods()
        self.target_workdir = self.config['target_workdir']
        self.workspaces = {}
        for name, path in self.config.workdirs.items():
            self.workspaces[name] = workspace.WorkSpace(
                name, path, config.slices)
        WORKDIRS.clear()
        WORKDIRS.update({k: v.path for k, v in self.workspaces.items()})
        self.DataBase = database.DataBase(self)
        self.update_database()
        self.broken = False
Example #4
def _pool_init(workdirs):
    # The pool system will send SIGTERM when the pool is closed, so
    # restore the original behaviour for that.
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    WORKDIRS.update(workdirs)
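_pool_init() is shaped like a multiprocessing pool initializer: it restores the default SIGTERM handler and seeds the worker's WORKDIRS mapping. A minimal sketch of how such an initializer is typically wired up, assuming WORKDIRS is the accelerator.job mapping; the pool size and the direct use of multiprocessing.Pool are illustrative assumptions.

import multiprocessing

from accelerator.job import WORKDIRS

# Illustrative assumption: hand each worker a snapshot of the parent's workdir
# mapping so WORKDIRS is populated before any task runs in that worker.
pool = multiprocessing.Pool(processes=4,
                            initializer=_pool_init,
                            initargs=(dict(WORKDIRS),))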
Example #5
def execute_process(workdir,
                    jobid,
                    slices,
                    concurrency,
                    result_directory,
                    common_directory,
                    input_directory,
                    index=None,
                    workdirs=None,
                    server_url=None,
                    subjob_cookie=None,
                    parent_pid=0):
    WORKDIRS.update(workdirs)

    g.job = jobid
    setproctitle('launch')
    path = os.path.join(workdir, jobid)
    try:
        os.chdir(path)
    except Exception:
        print("Cannot cd to workdir", path)
        exit(1)

    g.params = params = job_params()
    method_ref = import_module(params.package + '.a_' + params.method)
    g.sliceno = -1

    g.job = CurrentJob(jobid, params, result_directory, input_directory)
    g.slices = slices

    g.options = params.options
    g.datasets = params.datasets
    g.jobs = params.jobs

    method_ref.options = params.options
    method_ref.datasets = params.datasets
    method_ref.jobs = params.jobs

    g.server_url = server_url
    g.running = 'launch'
    statmsg._start('%s %s' % (
        jobid,
        params.method,
    ), parent_pid)

    def dummy():
        pass

    prepare_func = getattr(method_ref, 'prepare', dummy)
    analysis_func = getattr(method_ref, 'analysis', dummy)
    synthesis_func = getattr(method_ref, 'synthesis', dummy)

    synthesis_needs_analysis = 'analysis_res' in getarglist(synthesis_func)

    fd2pid, names, masters, slaves = iowrapper.setup(
        slices, prepare_func is not dummy, analysis_func is not dummy)

    def switch_output():
        fd = slaves.pop()
        os.dup2(fd, 1)
        os.dup2(fd, 2)
        os.close(fd)

    if analysis_func is dummy:
        q = None
    else:
        q = LockFreeQueue()
    iowrapper.run_reader(fd2pid, names, masters, slaves, q=q)
    for fd in masters:
        os.close(fd)

    # A chain must be finished from the back, so sort on that.
    sortnum_cache = {}

    def dw_sortnum(name):
        if name not in sortnum_cache:
            dw = dataset._datasetwriters.get(name)
            if not dw:  # manually .finish()ed
                num = -1
            elif dw.previous and dw.previous.startswith(jobid + '/'):
                pname = dw.previous.split('/')[1]
                num = dw_sortnum(pname) + 1
            else:
                num = 0
            sortnum_cache[name] = num
        return sortnum_cache[name]

    prof = {}
    if prepare_func is dummy:
        prof['prepare'] = 0  # truthish!
    else:
        t = monotonic()
        switch_output()
        g.running = 'prepare'
        g.subjob_cookie = subjob_cookie
        setproctitle(g.running)
        with statmsg.status(g.running):
            g.prepare_res = method_ref.prepare(**args_for(method_ref.prepare))
            to_finish = [
                dw.name for dw in dataset._datasetwriters.values()
                if dw._started
            ]
            if to_finish:
                with statmsg.status("Finishing datasets"):
                    for name in sorted(to_finish, key=dw_sortnum):
                        dataset._datasetwriters[name].finish()
        c_fflush()
        prof['prepare'] = monotonic() - t
    switch_output()
    setproctitle('launch')
    from accelerator.extras import saved_files
    if analysis_func is dummy:
        prof['per_slice'] = []
        prof['analysis'] = 0
    else:
        t = monotonic()
        g.running = 'analysis'
        g.subjob_cookie = None  # subjobs are not allowed from analysis
        with statmsg.status(
                'Waiting for all slices to finish analysis') as update:
            g.update_top_status = update
            prof['per_slice'], files, g.analysis_res = fork_analysis(
                slices, concurrency, analysis_func, args_for(analysis_func),
                synthesis_needs_analysis, slaves, q)
            del g.update_top_status
        prof['analysis'] = monotonic() - t
        saved_files.update(files)
    t = monotonic()
    g.running = 'synthesis'
    g.subjob_cookie = subjob_cookie
    setproctitle(g.running)
    with statmsg.status(g.running):
        synthesis_res = synthesis_func(**args_for(synthesis_func))
        if synthesis_res is not None:
            blob.save(synthesis_res, temp=False)
        if dataset._datasetwriters:
            with statmsg.status("Finishing datasets"):
                for name in sorted(dataset._datasetwriters, key=dw_sortnum):
                    dataset._datasetwriters[name].finish()
    if dataset._datasets_written:
        blob.save(dataset._datasets_written,
                  'DS/LIST',
                  temp=False,
                  _hidden=True)
    c_fflush()
    t = monotonic() - t
    prof['synthesis'] = t

    from accelerator.subjobs import _record
    return None, (prof, saved_files, _record)
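execute_process() drives the three optional hooks of a method module (package.a_<method>): prepare, analysis and synthesis, where synthesis receives analysis_res only if it names it as a parameter (the getarglist() check above). A hedged sketch of such a module, assuming the argument names sliceno and prepare_res are among those args_for() can inject; the module name and return values are made up.

# a_example.py (hypothetical method module)

def prepare():
    return 'hello'

def analysis(sliceno, prepare_res):
    # runs once per slice in a forked worker
    return '%s from slice %d' % (prepare_res, sliceno)

def synthesis(analysis_res):
    # analysis_res iterates over the per-slice analysis() return values
    return list(analysis_res)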
Example #6
def _name2job(cfg, n):
    if n.startswith(':'):
        # resolve through urd
        assert cfg.urd, 'No urd configured'
        a = n[1:].rsplit(':', 1)
        if len(a) == 1:
            raise JobNotFound('looks like a partial :urdlist:[entry] spec')
        entry = a[1] or '-1'
        try:
            entry = int(entry, 10)
        except ValueError:
            pass
        path, tildes = split_tildes(a[0])
        path = path.split('/')
        if len(path) < 3:
            path.insert(0, environ.get('USER', 'NO-USER'))
        if len(path) < 3:
            path.append('latest')
        path = '/'.join(map(url_quote, path))
        try:
            urdres = urd_call_w_tildes(cfg, path, tildes)
        except UrdError as e:
            print(e, file=sys.stderr)
            urdres = None
        if not urdres:
            raise JobNotFound('urd list %r not found' % (a[0], ))
        from accelerator.build import JobList
        joblist = JobList(Job(e[1], e[0]) for e in urdres.joblist)
        res = joblist.get(entry)
        if not res:
            raise JobNotFound('%r not found in %s' % (
                entry,
                path,
            ))
        return res
    if re.match(r'[^/]+-\d+$', n):
        # Looks like a jobid
        return Job(n)
    m = re.match(r'([^/]+)-LATEST$', n)
    if m:
        # Looks like workdir-LATEST
        wd = m.group(1)
        if wd not in WORKDIRS:
            raise NoSuchWorkdirError('Not a valid workdir: "%s"' % (wd, ))
        path = join(WORKDIRS[wd], n)
        try:
            n = readlink(path)
        except OSError as e:
            raise JobNotFound('Failed to read %s: %s' % (
                path,
                e,
            ))
        return Job(n)
    if '/' not in n:
        # Must be a method then
        return method2job(cfg, n)
    if exists(join(n, 'setup.json')):
        # Looks like the path to a jobdir
        path, jid = split(realpath(n))
        job = Job(jid)
        if WORKDIRS.get(job.workdir, path) != path:
            print("### Overriding workdir %s to %s" % (
                job.workdir,
                path,
            ))
        WORKDIRS[job.workdir] = path
        return job
    raise JobNotFound("Don't know what to do with %r." % (n, ))
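A hedged illustration of the name forms _name2job() resolves, following the branches above; cfg is assumed to be a loaded configuration object and all workdir names, job ids and paths are hypothetical.

job = _name2job(cfg, 'dev-42')              # plain jobid: job 42 in workdir "dev"
job = _name2job(cfg, 'dev-LATEST')          # follow the dev-LATEST symlink in workdir "dev"
job = _name2job(cfg, 'csvimport')           # no '/' and not a jobid: treat as a method name
job = _name2job(cfg, ':example:')           # last entry of the urd list "example" (entry defaults to -1)
job = _name2job(cfg, 'workdirs/dev/dev-7')  # path to a job directory containing setup.json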