Example #1
def get_settings(client_id, name="", cv=None, config_path=()):
    import os

    from dace.config import Config

    if cv is None:
        clientpath = "./client_configs/" + client_id + ".conf"
        if os.path.isfile(clientpath):
            Config.load(clientpath)
        else:
            Config.load()

    if cv is None:
        cv = Config.get()
    ret = {}
    for cname, cval in sorted(cv.items()):
        cpath = tuple(config_path) + (cname,)
        try:
            meta = Config.get_metadata(*cpath)

            # A dict contains more elements
            if meta['type'] == 'dict':
                ret[cname] = {
                    "value": get_settings(client_id, cname, cval, cpath),
                    "meta": meta
                }
                continue
            # Other values can be included directly
            ret[cname] = {"value": cval, "meta": meta}
        except KeyError:
            print('WARNING: No metadata for configuration key', cpath)

    return ret
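
A minimal usage sketch (the client ID is a placeholder; it assumes the
./client_configs layout used above):

settings = get_settings("client-1")
for key, entry in settings.items():
    # Nested dicts hold further settings; leaves carry the value directly
    if entry["meta"]["type"] != "dict":
        print(key, "=", entry["value"])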
Example #2
def main():
    import argparse
    import threading

    parser = argparse.ArgumentParser()
    parser.add_argument("-l",
                        "--localhost",
                        action="store_true",
                        help="Bind to localhost only")

    parser.add_argument(
        "-r",
        "--remotedace",
        action="store_true",
        help="Use ssh commands instead of locally running dace")

    parser.add_argument("-rd",
                        "--restoredace",
                        action="store_true",
                        help="Restore the backup file")

    parser.add_argument(
        "-e",
        "--executor",
        action="store_true",
        help="Run as an executor server instead of DIODE server")

    parser.add_argument("-p", "--port", type=int, help="Port to listen on")

    args = parser.parse_args()

    if args.restoredace:
        from dace.config import Config
        Config.load("./dace.conf.bak")
        Config.save()

    # Make the flag visible to the module-level request handlers
    global remote_execution
    remote_execution = args.remotedace

    es = ExecutorServer()
    es_ref.append(es)  # keep a module-level reference for the request handlers

    if not args.executor:
        app.run(host='localhost' if args.localhost else "0.0.0.0",
                debug=True,
                port=args.port,
                use_reloader=False)

        es.stop()
    else:
        import atexit

        # Ensure the executor server is stopped on interpreter exit
        atexit.register(es.stop)

        # Wait for an event that will never arrive (passive wait)
        event = threading.Event()
        event.wait()
Example #3
        def runner():
            print("Trying to get lock")
            with self._run_cv:
                yield "Run starting\n"

                with config_lock:
                    from dace.config import Config
                    Config.load(config_path)
                    # Copy the config - this allows releasing the config lock
                    # without suffering from potential side effects
                    copied_config = ConfigCopy(Config._config)

                self._slot_available = False
                dace_state.set_is_compiled(False)

                terminal_queue = multiprocessing.Queue()
                async_executor = AsyncExecutor(remote=remote_execution)
                async_executor.autoquit = True
                async_executor.executor.output_queue = terminal_queue
                async_executor.executor.set_config(copied_config)
                async_executor.run_async(dace_state)
                # Ask the worker to quit once the queued run has finished
                async_executor.to_proc_message_queue.put("forcequit")

                while async_executor.running_proc.is_alive():
                    try:
                        new = terminal_queue.get(timeout=1)
                        yield new
                    except queue.Empty:
                        # Timed out; re-check whether the sub-process is alive
                        continue

                # Flush remaining outputs
                while not terminal_queue.empty():
                    try:
                        yield terminal_queue.get(timeout=1)
                    except queue.Empty:
                        break

                with self._oplock:
                    # Delete from the tasklist
                    del self._task_dict[runindex]

                    # Output instrumentation report, if exists
                    if (async_executor.running_proc.exitcode == 0
                            and dace_state.sdfg.is_instrumented()):
                        report = dace_state.sdfg.get_latest_report()
                        yield '\nInstrumentation report:\n%s\n\n' % report

                    yield ('Run finished with exit code %d' %
                           async_executor.running_proc.exitcode)

                    self._slot_available = True
Example #4
def set_settings(settings_array, client_id):
    import os

    from dace.config import Config

    # Create the per-client config directory if it does not exist yet
    os.makedirs("./client_configs", exist_ok=True)
    clientpath = "./client_configs/" + client_id + ".conf"

    if os.path.isfile(clientpath):
        Config.load(clientpath)
    else:
        Config.load()

    for path, val in settings_array.items():
        # Keys encode the configuration hierarchy with "/" separators
        keys = path.split("/")
        Config.set(*keys, value=val)

    Config.save(clientpath)
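
A usage sketch for the setter (the key path and value are illustrative;
keys use "/" as the hierarchy separator, matching the path.split("/")
above):

set_settings({"execution/general/repetitions": 5}, "client-1")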
Example #5
def compileProgram(request, language, perfopts=None):
    if not request.json or (('code' not in request.json) and
                            ('sdfg' not in request.json)):
        print("[Error] No input code provided, cannot continue")
        abort(400)

    errors = []
    optpath = request.json.get('optpath')
    sdfg_props = request.json.get('sdfg_props')

    if perfopts is None:
        perf_mode = request.json.get('perf_mode')
    else:
        perf_mode = perfopts

    client_id = request.json['client_id']

    sdfg_dict = {}
    sdfg_eval_order = []

    # Lock the config. The config may be modified while holding this lock,
    # but it MUST be restored before the lock is released.
    with config_lock:

        from dace.config import Config
        config_path = "./client_configs/" + client_id + ".conf"
        if os.path.isfile(config_path):
            Config.load(config_path)
        else:
            Config.load()

        dace_state = None
        in_sdfg = None
        if "sdfg" in request.json:
            in_sdfg = request.json['sdfg']
            if isinstance(in_sdfg, list):
                if len(in_sdfg) > 1:
                    # TODO: Allow multiple sdfg inputs
                    raise NotImplementedError("More than 1 SDFG provided")

                in_sdfg = in_sdfg[0]

            if isinstance(in_sdfg, str):
                in_sdfg = json.loads(in_sdfg)

            if isinstance(in_sdfg, dict):
                # Generate callbacks (needed for elements referencing others)
                def loader_callback(name: str):
                    # Check if already available and if yes, return it
                    if name in sdfg_dict:
                        return sdfg_dict[name]

                    # Else: This function has to recreate the given sdfg
                    sdfg_dict[name] = dace.SDFG.from_json(
                        in_sdfg[name], {
                            'sdfg': None,
                            'callback': loader_callback
                        })
                    sdfg_eval_order.append(name)
                    return sdfg_dict[name]

                for k, v in in_sdfg.items():
                    # Leave it be if the sdfg was already created
                    # (this might happen with SDFG references)
                    if k in sdfg_dict: continue
                    if isinstance(v, str):
                        v = json.loads(v)
                    sdfg_dict[k] = dace.SDFG.from_json(
                        v, {
                            'sdfg': None,
                            'callback': loader_callback
                        })
                    sdfg_eval_order.append(k)
            else:
                in_sdfg = dace.SDFG.from_json(in_sdfg)
                sdfg_dict[in_sdfg.name] = in_sdfg
        else:
            print("Using code to compile")
            code = request.json['code']
            if isinstance(code, list):
                if len(code) > 1:
                    print("More than 1 code file provided!")
                    abort(400)
                code = code[0]
            if language == "octave":
                statements = octave_frontend.parse(code, debug=False)
                statements.provide_parents()
                statements.specialize()
                sdfg = statements.generate_code()
                sdfg.set_sourcecode(code, "matlab")
            elif language == "dace":
                dace_state = create_DaceState(code, sdfg_dict, errors)

        # DaceState keys SDFGs by their variable names in the DaCe code, which
        # is not descriptive enough here, so re-key the dict by SDFG name.
        copied_dict = {}
        for k, v in sdfg_dict.items():
            copied_dict[v.name] = v
        sdfg_dict = copied_dict

        if len(errors) == 0:
            if optpath is not None:
                for sdfg_name, op in optpath.items():
                    try:
                        sp = sdfg_props[sdfg_name]
                    except (KeyError, TypeError):
                        # On any lookup error, just ignore the properties
                        sp = None
                    print("Applying opts for " + sdfg_name)
                    print("Dict: " + str(sdfg_dict.keys()))
                    sdfg_dict[sdfg_name] = applyOptPath(sdfg_dict[sdfg_name],
                                                        op,
                                                        sdfg_props=sp)

        code_tuple_dict = {}
        # Deep-copy the SDFG (codegen may change the SDFG it operates on)
        codegen_sdfgs = copy.deepcopy(sdfg_dict)
        codegen_sdfgs_dace_state = copy.deepcopy(sdfg_dict)
        if len(errors) == 0:
            if sdfg_eval_order:
                sdfg_eval = [(n, codegen_sdfgs[n])
                             for n in reversed(sdfg_eval_order)]
            else:
                sdfg_eval = codegen_sdfgs.items()

            for n, s in sdfg_eval:
                try:
                    if Config.get_bool('diode', 'general',
                                       'library_autoexpand'):
                        s.expand_library_nodes()

                    code_tuple_dict[n] = codegen.generate_code(s)
                except dace.sdfg.NodeNotExpandedError as ex:
                    code_tuple_dict[n] = [str(ex)]
                except Exception:  # Forward exception to output code
                    code_tuple_dict[n] = [
                        'Code generation failed:\n' + traceback.format_exc()
                    ]

        if dace_state is None:
            if "code" in request.json:
                in_code = request.json['code']
            else:
                in_code = ""
            dace_state = DaceState(in_code, "tmp.py", remote=remote_execution)
            dace_state.set_sdfg(
                list(codegen_sdfgs_dace_state.values())[0],
                list(codegen_sdfgs_dace_state.keys())[0])
            if len(dace_state.errors) > 0:
                print("ERRORS: " + str(dace_state.errors))
                errors.extend(dace_state.errors)

        # The config won't save back on its own, and we don't want it to - these changes are transient

        if len(errors) > 0:
            return errors

        # Only return top-level SDFG
        return ({k: v
                 for k, v in sdfg_dict.items()
                 if v.parent is None}, code_tuple_dict, dace_state)
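
For reference, a request body this endpoint would accept might look as
follows (a sketch only: the field names come from the lookups above, the
values are invented):

example_request_json = {
    "client_id": "client-1",
    "code": ["@dace.program\ndef axpy(a, x, y):\n    y[:] = a * x + y"],
    "optpath": None,     # optional: transformations to apply per SDFG
    "sdfg_props": None,  # optional: properties for those transformations
    "perf_mode": None,   # optional: ignored when perfopts is passed in
}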
Example #6
    def addRun(self, client_id, compilation_output_tuple, more_options):
        config_path = "./client_configs/" + client_id + ".conf"
        os.makedirs("./client_configs/", exist_ok=True)
        if not os.path.isfile(config_path):
            # Config not (yet) available, load default and copy
            with config_lock:
                from dace.config import Config
                Config.load()
                Config.save(config_path)

        if isinstance(compilation_output_tuple, str):
            # Group command
            gc = compilation_output_tuple
            val = {
                'cid': client_id,
                'cmd': 'control',
                'index': self._run_num,
                'operation': None,
                'config_path': config_path,
                'state': "pending"
            }
            if gc == "start":
                val['operation'] = 'startgroup'
            elif gc == "end":
                val['operation'] = 'endgroup'
            else:
                # Unknown group operation: report the error to the client
                def g():
                    yield '{ "error": "Unknown group operation" }'

                return g

            with self._oplock:
                self._executor_queue.put(val)
                self._task_dict[self._run_num] = val
                self._run_num += 1
            return

        with self._oplock:
            val = {
                'index': self._run_num,
                'type': 'run',
                'cid': client_id,
                'config_path': config_path,
                'cmd': 'run',
                'cot': compilation_output_tuple,
                'opt': more_options,
                'state': 'pending',
                'reset-perfdata': False
            }
            self._executor_queue.put(val)

            self._task_dict[self._run_num] = val
            self._run_num += 1

        def error_gen():
            # Not an actual error; informs the client that the run was queued
            yield '{ "error": "Run was scheduled. Please poll until ready or longpoll." }'

        return error_gen
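
The returned generator function can be drained by the caller, for example
(a sketch; es is an ExecutorServer instance and the arguments are
placeholders):

gen = es.addRun("client-1", compilation_output_tuple, more_options)
if gen is not None:  # group commands may return nothing
    for chunk in gen():
        print(chunk)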
Example #7
    stdout, stderr = process.communicate()
    if Config.get_bool('debugprint'):
        print(stdout.decode('utf-8'), flush=True)
        if stderr is not None:
            print(stderr.decode('utf-8'), flush=True)
    if output_stream is not None:
        output_stream.write(stdout.decode('utf-8'))
        output_stream.flush()
    output.write(stdout.decode('utf-8'))
    if stderr is not None:
        output.write(stderr.decode('utf-8'))

    # If the process failed, raise an exception with the collected output
    if process.returncode != 0:
        raise subprocess.CalledProcessError(process.returncode, command,
                                            output.getvalue())


# Allow configuring and compiling a prepared build folder from the commandline.
# This is useful for remote execution.
if __name__ == "__main__":
    import argparse

    argparser = argparse.ArgumentParser()
    argparser.add_argument("path", type=str)
    argparser.add_argument("outname", type=str)
    args = vars(argparser.parse_args())

    Config.load(os.path.join(args["path"], "dace.conf"))

    configure_and_compile(args["path"], args["outname"])
Example #8
        def runner():
            print("Trying to get lock")
            with self._run_cv:
                output_feeder("Run starting\n")

                perfmode = perfopts['mode']
                perfcores = perfopts['core_counts']

                with config_lock:
                    from dace.config import Config
                    Config.load(config_path)
                    if perfmode == "noperf":
                        Config.set("instrumentation",
                                   "enable_papi",
                                   value=False)
                    else:
                        Config.set("instrumentation",
                                   "enable_papi",
                                   value=True)
                        Config.set("instrumentation",
                                   "papi_mode",
                                   value=perfmode)
                        Config.set(
                            "instrumentation",
                            "sql_database_file",
                            value=ExecutorServer.getPerfdataDir(client_id) +
                            "/perfdata.db")
                        Config.set("instrumentation",
                                   "thread_nums",
                                   value=str(perfcores))

                        # Check if perfcounters are available
                        from dace.codegen.instrumentation.papi import PAPISettings, PerfPAPIInfo
                        ppi = PerfPAPIInfo()
                        ppi.load_info()
                        if not ppi.check_counters(
                            [PAPISettings.perf_default_papi_counters()]):
                            yield ('{"error": "PAPI Counter check failed. '
                                   'Either your machine does not provide the '
                                   'required counters or /proc/sys/kernel/'
                                   'perf_event_paranoid is not set correctly"}')
                            del self._task_dict[runindex]
                            self._slot_available = True
                            return

                    # Copy the config - this allows releasing the config lock without suffering from potential side effects
                    copied_config = ConfigCopy(Config._config)

                self._slot_available = False
                dace_state.set_is_compiled(False)

                async_executor = AsyncExecutor(remote=remote_execution)
                async_executor.autoquit = True
                async_executor.executor.output_generator = output_feeder
                async_executor.executor.set_config(copied_config)
                async_executor.run_async(dace_state)
                # Ask the worker to quit once the queued run has finished
                async_executor.to_thread_message_queue.put("forcequit")

                # `terminal_queue` is expected to exist in the enclosing
                # scope, fed by `output_feeder`
                while async_executor.running_thread.is_alive():
                    try:
                        new = terminal_queue.get(block=True, timeout=1)
                        yield new
                    except queue.Empty:
                        # Timed out; re-check whether the thread is alive
                        continue

                # Flush remaining outputs
                while not terminal_queue.empty():
                    try:
                        yield terminal_queue.get(block=True, timeout=1)
                    except queue.Empty:
                        break

                with self._oplock:
                    # Delete from the tasklist
                    del self._task_dict[runindex]

                    print("Run done, notifying")
                    self._slot_available = True
Example #9
    def consume_programs(self):
        import time

        try:
            cmd = self._executor_queue.get(timeout=3)

            if cmd['cmd'] == "run":
                # Wait until a run slot becomes available
                while True:
                    with self._run_cv:
                        if self._slot_available:
                            break
                    time.sleep(0.5)

                with self._run_cv:
                    self._slot_available = False
                    print("Running task")

                    self._task_dict[cmd['index']]['state'] = 'running'

                    runner = self.run(
                        cmd['cot'], {
                            'index': cmd['index'],
                            'config_path': cmd['config_path'],
                            'client_id': cmd['cid'],
                            'reset-perfdata': cmd['reset-perfdata'],
                            'perfopts': cmd['opt']['perfopts']
                        })
                    print("Wait for oplock")
                    with self._oplock:
                        self._current_runs[cmd['cid']] = runner

                    # Wait a predefined time for clients to catch up on the outputs
                    time.sleep(RUNNING_TIMEOUT)
                    with self._oplock:
                        run_locally = cmd['cid'] in self._current_runs

                    if run_locally:
                        print("running locally")

                        def tmp():
                            with self._oplock:
                                del self._current_runs[cmd['cid']]
                                # Start a fresh output buffer for this client
                                self._orphaned_runs.setdefault(
                                    cmd['cid'], []).append([])
                            print("Starting runner")
                            for x in runner():
                                self._orphaned_runs[cmd['cid']][-1] += x

                        # This holds locks (and output should be generated even
                        # if nobody asks for it immediately), so it runs once
                        # the timeout for direct interception expires
                        tmp()
            elif cmd['cmd'] == 'control':
                # Control operations that must be synchronous with execution (e.g. for cleanup, storage operations)
                with self._oplock:
                    self._task_dict[cmd['index']]['state'] = 'running'

                if cmd['operation'] == 'startgroup':
                    from diode.db_scripts.db_setup import db_setup
                    perf_tmp_dir = ExecutorServer.getPerfdataDir(cmd['cid'])

                    # Clean database and create tables
                    db_setup(perf_tmp_dir)

                elif cmd['operation'] == 'remove_group':
                    perfdir = ExecutorServer.getPerfdataDir(cmd['cid'])
                    perfdata_path = os.path.join(perfdir, "perfdata.db")
                    os.remove(perfdata_path)
                    os.rmdir(perfdir)

                elif cmd['operation'] == 'endgroup':
                    print("Ending group")
                    from diode.db_scripts.sql_to_json import MergeRuns, Conserver
                    from dace.config import Config

                    config_path = cmd['config_path']

                    with config_lock:
                        Config.load(config_path)
                        repetitions = Config.get("execution", "general",
                                                 "repetitions")

                    perf_tmp_dir = ExecutorServer.getPerfdataDir(cmd['cid'])
                    perfdata_path = os.path.join(perf_tmp_dir, "perfdata.db")
                    can_path = os.path.join(perf_tmp_dir, 'current.can')

                    mr = MergeRuns()
                    mr.mergev2(perfdata_path)
                    print("Merged into " + perfdata_path)

                    cons = Conserver()
                    # TODO: Add sdfgs
                    cons.conserveAll(perfdata_path,
                                     can_path,
                                     "",
                                     repetitions,
                                     clear_existing=False)

                    print("Merged and Conserved!")
                    self._perfdata_available[cmd['cid']] = can_path

                with self._oplock:
                    del self._task_dict[cmd['index']]

        except queue.Empty:
            return
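
Presumably consume_programs is driven from a polling loop on a worker
thread, along these lines (a sketch, not the actual server wiring; the
_shutdown flag is hypothetical):

import threading

def executor_loop(es):
    # consume_programs returns quickly when the queue stays empty (3 s timeout)
    while not getattr(es, "_shutdown", False):
        es.consume_programs()

threading.Thread(target=executor_loop, args=(es,), daemon=True).start()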