Example #1
    def __init__(self, testbed, platform=None, quiet=False):

        self.total_job_size = None

        self.testbed = testbed
        self.platform = choose_platform(platform, self.testbed.platform())

        self.quiet = quiet

        if not self.quiet:
            self._progress = Progress("building file")
            self._jobs_executed = 0

        if getattr(testbed, "generate_per_node_id_binary", False):
            from multiprocessing.pool import ThreadPool
            self.pool = ThreadPool()
        else:
            self.pool = None
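For context, choose_platform is project code that this snippet does not define. A minimal sketch of what it plausibly does, assuming it simply prefers an explicitly requested platform over the testbed's default (a hypothetical helper, not the project's actual implementation):

def choose_platform(requested, default):
    # Hypothetical: prefer an explicitly requested platform,
    # otherwise fall back to the testbed's default.
    return requested if requested is not None else default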
Example #2
    def _run(self, args, map_fn):
        files = [
            os.path.join(args.results_dir, result_folder)
            for result_folder in os.listdir(args.results_dir)
            if os.path.isdir(os.path.join(args.results_dir, result_folder))
        ]

        if map_fn is map:
            progress = Progress("run")
            progress.start(len(files))
        else:
            progress = None

        fn = partial(self._do_run, progress=progress)

        # Flatten the per-file result lists, skipping empty entries
        return [
            x
            for result_list in map_fn(fn, files) if result_list is not None
            for x in result_list if x is not None
        ]
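Note that _run only attaches a Progress tracker when map_fn is the serial built-in map; any parallel map gets progress=None, since worker processes cannot usefully share the tracker. A usage sketch, assuming a runner instance and args from the surrounding class:

from multiprocessing import Pool

results = runner._run(args, map)           # serial: progress is reported

with Pool() as pool:
    results = runner._run(args, pool.map)  # parallel: progress suppressed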
Example #3
    def __init__(self,
                 platform,
                 log_mode,
                 generate_per_node_id_binary=False,
                 quiet=False):

        self.total_job_size = None

        self.platform = platform
        self.log_mode = log_mode

        self.quiet = quiet

        if not self.quiet:
            self._progress = Progress("building file")
            self._jobs_executed = 0

        if generate_per_node_id_binary:
            from multiprocessing.pool import ThreadPool
            self.pool = ThreadPool()
        else:
            self.pool = None
Example #4
class Runner(object):
    required_safety_periods = True

    executable = 'python3 -OO -X faulthandler run.py'

    local_log = "local.log"

    def __init__(self):
        self._progress = Progress("running locally")
        self.total_job_size = None
        self._jobs_executed = 0

    def add_job(self, options, name, estimated_time):

        if not self._progress.has_started():
            self._progress.start(self.total_job_size)

        # Check for overwriting results files
        if os.path.exists(name):
            raise RuntimeError(
                f"Would overwrite {name}, terminating to avoid doing so.")

        print(
            f'{self.executable} {options} > {name} (overwriting={os.path.exists(name)})'
        )

        with open(self.local_log, 'w') as log_file, \
             open(name, 'w') as out_file:
            subprocess.call(f"{self.executable} {options}",
                            stdout=out_file,
                            stderr=log_file,
                            shell=True)

        self._progress.print_progress(self._jobs_executed)

        self._jobs_executed += 1

    def mode(self):
        return "PARALLEL"
Example #5
File: analysis.py Project: MBradbury/slp
    def run_single(self, summary_file, result_finder, flush=False, **kwargs):
        """Perform the analysis and write the output to the :summary_file:"""
        def worker(path):
            result = self.analyse_and_summarise_path_wrapped(
                path, flush, **kwargs)

            # Skip 0 length results
            if result.number_of_repeats == 0:
                raise RuntimeError("There are 0 repeats.")

            line = "|".join(fn(result) for fn in self.values.values())

            # Try to force a cleanup of the memory
            result = None

            # Try to recover some memory
            try_to_free_memory()

            return line

        summary_file_path = os.path.join(self.results_directory, summary_file)

        # The output files we need to process.
        # These are sorted to give anyone watching the output a sense of progress.
        files = sorted(result_finder(self.results_directory))

        with open(summary_file_path, 'w') as out:

            print("|".join(self.values.keys()), file=out)

            progress = Progress("analysing file")
            progress.start(len(files))

            for num, infile in enumerate(files):
                path = os.path.join(self.results_directory, infile)

                print(f'Analysing {path}')

                try:
                    line = worker(path)

                    print(line, file=out)
                except Exception as ex:
                    print(f"Error processing {path} with {ex}")
                    print(traceback.format_exc())

                progress.print_progress(num)

            print(f'Finished writing {summary_file}')
Example #6
    def __init__(self, sim_name):
        self.sim_name = sim_name
        self._sim = submodule_loader.load(simulator.sim, self.sim_name)
        self._progress = Progress("building file")
        self.total_job_size = None
        self._jobs_executed = 0
Example #7
class Runner(object):
    required_safety_periods = True

    def __init__(self, sim_name):
        self.sim_name = sim_name
        self._sim = submodule_loader.load(simulator.sim, self.sim_name)
        self._progress = Progress("building file")
        self.total_job_size = None
        self._jobs_executed = 0

    def add_job(self, options, name, estimated_time):
        print(name)

        if not self._progress.has_started():
            self._progress.start(self.total_job_size)

        # Create the target directory
        target_directory = name[:-len(".txt")]

        data.util.create_dirtree(target_directory)

        # Parse options
        options = shlex.split(options)
        module, argv = options[0], options[1:]
        module_path = module.replace(".", "/")

        a = self.parse_arguments(module, argv)

        # Build the binary
        print(f"Building for {self.sim_name}")

        build_result = self._sim.build(module, a)

        print(f"Build finished with result {build_result}...")

        # Previously there have been problems with the built files not
        # having been properly flushed to disk before attempting to move them.

        print(f"Copying files from {module_path} to {target_directory}...")

        files_to_copy = (
            "Analysis.py",
            "Arguments.py",
            "CommandLine.py",
            "Metrics.py",
            "__init__.py",
        )
        for filename in files_to_copy:
            shutil.copy(os.path.join(module_path, filename), target_directory)

        files_to_move = {
            "tossim": (
                "app.xml",
                "_TOSSIM.so",
                "TOSSIM.py",
            ),
            "cooja": (
                "main.exe",
                "main.ihex",
            ),
        }
        for filename in files_to_move.get(self.sim_name, []):
            shutil.move(os.path.join(module_path, filename), target_directory)

        print("All Done!")

        self._progress.print_progress(self._jobs_executed)

        self._jobs_executed += 1

    def mode(self):
        return "CLUSTER"

    @staticmethod
    def parse_arguments(module, argv):
        arguments_module = algorithm.import_algorithm(module,
                                                      extras=["Arguments"])

        a = arguments_module.Arguments.Arguments()
        a.parse(argv)
        return a

    @staticmethod
    def build_arguments(a):
        build_args = a.build_arguments()

        configuration = Configuration.create(a.args.configuration, a.args)

        build_args.update(configuration.build_arguments())

        return build_args
Example #8
class Runner:
    required_safety_periods = False

    def __init__(self,
                 platform,
                 log_mode,
                 generate_per_node_id_binary=False,
                 quiet=False):

        self.total_job_size = None

        self.platform = platform
        self.log_mode = log_mode

        self.quiet = quiet

        if not self.quiet:
            self._progress = Progress("building file")
            self._jobs_executed = 0

        if generate_per_node_id_binary:
            from multiprocessing.pool import ThreadPool
            self.pool = ThreadPool()
        else:
            self.pool = None

    def _detect_os_of_build(self, module_path):
        # Each OS's build leaves a distinctive file behind
        if os.path.exists(
                os.path.join(module_path, "build", self.platform, "app.c")):
            return "tinyos"

        if os.path.exists(os.path.join(module_path, "symbols.c")):
            return "contiki"

        raise RuntimeError("Failed to detect OS")

    def add_job(self, options, name, estimated_time=None):

        if not self.quiet:
            print(name)

            if not self._progress.has_started():
                self._progress.start(self.total_job_size)

        # Create the target directory
        target_directory = name[:-len(".txt")]

        data.util.create_dirtree(target_directory)

        # Get the job arguments

        # If options is a tuple then we have just been given the
        # module name and the parsed arguments.
        if isinstance(options, tuple):
            module, a = options
        else:
            options = shlex.split(options)
            module, argv = options[0], options[1:]

            a = self.parse_arguments(module, argv)

        module_path = module.replace(".", "/")

        # Check that the topology supports the chosen platform
        # Some topologies only support one platform type
        configuration = Configuration.create(a.args.configuration, a.args)

        if hasattr(configuration.topology, "platform"):
            if configuration.topology.platform != self.platform:
                raise RuntimeError(
                    f"The topology's platform ({configuration.topology.platform}) "
                    f"does not match the chosen platform ({self.platform})")

        # Build the binary

        # These are the arguments that will be passed to the compiler
        build_args = self.build_arguments(a)
        build_args["PLATFORM"] = self.platform

        if not self.quiet:
            print(f"Building for {build_args}")

        build_result = Builder.build_actual(module_path,
                                            self.platform,
                                            enable_fast_serial=False,
                                            **build_args)

        if not self.quiet:
            print(
                f"Build finished with result {build_result}, waiting for a bit..."
            )

        # For some reason, we seemed to be copying files before
        # they had finished being written. So wait a bit here.
        time.sleep(1)

        if not self.quiet:
            print(f"Copying files to '{target_directory}'")

        # Detect the OS from the presence of one of these files:
        os_of_build = self._detect_os_of_build(module_path)

        files_to_copy = {
            "tinyos": (
                "app.c",
                "ident_flags.txt",
                "main.exe",
                "main.ihex",
                "main.srec",
                "tos_image.xml",
                "wiring-check.xml",
            ),
            "contiki": (
                "main.exe",
                "symbols.h",
                "symbols.c",
            ),
        }
        for filename in files_to_copy[os_of_build]:
            try:
                src = os.path.join(module_path, "build", self.platform,
                                   filename)
                dest = target_directory

                shutil.copy(src, dest)

                #print("Copying {} -> {}".format(src, dest))
            except IOError as ex:
                # Ignore expected failures
                if filename not in {"main.srec", "wiring-check.xml"}:
                    print(f"Not copying {filename} due to {ex}")

        # Copy any generated class files
        for class_file in glob.glob(os.path.join(module_path, "*.class")):
            try:
                shutil.copy(class_file, target_directory)
            except shutil.Error as ex:
                if str(ex).endswith("are the same file"):
                    continue
                else:
                    raise

        if self.pool is not None:
            target_ihex = os.path.join(target_directory, "main.ihex")

            if not self.quiet:
                print(
                    f"Creating per node id binaries using '{target_ihex}'...")

            def fn(node_id):
                output_ihex = os.path.join(target_directory,
                                           f"main-{node_id}.ihex")
                self.create_tos_node_id_ihex(target_ihex, output_ihex, node_id)

            self.pool.map(fn, configuration.topology.nodes)

        if not self.quiet:
            print("All Done!")

            self._progress.print_progress(self._jobs_executed)

            self._jobs_executed += 1

        return a, module, module_path, target_directory

    def mode(self):
        return "PLATFORM"

    def platform_name(self):
        return self.platform

    def create_tos_node_id_ihex(self, source, target, node_id):
        try:
            (objcopy, objdump) = PLATFORM_TOOLS[self.platform]
        except KeyError:
            raise KeyError(
                f"Unable to find the platform tools for '{self.platform}'")

        command = " ".join([
            "tos-set-symbols", "--objcopy {}".format(objcopy),
            "--objdump {}".format(objdump), "--target ihex", source, target,
            "TOS_NODE_ID={}".format(node_id),
            "ActiveMessageAddressC__addr={}".format(node_id)
        ])

        #print(command)
        subprocess.check_call(command, shell=True)

    @staticmethod
    def parse_arguments(module, argv):
        arguments_module = algorithm.import_algorithm(module,
                                                      extras=["Arguments"])

        a = arguments_module.Arguments.Arguments()
        a.parse(argv)

        return a

    def build_arguments(self, a):
        build_args = a.build_arguments()

        configuration = Configuration.create(a.args.configuration, a.args)
        build_args.update(configuration.build_arguments())

        try:
            build_args.update(LOG_MODES[self.log_mode])
        except KeyError:
            raise RuntimeError(
                f"Unknown testbed log mode {self.log_mode}. "
                f"Available: {list(LOG_MODES.keys())}")

        return build_args
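PLATFORM_TOOLS and LOG_MODES are module-level tables that this snippet indexes but does not define. From the usage above, their shape is presumably as follows; the concrete entries here are illustrative assumptions, not the project's actual values:

# Hypothetical shapes inferred from usage:
# PLATFORM_TOOLS maps a platform name to its (objcopy, objdump) tool pair
PLATFORM_TOOLS = {
    "micaz": ("avr-objcopy", "avr-objdump"),
    "telosb": ("msp430-objcopy", "msp430-objdump"),
}

# LOG_MODES maps a log mode name to extra build arguments for the compiler
LOG_MODES = {
    "printf": {"USE_SERIAL_PRINTF": 1},
    "serial": {"USE_SERIAL_MESSAGES": 1},
}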
Example #9
File: analysis.py Project: MBradbury/slp
    def run(self,
            summary_file,
            result_finder,
            nprocs=None,
            testbed=False,
            flush=False,
            **kwargs):
        """Perform the analysis and write the output to the :summary_file:.
        If :nprocs: is not specified then the number of CPU cores will be used.
        """

        if testbed:
            # Do not attempt to verify that same-seed runs have the same results.
            # Testbed runs are not deterministic like that!
            kwargs["verify_seeds"] = False

            # Need to remove parameters that testbed runs do not have
            #for name in simulator.common.testbed_missing_global_parameter_names:
            #    del self.values[name]

        # Skip the overhead of the queue with 1 process.
        # This also allows easy profiling
        if nprocs == 1:
            return self.run_single(summary_file, result_finder, flush,
                                   **kwargs)

        def worker(inqueue, outqueue):
            while True:
                item = inqueue.get()

                if item is None:
                    return

                path = item

                try:
                    result = self.analyse_and_summarise_path(
                        path, flush, **kwargs)

                    # Skip 0 length results
                    if result.number_of_repeats == 0:
                        outqueue.put((path, None, "There are 0 repeats"))
                        continue

                    line = "|".join(fn(result) for fn in self.values.values())

                    # Try to force a cleanup of the memory
                    result = None

                    outqueue.put((path, line, None))

                    # Try to recover some memory
                    try_to_free_memory()

                except Exception as ex:
                    outqueue.put((path, None, (ex, traceback.format_exc())))

        if nprocs is None:
            nprocs = multiprocessing.cpu_count()

            print(f"Using {nprocs} threads")

        inqueue = multiprocessing.Queue()
        outqueue = multiprocessing.Queue()

        pool = multiprocessing.Pool(nprocs, worker, (inqueue, outqueue))

        summary_file_path = os.path.join(self.results_directory, summary_file)

        # The output files we need to process.
        # These are sorted to give anyone watching the output a sense of progress.
        files = sorted(result_finder(self.results_directory))

        total = len(files)

        for infile in files:
            path = os.path.join(self.results_directory, infile)
            inqueue.put(path)

        # Push one sentinel per worker so each worker loop terminates
        for i in range(nprocs):
            inqueue.put(None)

        with open(summary_file_path, 'w') as out:

            print("|".join(self.values.keys()), file=out)

            progress = Progress("analysing file")
            progress.start(len(files))

            for num in range(total):
                (path, line, error) = outqueue.get()

                print(f'Analysing {path}')

                if error is None:
                    print(line, file=out)
                else:
                    (ex, tb) = error
                    print(f"Error processing {path} with {ex}")
                    print(tb)

                progress.print_progress(num)

            print(f'Finished writing {summary_file_path}')

        inqueue.close()
        inqueue.join_thread()

        outqueue.close()
        outqueue.join_thread()

        pool.close()
        pool.join()
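A design note on the pattern above: multiprocessing.Pool(nprocs, worker, (inqueue, outqueue)) uses the pool's initializer argument as the worker loop itself; each process runs worker until it reads a None sentinel, and no tasks are ever submitted to the pool. A more conventional equivalent, sketched with the same names, would manage the processes directly:

procs = [
    multiprocessing.Process(target=worker, args=(inqueue, outqueue))
    for _ in range(nprocs)
]
for p in procs:
    p.start()

# ... feed inqueue and drain outqueue exactly as above ...

for p in procs:
    p.join()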
Example #10
    def __init__(self):
        self._progress = Progress("running locally")
        self.total_job_size = None
        self._jobs_executed = 0
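Taken together, these examples exercise a small, consistent Progress interface: construct with a label, start(total) once the job count is known, has_started() to guard against double starts, and print_progress(i) after each unit of work. A minimal sketch of a class satisfying that contract, inferred from usage only (not the project's actual implementation):

import time

class Progress:
    def __init__(self, label):
        self.label = label
        self.total = None
        self._start_time = None

    def has_started(self):
        return self._start_time is not None

    def start(self, total):
        # total may be None if the caller never set total_job_size
        self.total = total
        self._start_time = time.time()

    def print_progress(self, completed):
        elapsed = time.time() - self._start_time
        if self.total:
            print(f"{self.label}: {completed}/{self.total} "
                  f"({elapsed:.1f}s elapsed)")
        else:
            print(f"{self.label}: {completed} done ({elapsed:.1f}s elapsed)")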