Code example #1
    def _run_comparison_table(self, args):
        results_to_compare = ('normal latency', 'ssd', 'captured', 'fake',
                              'received ratio', 'tfs', 'pfs')

        adaptive_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=results_to_compare)

        template_results = results.Results(template.result_file_path,
                                           parameters=('fake period',
                                                       'temp fake duration',
                                                       'pr(tfs)', 'pr(pfs)'),
                                           results=results_to_compare)

        result_table = comparison.ResultTable(template_results,
                                              adaptive_results)

        self._create_table(
            "adaptive-template-comparison", result_table,
            lambda x: x[2] not in {0.2, 0.3, 0.4})  #(fp, dur, ptfs, ppfs)

        self._create_table(
            "adaptive-template-comparison-low-prob", result_table,
            lambda x: x[2] in {0.2, 0.3, 0.4})  #(fp, dur, ptfs, ppfs)
Code example #2
    def _run_table(self, args):
        adaptive_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=(
                'sent',
                'delivered',
                'time taken',
                'normal latency',
                'ssd',
                'captured',
                'fake',
                'received ratio',
                'tfs',
                'pfs',
                'energy impact per node per second',
                #'norm(sent,time taken)', 'norm(norm(sent,time taken),network size)',
                #'norm(norm(norm(sent,time taken),network size),source rate)'
            ))

        result_table = fake_result.ResultTable(adaptive_results)

        self._create_table(self.algorithm_module.name + "-results",
                           result_table,
                           show=args.show)
Code example #3
File: results_transformer.py Project: MBradbury/slp
    def transform(self, result_names):

        all_result_names = tuple(set(result_names) | set(self.comparison_functions.keys()))

        module_results = [
            results.Results(
                self.sim_name, module.result_file_path(self.sim_name),
                parameters=module.local_parameter_names,
                results=all_result_names)

            for module
            in self.algorithm_modules
        ]

        for (module, module_result) in zip(self.algorithm_modules, module_results):
            self.safety_factor_indexes[module.name] = module_result.parameter_names.index("safety factor")

        if self.remove_redundant_parameters:
            self._remove_redundant_parameters(module_results)

        # Combine everything
        combined_results = self._combine_results(module_results)

        # Find the dominating data
        dominating_data, self.dominated_data = self._filter_strictly_worse(combined_results, all_result_names)

        # Split up the data back into the individual chunks
        split_results = self._convert_dominating_to_individual(dominating_data)

        # Reassign it
        for (module_result, module) in zip(module_results, self.algorithm_modules):
            module_result.data = split_results[module.name]
            module_result.global_parameter_names = self.global_parameter_names

        return module_results
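
The helpers called above (_combine_results, _filter_strictly_worse, _convert_dominating_to_individual) are not shown in this example. As a rough, hypothetical illustration only, the "dominating" versus "dominated" split can be read as a Pareto-dominance test over the selected result values; the sketch below uses names of my own and assumes larger values are better for every result, which will not hold for metrics such as 'captured'.

def pareto_split(rows):
    """Split rows (tuples of comparable result values) into (dominating, dominated).

    Illustrative stand-in only; not the repository's _filter_strictly_worse.
    """
    rows = list(rows)

    def dominates(a, b):
        # a dominates b if it is at least as good everywhere and strictly better somewhere
        return all(x >= y for (x, y) in zip(a, b)) and any(x > y for (x, y) in zip(a, b))

    dominating, dominated = [], []
    for row in rows:
        if any(dominates(other, row) for other in rows if other is not row):
            dominated.append(row)
        else:
            dominating.append(row)
    return dominating, dominated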
Code example #4
    def _run_table(self, args):
        result_file_path = self.get_results_file_path(args.sim,
                                                      testbed=args.testbed)

        adaptive_results = results.Results(
            args.sim,
            result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=(
                'repeats',
                #'sent', 'delivered',
                'norm(norm(fake,time taken),network size)',
                'time taken',
                'normal latency',
                'ssd',
                'captured',
                'fake',
                'received ratio',
                'attacker distance',
                #'tfs', 'pfs',
                #'norm(sent,time taken)', 'norm(norm(sent,time taken),network size)',
                #'norm(norm(norm(sent,time taken),network size),source rate)'
            ))

        result_table = fake_result.ResultTable(adaptive_results)

        self._create_table(self.algorithm_module.name + "-results",
                           result_table,
                           show=args.show)
Code example #5
    def _run_graph_bar(self, args):
        graph_parameters = {
            'normal latency': ('Normal Message Latency (ms)', 'right top'),
            'ssd': ('Sink-Source Distance (hops)', 'right top'),
            'captured': ('Capture Ratio (%)', 'right top'),
            'sent': ('Total Messages Sent', 'right top'),
            'received ratio': ('Receive Ratio (%)', 'right top'),
            'attacker distance': ('Meters', 'right top'),
            'norm(sent,time taken)': ('Messages Sent per Second', 'left top'),
            'norm(norm(sent,time taken),network size)':
            ('Messages Sent per Node per Second', 'left top'),
        }

        vary = "genetic header"

        parameters = self.algorithm_module.local_parameter_names
        if vary not in parameters:
            parameters += (vary, )

        slp_tdma_das_ga_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=parameters + ('fitness function', ),
            results=tuple(graph_parameters.keys()))

        def xextractor(xname):
            return "/".join(
                x.replace("slot-with-path-", "").replace(
                    "low-asymmetry", "low") for x in xname)

        def vextractor(vvalue):
            return int(
                vvalue.replace(".h", "").replace("dist",
                                                 "").replace("slot", ""))

        for (yaxis, (yaxis_label, key_position)) in graph_parameters.items():
            name = '{}-v-{}'.format(yaxis.replace(" ", "_"),
                                    "gh")  #vary.replace(" ", "-"))

            g = bar.Grapher(self.algorithm_module.graphs_path,
                            name,
                            xaxis=('communication model', 'fitness function'),
                            yaxis=yaxis,
                            vary=vary,
                            yextractor=scalar_extractor)

            g.nokey = True
            g.yaxis_label = yaxis_label
            g.key_position = key_position
            g.xextractor = xextractor
            g.vextractor = vextractor
            g.xtics_around_cluster = True

            g.create(slp_tdma_das_ga_results)

            summary.GraphSummary(
                os.path.join(self.algorithm_module.graphs_path, name),
                os.path.join(algorithm.results_directory_name,
                             '{}-{}'.format(self.algorithm_module.name,
                                            name))).run()
Code example #6
    def _run_average_graph(self, args):
        from data.graph import combine_versus

        graph_parameters = {
            'normal latency': ('Normal Message Latency (ms)', 'left top'),
            'ssd': ('Sink-Source Distance (hops)', 'left top'),
            'captured': ('Capture Ratio (%)', 'right top'),
            'sent': ('Total Messages Sent', 'left top'),
            'received ratio': ('Receive Ratio (%)', 'left bottom'),
        }

        phantom_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=tuple(graph_parameters.keys()),
            source_period_normalisation="NumSources"
        )

        custom_yaxis_range_max = {
            'captured': 80,
            'sent': 30000
        }

        combine = ["short walk length", "long walk length"]

        parameters = [
            ('source period', ' seconds'),
        ]

        for (parameter_name, parameter_unit) in parameters:
            for (yaxis, (yaxis_label, key_position)) in graph_parameters.items():

                name = '{}-v-{}-i-{}'.format(
                    yaxis.replace(" ", "_"),
                    parameter_name.replace(" ", "-"),
                    "=".join(combine).replace(" ", "-")
                )

                g = combine_versus.Grapher(
                    self.algorithm_module.graphs_path, name,
                    xaxis='network size', yaxis=yaxis, vary=parameter_name, combine=combine, combine_function=np.mean,
                    yextractor=scalar_extractor
                )

                g.xaxis_label = 'Network Size'
                g.yaxis_label = yaxis_label
                g.vary_label = parameter_name.title()
                g.vary_prefix = parameter_unit
                g.key_position = key_position

                if yaxis in custom_yaxis_range_max:
                    g.yaxis_range_max = custom_yaxis_range_max[yaxis]

                g.create(phantom_results)

                summary.GraphSummary(
                    self.algorithm_module.graphs_path,
                    self.algorithm_module.name + '-' + name
                ).run()
Code example #7
File: CommandLine.py Project: MBradbury/slp
    def _run_comparison_table(self, args):
        results_to_compare = ('normal latency', 'ssd', 'captured',
                              'fake', 'received ratio', 'tfs', 'pfs')

        our_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=results_to_compare)

        adaptive_spr_results = results.Results(
            adaptive_spr.result_file_path,
            parameters=('fake period', 'temp fake duration', 'pr(tfs)', 'pr(pfs)'),
            results=results_to_compare)

        result_table = comparison.ResultTable(adaptive_spr_results, our_results)

        self._create_table("{}-{}-comparison".format(self.algorithm_module.name, adaptive_spr.name), result_table)
Code example #8
    def _run_table(self, args):
        phantom_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=('normal latency', 'ssd', 'captured', 'sent', 'received ratio'))

        result_table = fake_result.ResultTable(phantom_results)

        self._create_table("{}-results".format(self.algorithm_module.name), result_table)
Code example #9
File: results_transformer.py Project: MBradbury/slp
    def transform(self, result_names):
        return [
            results.Results(
                module.result_file_path,
                parameters=module.local_parameter_names,
                results=result_names)

            for module
            in self.algorithm_modules
        ]
Code example #10
File: CommandLine.py Project: Chen-Gu/slp
    def _run_table(self, args):
        adaptive_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=('normal latency', 'ssd', 'attacker distance'))

        result_table = fake_result.ResultTable(adaptive_results)

        self._create_table(self.algorithm_module.name + "-results",
                           result_table)
Code example #11
File: CommandLine.py Project: Chen-Gu/slp
    def _run_graph(self, args):
        graph_parameters = {
            'normal latency': ('Normal Message Latency (ms)', 'left top'),
            'ssd': ('Sink-Source Distance (hops)', 'left top'),
            'captured': ('Capture Ratio (%)', 'left top'),
            'fake': ('Fake Messages Sent', 'left top'),
            'sent': ('Total Messages Sent', 'left top'),
            'received ratio': ('Receive Ratio (%)', 'left bottom'),
            'tfs': ('Number of TFS Created', 'left top'),
            'pfs': ('Number of PFS Created', 'left top'),
            'attacker distance': ('Meters', 'left top'),
            'good move ratio': ('Good Move Ratio (%)', 'right top'),
            'norm(norm(sent,time taken),num_nodes)':
            ('Messages Sent per node per second', 'right top'),
        }

        adaptive_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=tuple(graph_parameters.keys()),
            source_period_normalisation="NumSources")

        varying = [("source period", " seconds"), ("communication model", "~")]

        error_bars = set(
        )  # {'received ratio', 'good move ratio', 'norm(norm(sent,time taken),num_nodes)'}

        for (vary, vary_prefix) in varying:
            for (yaxis, (yaxis_label,
                         key_position)) in graph_parameters.items():
                name = '{}-v-{}'.format(yaxis.replace(" ", "_"),
                                        vary.replace(" ", "-"))

                g = versus.Grapher(self.algorithm_module.graphs_path,
                                   name,
                                   xaxis='network size',
                                   yaxis=yaxis,
                                   vary=vary,
                                   yextractor=scalar_extractor)

                g.xaxis_label = 'Network Size'
                g.yaxis_label = yaxis_label
                g.vary_label = vary.title()
                g.vary_prefix = vary_prefix

                g.error_bars = yaxis in error_bars

                #g.nokey = True
                g.key_position = key_position

                g.create(adaptive_results)

                summary.GraphSummary(
                    os.path.join(self.algorithm_module.graphs_path, name),
                    '{}-{}'.format(self.algorithm_module.name, name)).run()
Code example #12
File: CommandLine.py Project: MBradbury/slp
    def _run_table(self, args):
        selected_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=('normal latency', 'ssd', 'captured', 'fake',
                     'received ratio', 'tfs'))

        result_table = fake_result.ResultTable(selected_results)

        self._create_table(self.algorithm_module.name + "-results",
                           result_table)
Code example #13
File: CommandLine.py Project: MBradbury/slp
    def _run_table(self, args):
        phantom_walkabouts_results = results.Results(
            args.sim,
            self.algorithm_module.result_file_path(args.sim),
            parameters=self.algorithm_module.local_parameter_names,
            results=('normal latency', 'ssd', 'captured', 'sent',
                     'received ratio'))

        result_table = fake_result.ResultTable(phantom_walkabouts_results)

        self._create_table("{}-results".format(self.algorithm_module.name),
                           result_table,
                           orientation='landscape',
                           show=args.show)
Code example #14
File: CommandLine.py Project: Chen-Gu/slp
    def _run_table(self, args):
        protectionless_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=('sent', 'norm(norm(sent,time taken),network size)',
                     'normal latency', 'captured', 'received ratio'))

        fmt = TableDataFormatter(convert_to_stddev=args.show_stddev)

        result_table = fake_result.ResultTable(protectionless_results, fmt)

        self._create_table(self.algorithm_module.name + "-results",
                           result_table,
                           show=args.show)
Code example #15
File: CommandLine.py Project: Chen-Gu/slp
    def _run_table(self, args):
        template_results = results.Results(
            args.sim,
            self.algorithm_module.result_file_path(args.sim),
            parameters=self.algorithm_module.local_parameter_names,
            results=('normal latency', 'ssd', 'captured', 'fake',
                     'received ratio', 'tfs', 'pfs'))

        result_table = fake_result.ResultTable(template_results)

        self._create_table(self.algorithm_module.name + "-results",
                           result_table,
                           show=args.show,
                           param_filter=lambda fp, dur, ptfs, ppfs: ptfs not in
                           {0.2, 0.3, 0.4})
Code example #16
File: CommandLine.py Project: MBradbury/slp
    def _run_table(self, args):
        adaptive_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=(
                #'sent', 'time taken',
                'normal latency', 'ssd', 'captured',
                'fake', 'dummy normal', 'received ratio', 'tfs', 'pfs', 'tailfs'
                #'norm(sent,time taken)', 'norm(norm(sent,time taken),network size)',
                #'norm(norm(norm(sent,time taken),network size),source rate)'
            ))

        result_table = fake_result.ResultTable(adaptive_results)

        self._create_table(self.algorithm_module.name + "-results", result_table)
Code example #17
    def _run_graph(self, args):
        graph_parameters = {
            'normal latency': ('Normal Message Latency (ms)', 'left top'),
            'ssd': ('Sink-Source Distance (hops)', 'left top'),
            'captured': ('Capture Ratio (%)', 'left top'),
            'sent': ('Total Messages Sent', 'left top'),
            'received ratio': ('Receive Ratio (%)', 'left bottom'),
            'attacker distance': ('Meters', 'left top'),
            'crash': ('Number of crash messages sent', 'left top'),
        }

        slp_tdma_das_crash_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=tuple(graph_parameters.keys()),
            network_size_normalisation="UseNumNodes")

        for (vary, vary_prefix) in [("source period", " seconds")]:
            for (yaxis, (yaxis_label,
                         key_position)) in graph_parameters.items():
                name = '{}-v-{}'.format(yaxis.replace(" ", "_"),
                                        vary.replace(" ", "-"))

                g = versus.Grapher(self.algorithm_module.graphs_path,
                                   name,
                                   xaxis='network size',
                                   yaxis=yaxis,
                                   vary=vary,
                                   yextractor=scalar_extractor)

                g.xaxis_label = 'Number of Nodes'
                g.yaxis_label = yaxis_label
                g.vary_label = vary.title()
                g.vary_prefix = vary_prefix
                g.key_position = key_position

                g.create(slp_tdma_das_crash_results)

                summary.GraphSummary(
                    os.path.join(self.algorithm_module.graphs_path, name),
                    os.path.join(
                        algorithm.results_directory_name,
                        '{}-{}'.format(self.algorithm_module.name,
                                       name))).run()
Code example #18
File: CommandLine.py Project: Chen-Gu/slp
    def _run_graph(self, args):
        graph_parameters = {
            'normal latency': ('Normal Message Latency (ms)', 'left top'),
            'ssd': ('Sink-Source Distance (hops)', 'left top'),
            'captured': ('Capture Ratio (%)', 'left top'),
            'fake': ('Fake Messages Sent', 'left top'),
            'sent': ('Total Messages Sent', 'left top'),
            'received ratio': ('Receive Ratio (%)', 'left bottom'),
            'tfs': ('Number of TFS Created', 'left top'),
            'pfs': ('Number of PFS Created', 'left top'),
            'tailfs': ('Number of TailFS Created', 'left top'),
            'attacker distance':
            ('Attacker Distance From Source (Meters)', 'left top'),
        }

        adaptive_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=tuple(graph_parameters.keys()))

        for (yaxis, (yaxis_label, key_position)) in graph_parameters.items():
            name = '{}-v-source-period'.format(yaxis.replace(" ", "_"))

            g = versus.Grapher(self.algorithm_module.graphs_path,
                               name,
                               xaxis='network size',
                               yaxis=yaxis,
                               vary='source period',
                               yextractor=scalar_extractor)

            g.xaxis_label = 'Network Size'
            g.yaxis_label = yaxis_label
            g.vary_label = 'Source Period'
            g.vary_prefix = ' seconds'
            g.key_position = key_position

            g.create(adaptive_results)

            summary.GraphSummary(
                os.path.join(self.algorithm_module.graphs_path, name),
                os.path.join(algorithm.results_directory_name,
                             '{}-{}'.format(self.algorithm_module.name,
                                            name))).run()
Code example #19
    def __init__(self, sim_name, result_file, tafn_to_safety_period, fmt=None):
        self._sim_name = sim_name
        self._result_names = ('received ratio',
                              'normal latency', 'ssd', 'captured',
                              'time after first normal', 'repeats')

        self._results = results.Results(
            sim_name,
            result_file,
            parameters=tuple(),
            results=self._result_names
        )

        self.tafn_to_safety_period = tafn_to_safety_period

        self.fmt = fmt
        if fmt is None:
            from data.table.data_formatter import TableDataFormatter
            self.fmt = TableDataFormatter()
Code example #20
    def _run_graph(self, args):
        graph_parameters = {
            'safety period': ('Safety Period (seconds)', 'left top'),
            'time taken': ('Time Taken (seconds)', 'left top'),
            #'ssd': ('Sink-Source Distance (hops)', 'left top'),
            #'captured': ('Capture Ratio (%)', 'left top'),
            #'sent': ('Total Messages Sent', 'left top'),
            #'received ratio': ('Receive Ratio (%)', 'left bottom'),
        }

        protectionless_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=tuple(graph_parameters.keys()),
            source_period_normalisation="NumSources")

        for (yaxis, (yaxis_label, key_position)) in graph_parameters.items():
            name = '{}-v-configuration'.format(yaxis.replace(" ", "_"))

            g = versus.Grapher(self.algorithm_module.graphs_path,
                               name,
                               xaxis='network size',
                               yaxis=yaxis,
                               vary='configuration',
                               yextractor=scalar_extractor)

            g.generate_legend_graph = True

            g.xaxis_label = 'Network Size'
            g.yaxis_label = yaxis_label
            g.vary_label = ''
            g.vary_prefix = ''

            g.nokey = True
            g.key_position = key_position

            g.create(protectionless_results)

            summary.GraphSummary(
                os.path.join(self.algorithm_module.graphs_path, name),
                '{}-{}'.format(self.algorithm_module.name, name)).run()
Code example #21
File: CommandLine.py Project: MBradbury/slp
    def _run_graph(self, args):
        graph_parameters = {
            'normal latency': ('Normal Message Latency (ms)', 'left top'),
            'ssd': ('Sink-Source Distance (hops)', 'left top'),
            'captured': ('Capture Ratio (%)', 'right top'),
            'sent': ('Total Messages Sent', 'left top'),
            'received ratio': ('Receive Ratio (%)', 'left bottom'),
            'paths reached end': ('Paths Reached End (%)', 'right top'),
        }

        phantom_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=tuple(graph_parameters.keys()))

        parameters = [('source period', ' seconds'), ('walk length', ' hops')]

        for (parameter_name, parameter_unit) in parameters:
            for (yaxis, (yaxis_label,
                         key_position)) in graph_parameters.items():
                name = '{}-v-{}'.format(yaxis.replace(" ", "_"),
                                        parameter_name.replace(" ", "-"))

                g = versus.Grapher(self.algorithm_module.graphs_path,
                                   name,
                                   xaxis='network size',
                                   yaxis=yaxis,
                                   vary=parameter_name,
                                   yextractor=scalar_extractor)

                g.xaxis_label = 'Network Size'
                g.yaxis_label = yaxis_label
                g.vary_label = parameter_name.title()
                g.vary_prefix = parameter_unit
                g.key_position = key_position

                g.create(phantom_results)

                summary.GraphSummary(
                    os.path.join(self.algorithm_module.graphs_path, name),
                    self.algorithm_module.name + '-' + name).run()
Code example #22
    def _run_table(self, args):
        adaptive_results = results.Results(
            args.sim,
            self.algorithm_module.result_file_path(args.sim),
            parameters=self.algorithm_module.local_parameter_names,
            results=(
                #'sent',
                #'norm(norm(sent,time taken),network size)',
                'norm(norm(fake,time taken),network size)',
                'delivered',
                'time taken',
                'captured',
                'received ratio',  #'ssd', 'attacker distance',
                'fake nodes at end',
                'fake nodes at end when captured'))

        result_table = fake_result.ResultTable(adaptive_results)

        self._create_table(self.algorithm_module.name + "-results",
                           result_table,
                           show=args.show)
Code example #23
    def _run_scatter_graph(self, args):
        from data.graph import scatter

        graph_parameters = {
            'normal latency': ('Normal Message Latency (ms)', 'left top'),
            'ssd': ('Sink-Source Distance (hops)', 'left top'),
            'captured': ('Capture Ratio (%)', 'right top'),
            'sent': ('Total Messages Sent', 'left top'),
            'received ratio': ('Receive Ratio (%)', 'left bottom'),
        }

        phantom_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=tuple(graph_parameters.keys()),
            source_period_normalisation="NumSources"
        )

        combine = ["short walk length", "long walk length"]

        for (yaxis, (yaxis_label, key_position)) in graph_parameters.items():

            name = '{}-comb-{}'.format(yaxis.replace(" ", "_"), "=".join(combine).replace(" ", "-"))

            g = scatter.Grapher(
                self.algorithm_module.graphs_path, name,
                xaxis='network size', yaxis=yaxis, combine=combine,
                yextractor=scalar_extractor
            )

            g.xaxis_label = 'Network Size'
            g.yaxis_label = yaxis_label
            g.key_position = key_position

            g.create(phantom_results)

            summary.GraphSummary(
                self.algorithm_module.graphs_path,
                self.algorithm_module.name + '-' + name
            ).run()
Code example #24
    def _run_table(self, args):
        protectionless_results = results.Results(
            self.algorithm_module.result_file_path,
            parameters=self.algorithm_module.local_parameter_names,
            results=(
                'sent',
                'delivered',
                'time taken',
                #'energy impact',
                #'energy impact per node',
                'energy impact per node per second',
                'norm(norm(sent,time taken),num_nodes)',
                'normal latency',
                'ssd',
                'attacker distance',
            ))

        fmt = TableDataFormatter(convert_to_stddev=args.show_stddev)

        result_table = fake_result.ResultTable(protectionless_results, fmt)

        self._create_table(self.algorithm_module.name + "-results",
                           result_table,
                           show=args.show)
Code example #25
    def _load_existing_results(self, argument_names):
        print("Loading existing results...")
        results_file_path = self.algorithm_module.result_file_path(
            self.sim_name)
        try:
            results_summary = results.Results(
                self.sim_name,
                results_file_path,
                parameters=argument_names[len(self._global_parameter_names):],
                results=('repeats', ))

            # (size, config, attacker_model, noise_model, communication_model, distance, period) -> repeats
            self._existing_results = {
                tuple(map(str, k)): v
                for (k, v) in results_summary.parameter_set().items()
            }
        except IOError as e:
            message = str(e)
            if 'No such file or directory' in message:
                raise RuntimeError(
                    f"The results file {results_file_path} is not present. Perhaps rerun the command with '--no-skip-complete'?"
                )
            else:
                raise
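
A natural follow-on use of the _existing_results mapping built above is to skip parameter combinations that already have enough repeats. The helper below is hypothetical (not part of the repository) and assumes the value stored for each stringified parameter tuple is the repeat count, as the comment in the example suggests.

    def _needs_more_repeats(self, params, target_repeats):
        # params must be in the same order as the keys produced by parameter_set() above
        done = self._existing_results.get(tuple(map(str, params)), 0)
        return done < target_repeats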
Code example #26
    def __init__(self, data, style=STYLE_DEFAULT, *args, **kwds):

        # init wx.Dialog
        if style == STYLE_DEFAULT:
            kwds["style"] = wx.DEFAULT_DIALOG_STYLE
        elif style == STYLE_AUTO_START_CLOSE:
            kwds["style"] = wx.THICK_FRAME
        else:
            assert False
        wx.Dialog.__init__(self, *args, **kwds)

        # get parameters
        self.data = data
        self.style = style

        self.log = None
        self.docs = None
        self.results = None
        self.start = None
        self.stop = None

        if style == STYLE_DEFAULT:
            self.SetTitle("Script Runner")
            self.start = wx.Button(self, -1, "Start")
            self.stop = wx.Button(self, -1, "Stop")
        elif style == STYLE_AUTO_START_CLOSE:
            self.stop = wx.Button(self, -1, "Stop")
        else:
            assert False

        # bind event
        if self.stop is not None:
            self.Bind(wx.EVT_BUTTON, self.onStop, self.stop)
        if self.start is not None:
            self.Bind(wx.EVT_BUTTON, self.onStart, self.start)

        self.Bind(wx.EVT_CLOSE, self.onClose)

        # timer for activity gauge
        #        self.Bind(wx.EVT_TIMER, self.handler_timer)
        #        self.timer = wx.Timer(self)

        # process management
        self.process = None
        self.pid = None
        self.exit_code = None

        self.Bind(wx.EVT_END_PROCESS, self.OnProcessEnded)
        EVT_CUSTOM_STARTPROCESS(self, self.OnStartProcess)
        EVT_CUSTOM_ENDSCRIPTS(self, self.OnEndScripts)
        EVT_CUSTOM_EXECSTATUS(self, self.on_execstatus)

        # scripts runner
        self.evtScriptRun = threading.Event()
        self.evtReaderRun = threading.Event()
        self.thdScriptRun = threading.Thread(target=self.thread_scripts_runner,
                                             name="scripts_runner")

        if self.data[RUN_TYPE] == RUN_SCRIPT:
            self.results = dres.Results("tmp_controler")
        elif self.data[RUN_TYPE] == RUN_DOC:
            self.docs = ddoc.Documentation()
        else:
            raise Exception("No run type defined")

        # create controls
        self.lstboxScript = wx.ListBox(self, -1, size=wx.Size(400, 200))
        self.lstboxScript.SetFont(
            wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL,
                    wx.FONTWEIGHT_NORMAL))
        self.activity = wx.Gauge(self, -1, size=wx.Size(400, 30))

        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_1.Add((400, 20), 0, 0, 0)
        sizer_1.Add(self.lstboxScript, 0, wx.ALL, 5)
        sizer_1.Add(self.activity, 0, wx.ALL, 5)
        sizer_1.Add((400, 20), 0, 0, 0)

        if self.start is not None:
            sizer_1.Add(
                self.start, 0, wx.ALL | wx.ALIGN_BOTTOM
                | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
        if self.stop is not None:
            sizer_1.Add(
                self.stop, 0, wx.ALL | wx.ALIGN_BOTTOM
                | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)

        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        self.Layout()

        self.init_GUI()

        # Start
        if style == STYLE_DEFAULT:
            pass
        elif style == STYLE_AUTO_START_CLOSE:
            self.start_running_script()
        else:
            assert False
Code example #27
    def __init__(self, name, *p, **pp):

        wx.Panel.__init__(self, *p, **pp)

        self.tree = wx.TreeCtrl(self, -1)
        self.tree.SetFont(wx.Font(10, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
        self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnActivate, self.tree)

        self.Bind(wx.EVT_TREE_ITEM_MENU, self.OnItemMenu, self.tree)

        il = wx.ImageList(16, 16)
        self.im_script      = il.Add(wx.Bitmap("images/script.png", wx.BITMAP_TYPE_PNG))
        self.im_pool        = il.Add(wx.Bitmap("images/database.png", wx.BITMAP_TYPE_PNG))
        self.im_folder      = il.Add(wx.Bitmap("images/folder_database.png", wx.BITMAP_TYPE_PNG))
        self.im_project     = il.Add(wx.Bitmap("images/application_view_tile.png", wx.BITMAP_TYPE_PNG))

        self.im_script_ko   = il.Add(wx.Bitmap("images/script_delete.png", wx.BITMAP_TYPE_PNG))
        self.im_script_warn = il.Add(wx.Bitmap("images/script_error.png", wx.BITMAP_TYPE_PNG))
        self.im_script_ok   = il.Add(wx.Bitmap("images/script_go.png", wx.BITMAP_TYPE_PNG))

        self.im_result      = il.Add(wx.Bitmap("images/table_multiple.png", wx.BITMAP_TYPE_PNG))

        self.im_case_ok     = il.Add(wx.Bitmap("images/bullet_green.png", wx.BITMAP_TYPE_PNG))
        self.im_case_ko     = il.Add(wx.Bitmap("images/bullet_red.png", wx.BITMAP_TYPE_PNG))
        self.im_case        = il.Add(wx.Bitmap("images/bullet_purple.png", wx.BITMAP_TYPE_PNG))

        self.il = il
        self.tree.SetImageList(il)

        self.lst_res  = list()
        self.lst_dest = list()
        self.sel_item = None
        self.log      = None
        self.path     = None

        self.wildcard_dbm = "Project file (*.dbm)|*.dbm|All files (*.*)|*.*"
        self.wildcard_csv = "Csv file (*.csv)|*.csv|All files (*.*)|*.*"

        self.res = dres.Results(name)
        self.update()

        # toolbar
        tsize = (16, 16)
        self.tb1 = wx.ToolBar(self, -1)
        self.tb1.SetToolBitmapSize(tsize)

        bmp_new = wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_TOOLBAR, tsize)
        bmp_save = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_TOOLBAR, tsize)
        bmp_saveas = wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE_AS, wx.ART_TOOLBAR, tsize)
        bmp_open = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize)
        bmp_export_csv = wx.Bitmap("images/page_white_c.png", wx.BITMAP_TYPE_PNG)

        id_new = wx.NewId()
        id_save = wx.NewId()
        id_saveas = wx.NewId()
        id_open = wx.NewId()
        id_export_csv = wx.NewId()

        self.tb1.AddLabelTool(id_new,        "New",        bmp_new)
        self.tb1.AddSeparator()
        self.tb1.AddLabelTool(id_open,       "Open",       bmp_open)
        self.tb1.AddLabelTool(id_save,       "Save",       bmp_save)
        self.tb1.AddLabelTool(id_saveas,     "Save As",    bmp_saveas)
        self.tb1.AddSeparator()
        self.tb1.AddLabelTool(id_export_csv, "Export csv", bmp_export_csv)

        self.tb1.SetToolShortHelp(id_new,        "New result")
        self.tb1.SetToolShortHelp(id_open,       "Open a result")
        self.tb1.SetToolShortHelp(id_save,       "Save current result")
        self.tb1.SetToolShortHelp(id_saveas,     "Save As current result")
        self.tb1.SetToolShortHelp(id_export_csv, "Export to csv")

        self.Bind(wx.EVT_TOOL, self.on_tool_new,    id=id_new)
        self.Bind(wx.EVT_TOOL, self.on_tool_open,   id=id_open)
        self.Bind(wx.EVT_TOOL, self.on_tool_save,   id=id_save)
        self.Bind(wx.EVT_TOOL, self.on_tool_saveas, id=id_saveas)
        self.Bind(wx.EVT_TOOL, self.on_tool_csv,    id=id_export_csv)

        self.tb1.Realize()

        # sizer
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_1.Add(self.tb1, 0, wx.ALL, 2)
        sizer_1.Add(self.tree, 1, wx.ALL | wx.EXPAND, 2)

        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        self.Layout()
Code example #28
    def _run_min_max_versus(self, args):
        graph_parameters = {
            'normal latency': ('Normal Message Latency (ms)', 'left top'),
            #            'ssd': ('Sink-Source Distance (hops)', 'left top'),
            'captured': ('Capture Ratio (%)', 'right top'),
            #            'normal': ('Normal Messages Sent', 'left top'),
            'fake': ('Fake Messages Sent', 'left top'),
            'sent': ('Total Messages Sent', 'left top'),
            'received ratio': ('Receive Ratio (%)', 'left bottom'),
            #            'tfs': ('Number of TFS Created', 'left top'),
            #            'pfs': ('Number of PFS Created', 'left top'),
            'attacker distance':
            ('Attacker-Source Distance (meters)', 'left top'),
            "attacker distance percentage":
            ('Normalised Attacker Distance (%)', 'left top'),
            #'norm(sent,time taken)': ('Messages Sent per Second', 'left top'),
            #'norm(fake,time taken)': ('Messages Sent per Second', 'left top'),
            'norm(norm(sent,time taken),network size)':
            ('Messages Sent per Second per Node', 'left top'),
            'norm(norm(fake,time taken),network size)':
            ('Fake Messages Sent per Second per node', 'left top'),
            #            'norm(normal,time taken)': ('Messages Sent per Second', 'left top'),
            #            'norm(norm(fake,time taken),source rate)': ('~', 'left top'),
        }

        custom_yaxis_range_max = {
            'captured': 15,
            'received ratio': 100,
            'attacker distance': 160,
            'normal latency': 120,
            'norm(norm(sent,time taken),network size)': 15,
            'norm(norm(fake,time taken),network size)': 15,
        }

        def filter_params(all_params):
            return (all_params['source period'] == '0.125'
                    or all_params['noise model'] == 'meyer-heavy'
                    or all_params['configuration'] != 'SourceCorner')

        def adaptive_filter_params(all_params):
            return filter_params(all_params) or all_params['approach'] in {
                "PB_SINK_APPROACH", "PB_ATTACKER_EST_APPROACH"
            }

        protectionless_analysis = protectionless.Analysis.Analyzer(
            args.sim, protectionless.results_path(args.sim))

        protectionless_results = results.Results(
            args.sim,
            protectionless.result_file_path(args.sim),
            parameters=protectionless.local_parameter_names,
            results=list(
                set(graph_parameters.keys())
                & set(protectionless_analysis.results_header().keys())),
            results_filter=filter_params)

        adaptive_spr_notify_results = results.Results(
            args.sim,
            self.algorithm_module.result_file_path(args.sim),
            parameters=self.algorithm_module.local_parameter_names,
            results=graph_parameters.keys(),
            results_filter=filter_params)

        adaptive_results = results.Results(
            args.sim,
            adaptive.result_file_path(args.sim),
            parameters=adaptive.local_parameter_names,
            results=graph_parameters.keys(),
            results_filter=adaptive_filter_params)

        template_results = results.Results(
            args.sim,
            template.result_file_path(args.sim),
            parameters=template.local_parameter_names,
            results=graph_parameters.keys(),
            results_filter=filter_params)

        def graph_min_max_versus(result_name, xaxis):
            name = 'min-max-{}-versus-{}-{}'.format(adaptive.name, result_name,
                                                    xaxis)

            if result_name == "attacker distance":
                # Just get the distance of attacker 0 from node 0 (the source in SourceCorner)
                def yextractor(yvalue):
                    print(yvalue)
                    return scalar_extractor(yvalue, key=(0, 0))
            else:
                yextractor = scalar_extractor

            g = min_max_versus.Grapher(args.sim,
                                       self.algorithm_module.graphs_path(
                                           args.sim),
                                       name,
                                       xaxis=xaxis,
                                       yaxis=result_name,
                                       vary='approach',
                                       yextractor=yextractor)

            g.xaxis_label = xaxis.title()
            g.yaxis_label = graph_parameters[result_name][0]
            g.key_position = graph_parameters[result_name][1]

            g.xaxis_font = "',16'"
            g.yaxis_font = "',16'"
            g.xlabel_font = "',14'"
            g.ylabel_font = "',14'"
            g.line_width = 3
            g.point_size = 1
            g.nokey = True
            g.legend_font_size = 16

            g.min_label = ['Static - Lowest']
            g.max_label = ['Static - Highest']
            g.comparison_label = ['Dynamic', 'DynamicSpr']
            g.vary_label = ''

            if xaxis == 'network size':
                g.xvalues_to_tic_label = lambda x: f'"{x}x{x}"'

            if result_name in custom_yaxis_range_max:
                g.yaxis_range_max = custom_yaxis_range_max[result_name]

            def vvalue_converter(name):
                try:
                    return {
                        "PB_FIXED1_APPROACH": "Fixed1",
                        "PB_FIXED2_APPROACH": "Fixed2",
                        "PB_RND_APPROACH": "Rnd",
                    }[name]
                except KeyError:
                    return name

            g.vvalue_label_converter = vvalue_converter

            g.generate_legend_graph = True

            if result_name in protectionless_results.result_names:
                g.create([template_results],
                         [adaptive_results, adaptive_spr_notify_results],
                         baseline_results=protectionless_results)
            else:
                g.create([template_results],
                         [adaptive_results, adaptive_spr_notify_results])

            summary.GraphSummary(
                os.path.join(self.algorithm_module.graphs_path(args.sim),
                             name),
                os.path.join(
                    algorithm.results_directory_name,
                    '{}-{}'.format(self.algorithm_module.name,
                                   name).replace(" ", "_"))).run()

        for result_name in graph_parameters.keys():
            graph_min_max_versus(result_name, 'network size')
Code example #29
    def _run_min_max_ilp_versus(self, args):
        graph_parameters = {
            'normal latency': ('Normal Message Latency (ms)', 'left top'),
            #            'ssd': ('Sink-Source Distance (hops)', 'left top'),
            'captured': ('Capture Ratio (%)', 'right top'),
            #            'normal': ('Normal Messages Sent', 'left top'),
            'sent': ('Total Messages Sent', 'left top'),
            'received ratio': ('Receive Ratio (%)', 'left bottom'),
            'attacker distance':
            ('Attacker-Source Distance (meters)', 'left top'),
            'norm(sent,time taken)': ('Messages Sent per Second', 'left top'),
            #            'norm(norm(sent,time taken),network size)': ('Messages Sent per Second per Node', 'left top'),
            #            'norm(normal,time taken)': ('Messages Sent per Second', 'left top'),
        }

        custom_yaxis_range_max = {
            'captured': 25,
            'received ratio': 100,
            'attacker distance': 120,
            'normal latency': 4000,
            'norm(sent,time taken)': 8000,
            'norm(norm(sent,time taken),network size)': 15,
        }

        def filter_params(all_params):
            return (all_params['source period'] == '0.125'
                    or all_params['noise model'] == 'meyer-heavy'
                    or all_params['configuration'] != 'SourceCorner')

        def adaptive_filter_params(all_params):
            return filter_params(all_params) or all_params['approach'] in {
                "PB_SINK_APPROACH", "PB_ATTACKER_EST_APPROACH"
            }

        def ilprouting_filter_params(all_params):
            return filter_params(
                all_params) or all_params["pr direct to sink"] != "0.2"

        protectionless_analysis = protectionless.Analysis.Analyzer(
            args.sim, protectionless.results_path(args.sim))

        protectionless_results = results.Results(
            args.sim,
            protectionless.result_file_path(args.sim),
            parameters=protectionless.local_parameter_names,
            results=list(
                set(graph_parameters.keys())
                & set(protectionless_analysis.results_header().keys())),
            results_filter=filter_params)

        adaptive_spr_notify_results = results.Results(
            args.sim,
            self.algorithm_module.result_file_path(args.sim),
            parameters=self.algorithm_module.local_parameter_names,
            results=graph_parameters.keys(),
            results_filter=filter_params)

        adaptive_results = results.Results(
            args.sim,
            adaptive.result_file_path(args.sim),
            parameters=adaptive.local_parameter_names,
            results=graph_parameters.keys(),
            results_filter=adaptive_filter_params)

        ilprouting_results = results.Results(
            args.sim,
            ilprouting.result_file_path(args.sim),
            parameters=ilprouting.local_parameter_names,
            results=graph_parameters.keys(),
            results_filter=ilprouting_filter_params)

        sim = submodule_loader.load(simulator.sim, args.sim)

        def graph_min_max_versus(result_name, xaxis):
            name = 'min-max-ilp-versus-{}-{}'.format(result_name, xaxis)

            if result_name == "attacker distance":
                # Just get the distance of attacker 0 from node 0 (the source in SourceCorner)
                def yextractor(yvalue):
                    print(yvalue)
                    return scalar_extractor(yvalue, key=(0, 0))
            else:
                yextractor = scalar_extractor

            vary = [
                'approach', 'approach',
                ('buffer size', 'max walk length', 'pr direct to sink',
                 'msg group size')
            ]

            g = min_max_versus.Grapher(args.sim,
                                       self.algorithm_module.graphs_path(
                                           args.sim),
                                       name,
                                       xaxis=xaxis,
                                       yaxis=result_name,
                                       vary=vary,
                                       yextractor=yextractor)

            g.xaxis_label = xaxis.title()
            g.yaxis_label = graph_parameters[result_name][0]
            g.key_position = graph_parameters[result_name][1]

            g.xaxis_font = "',16'"
            g.yaxis_font = "',16'"
            g.xlabel_font = "',14'"
            g.ylabel_font = "',14'"
            g.line_width = 3
            g.point_size = 1
            g.nokey = True
            g.legend_font_size = 16

            #g.min_label = ['Static - Lowest']
            #g.max_label = ['Static - Highest']
            g.comparison_label = ['Dynamic', 'DynamicSpr', 'ILPRouting']
            g.vary_label = ''

            if xaxis == 'network size':
                g.xvalues_to_tic_label = lambda x: f'"{x}x{x}"'

            if result_name in custom_yaxis_range_max:
                g.yaxis_range_max = custom_yaxis_range_max[result_name]

            def vvalue_converter(name):
                if isinstance(name, tuple):
                    (buffer_size, max_walk_length, pr_direct_to_sink,
                     msg_group_size) = name

                    return f"Group Size {msg_group_size}"

                try:
                    return {
                        "PB_FIXED1_APPROACH": "Fixed1",
                        "PB_FIXED2_APPROACH": "Fixed2",
                        "PB_RND_APPROACH": "Rnd",
                    }[name]
                except KeyError:
                    return name

            g.vvalue_label_converter = vvalue_converter

            # Want to pretend SeqNosOOOReactiveAttacker is SeqNosReactiveAttacker
            def correct_data_key(data_key):
                data_key = list(data_key)
                data_key[sim.global_parameter_names.index(
                    'attacker model')] = "SeqNosReactiveAttacker()"
                return tuple(data_key)

            g.correct_data_key = correct_data_key

            g.generate_legend_graph = True

            if result_name in protectionless_results.result_names:
                g.create([], [
                    adaptive_results, adaptive_spr_notify_results,
                    ilprouting_results
                ],
                         baseline_results=protectionless_results)
            else:
                g.create([], [
                    adaptive_results, adaptive_spr_notify_results,
                    ilprouting_results
                ])

            summary.GraphSummary(
                os.path.join(self.algorithm_module.graphs_path(args.sim),
                             name),
                os.path.join(
                    algorithm.results_directory_name,
                    '{}-{}'.format(self.algorithm_module.name,
                                   name).replace(" ", "_"))).run()

        for result_name in graph_parameters.keys():
            graph_min_max_versus(result_name, 'network size')
Code example #30
def all_results(algorithms):
    modules = [algorithm.import_algorithm(algo) for algo in algorithms]

    module_results = [
        results.Results(module.result_file_path,
                        parameters=module.local_parameter_names,
                        results=result_names) for module in modules
    ]

    for (name, module_result) in zip(algorithms, module_results):
        safety_factor_indexes[name] = module_result.parameter_names.index(
            "safety factor")

    parameters = {}
    for result in module_results:
        parameters.update(dict(result.parameters()))

    parameters_to_remove = [k for (k, v) in parameters.items() if len(v) == 1]

    new_global_parameters = tuple([
        name for name in global_parameter_names[:-1]
        if name not in parameters_to_remove
    ])

    results_data = [
        transform_results_data(result.data, parameters_to_remove)
        for result in module_results
    ]

    combined_data = {}

    for (algo_name, result_data) in zip(algorithms, results_data):

        safety_factor_idx = safety_factor_indexes[algo_name]

        for (global_params, items1) in result_data.items():

            if global_params not in combined_data:
                combined_data[global_params] = {}

            for (source_period, items2) in items1.items():

                if source_period not in combined_data[global_params]:
                    combined_data[global_params][source_period] = {}

                for (local_params, algo_results) in items2.items():

                    safety_factor = local_params[safety_factor_idx]

                    if safety_factor not in combined_data[global_params][
                            source_period]:
                        combined_data[global_params][source_period][
                            safety_factor] = {}

                    new_local_params = transform_key(local_params,
                                                     safety_factor_idx)

                    combined_data[global_params][source_period][safety_factor][
                        (algo_name, new_local_params)] = algo_results

    return new_global_parameters, combined_data
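
Taken together, these examples construct results.Results in two forms: an older form where the result file path is the first positional argument, and a newer form where the simulation name comes first and result_file_path is called with it. The sketch below only summarises those call sites; the function name and result names are sample values of my own, and the keyword arguments listed are the ones that appear above, so the authoritative signature should be checked in the repository's results module.

def load_results(results, algorithm_module, sim_name=None,
                 results_to_load=('normal latency', 'captured', 'received ratio')):
    """Summary sketch of the two results.Results call forms used above."""
    if sim_name is None:
        # Older form: result_file_path is a plain attribute.
        return results.Results(
            algorithm_module.result_file_path,
            parameters=algorithm_module.local_parameter_names,
            results=results_to_load)

    # Newer form: simulation name first, result_file_path(sim_name) is a call;
    # optional keywords seen in the examples above include results_filter,
    # source_period_normalisation and network_size_normalisation.
    return results.Results(
        sim_name,
        algorithm_module.result_file_path(sim_name),
        parameters=algorithm_module.local_parameter_names,
        results=results_to_load,
        source_period_normalisation="NumSources")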