Example No. 1
    def __init__(self,
                 directory='report',
                 report='report.md',
                 timeplots='timeplots.md'):
        """Create the output directory and seed both markdown report files.

        :param directory: output directory for all generated report files
        :param report: file name of the main report
        :param timeplots: file name of the timeplot collection
        """
        self.directory = directory
        self.main_file = "{}/{}".format(directory, report)
        self.timeplots_collection = "{}/{}".format(directory, timeplots)

        # Make sure the output directory exists before any file is written.
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)

        # Seed the main report, cross-linking the timeplot collection.
        with open(self.main_file, 'w+') as main_out:
            main_out.write("# Benchmark report\n")
            main_out.write(
                "[Timeplot collection is here]({0})\n".format(timeplots))

        # Seed the timeplot collection, linking back to the main report.
        with open(self.timeplots_collection, 'w+') as plots_out:
            plots_out.write("# Timeplot collection report\n")
            plots_out.write("[Main report in here]({0}) \n\n".format(report))

        self.reports_added = 0  # number of aggregation reports written so far
        self.timeplots_added = 0  # number of timeplots written so far
        self.scenario = Scenario()
Example No. 2
 def scenario_delete(self, scenario_name, repo):
     """Delete a scenario and unregister it from its repository.

     Returns the (now deleted) Scenario object.
     """
     target = Scenario(scenario_name, repo=repo, db=self.db)
     target.delete()
     # Keep the repository's scenario list in sync with the deletion.
     if target.name in repo.scenarios:
         repo.scenarios.remove(target.name)
         repo.save()
     return target
Example No. 3
 def scenario_update(self, scenario_name, repo, data):
     """Save (create or overwrite) a scenario and register it in its repository.

     Returns the saved Scenario object.
     """
     updated = Scenario(scenario_name, repo, data=data, db=self.db)
     updated.save()
     # Register the scenario with the repository on first save.
     if updated.name not in repo.scenarios:
         repo.scenarios.append(updated.name)
         repo.save()
     return updated
Example No. 4
 def job_delete(self, job_name):
     """Delete a job by name.

     Returns the deleted Job object, or False when no such job exists.
     """
     record = self.db.read('job', job_name)
     if not record:
         return False
     # Rebuild the repo/scenario context the job was stored under.
     repo = Repository(record['repo'], db=self.db)
     scenario = Scenario(record['scenario'], repo=repo, db=self.db)
     doomed = Job(job_name, repo, scenario, db=self.db)
     doomed.delete()
     return doomed
Example No. 5
class Core(object):
    """Top-level driver for the UpPwn scan: banner, scenario, proxy, modules.

    NOTE: this class uses Python 2 `print` statement syntax.
    """
    def __init__(self, parsed_opts):
        # Prepare the temporary working environment before anything else runs.
        FileManager.create_tmp_env()
        self.opts = parsed_opts  # parsed command-line options
        self.nb_vulnerabilities = 0  # total reported at the end of run()

    def display_banner(self):
        # Print the ASCII-art startup banner.
        banner = """
  _   _       ____
 | | | |_ __ |  _ \__      ___ __
 | | | | '_ \| |_) \ \ /\ / / '_ \\
 | |_| | |_) |  __/ \ V  V /| | | |
  \___/| .__/|_|     \_/\_/ |_| |_|
       |_|
        """
        print banner

    def run(self):
        """Full pipeline: banner -> scenario -> proxy -> detection modules."""
        self.display_banner()
        print "[+] UpPwn is running !"
        self.test_scenario()
        print "[+] Scenario successfully loaded"
        self.run_proxy()
        self.run_modules()
        print "[+] Work is done, {} vulnerabilities have been found !".format(
            self.nb_vulnerabilities)
        return True

    def test_scenario(self):
        # Start a virtual framebuffer, then build and load the upload scenario.
        Xvfb.start()
        self.up_s = Scenario(self.opts.scenario)
        self.up_s.build(self.opts.cookies)
        self.up_s.load()
        #if not self.up_s.has_succeeded(): return False

    def run_proxy(self):
        # Start the man-in-the-middle proxy used to observe uploads.
        print "[+] Mitm proxy start"
        self.proxy = Proxy()
        self.proxy.start()

    def run_modules(self):
        # Run the upload-vulnerability detection scenario.
        print "[+] Upload vulnerabilities detection start"
        self.up_s.run()
        #time.sleep(120)

    def stop(self):
        """Shut down the proxy and the virtual framebuffer cleanly."""
        print "[+] Close properly the proxy"
        time.sleep(
            15
        )  # All tcp connection have to be ended before stopping the MiTM / Has to be fixed
        self.proxy.stop()
        Xvfb.stop()
        print "[+] UpPwn stopped !"
Example No. 6
def test_load_absent():
    """A scenario name that was never saved must report exists == False."""
    missing = Scenario('absent', REPO, db=DB)
    missing.load()
    assert_equals(missing.exists, False)
Example No. 7
def test_delete():
    """Deleting a stored scenario reports success."""
    stored = Scenario('some_random_name_for_present', REPO, db=DB)
    assert_equals(stored.delete(), True)
Example No. 8
def test_dump():
    """dump() serializes a scenario into a plain dict."""
    subject = Scenario('another_random_name_for_present', REPO)
    dumped = subject.dump()
    assert_equals(type(dumped), dict)
Example No. 9
def test_load_present():
    """Loading a previously saved scenario restores its data payload."""
    reloaded = Scenario('some_random_name_for_present', REPO, db=DB)
    reloaded.load()
    assert_equals(reloaded.data, 42)
Example No. 10
def test_save():
    """Saving modified scenario data returns the stored document (a dict)."""
    present = Scenario('some_random_name_for_present', REPO, db=DB)
    present.load()
    present.data = 42
    assert_equals(type(present.save()), dict)
Example No. 11
from nose.tools import assert_equals
from tests.common import DB, PREFIX

from lib.repository import Repository
from lib.scenario import Scenario
from lib.job import Job


# Shared module-level fixtures: a repository and a scenario persisted to the
# test database, reused by every Job test below.
REPO = Repository('another_random_repo_name', db=DB)
REPO.save()

SCENARIO = Scenario('random_scenario', REPO, db=DB)
SCENARIO.data = '#!/bin/bash\nsleep 1\necho done'  # minimal runnable script body
SCENARIO.save()


def test_load_absent():
    """A job name that was never saved must report exists == False."""
    ghost = Job('missing_name', REPO, SCENARIO, db=DB)
    ghost.load()
    assert_equals(ghost.exists, False)

def test_save():
    """Saving a job with an extra attribute returns the stored document."""
    fresh = Job('present_name', REPO, SCENARIO, db=DB)
    fresh.attribute = 51
    assert_equals(type(fresh.save()), dict)

def test_load_present():
    """Loading the job saved by test_save restores its attribute."""
    reloaded = Job('present_name', REPO, SCENARIO, db=DB)
    reloaded.load()
    assert_equals(reloaded.attribute, 51)
Example No. 12
 def test_scenario(self):
     """Start a virtual framebuffer, then build and load the scenario."""
     Xvfb.start()
     scn = Scenario(self.opts.scenario)
     self.up_s = scn
     scn.build(self.opts.cookies)
     scn.load()
Example No. 13
    'P15 (' + str(get_planet_temperature_by_position(15)) + '°)',
    SERVER_ECONOMY_SPEED, [], p15_playstyle)
# Home planet for the P15 account (position 15), registered on its account.
p1 = Planet('PM', 188, get_planet_temperature_by_position(15), 15, mizerable)
mizerable.planets.append(p1)

# P14 account: play style, account and its home planet at position 14.
p14_playstyle = PlayStyle('P14 playstyle', 14, False, 7)
microwave = Account(
    'P14 (' + str(get_planet_temperature_by_position(14)) + '°)',
    SERVER_ECONOMY_SPEED, [], p14_playstyle)
p2 = Planet('PM', 188, get_planet_temperature_by_position(14), 14, microwave)
microwave.planets.append(p2)

# P13 account: play style, account and its home planet at position 13.
p13_playstyle = PlayStyle('P13 playstyle', 13, False, 7)
p14acc = Account('P13 (' + str(get_planet_temperature_by_position(13)) + '°)',
                 SERVER_ECONOMY_SPEED, [], p13_playstyle)
p3 = Planet('PM', 188, get_planet_temperature_by_position(13), 13, p14acc)
p14acc.planets.append(p3)

# P10 account: play style, account and its home planet at position 10.
p10_playstyle = PlayStyle('P10 playstyle', 10, False, 7)
miz = Account('P10 (' + str(get_planet_temperature_by_position(10)) + '°)',
              SERVER_ECONOMY_SPEED, [], p10_playstyle)
p4 = Planet('PM', 188, get_planet_temperature_by_position(10), 10, miz)
miz.planets.append(p4)

# All competing accounts entered into the simulation.
accounts = [mizerable, microwave, p14acc, miz]

# Run the head-to-head simulation and render its outcome.
scenario = Scenario('P15 vs P14 vs P10', accounts, SIMULATION_DAY_LENGTH)
scenario.run()

renderer = Renderer()
renderer.display_scenario(scenario)
Example No. 14
class Report:
    """
    Class Report is used to collect results of benchmarking
        and to create final report.
    """
    def __init__(self,
                 directory='report',
                 report='report.md',
                 timeplots='timeplots.md'):
        """Create the output directory and seed both markdown files.

        :param directory: output directory for all report files
        :param report: file name of the main report
        :param timeplots: file name of the timeplot collection
        """
        self.directory = directory  # set output directory for report files
        self.main_file = "{0}/{1}".format(self.directory, report)
        self.timeplots_collection = "{0}/{1}".format(self.directory, timeplots)

        if not os.path.exists(self.directory):
            os.makedirs(self.directory)

        # Seed the main report with a link to the timeplot collection.
        with open(self.main_file, 'w+') as outfile:
            outfile.write("# Benchmark report\n")
            outfile.write(
                "[Timeplot collection is here]({0})\n".format(timeplots))

        # Seed the timeplot collection with a link back to the main report.
        with open(self.timeplots_collection, 'w+') as outfile:
            outfile.write("# Timeplot collection report\n")
            outfile.write("[Main report in here]({0}) \n\n".format(report))

        self.reports_added = 0  # keep track of added reports

        self.timeplots_added = 0  # keep track of number of timeplots added

        self.scenario = Scenario()

    def fetch_benchmark_output(self, input_file):
        """ Fetch new results from @input_file """

        self.scenario.load_result(input_file)
        # Drop keys that should not appear in the rendered report.
        filter_dict(self.scenario.scenario, FILTER_KEYS)
        self.aggregate()

    def init_aggregator(self, benchmark=None):
        """ Create a fresh aggregator; must be called before aggregate(). """
        self.aggregator = Aggregator(benchmark)

    def aggregate(self, benchmark=None):
        """ Append the throughput of the current scenario to the aggregator """

        th = self._get_throughput()  # calculate throughput

        # Append to the open data set if one exists, otherwise start one.
        if self.aggregator.throughput:
            self.aggregator.throughput[-1].append(th)
        else:
            self.aggregator.throughput.append(th)

    def _get_throughput(self):
        """ Calculate throughput of the benchmark

        Averages count * value_size / duration over all results.

        :raises InvalidBenchmarkResult: when duration, count or value_size
            is missing or not numeric.
        """
        throughput = 0
        for result in self.scenario.results:
            # get duration of the benchmarking
            try:
                duration = float(result['duration'])
            except (KeyError, TypeError, ValueError):
                raise InvalidBenchmarkResult('duration format is not valid')

            # number of operations in the benchmarking
            try:
                count = int(result['count'])
            except (KeyError, TypeError, ValueError):
                raise InvalidBenchmarkResult(
                    'count is not given, or format is not int')

            # get size of each value to calculate the throughput
            try:
                value_size = int(self.scenario.bench_config['value_size'])
            except (KeyError, TypeError, ValueError):
                raise InvalidBenchmarkResult(
                    'value size is not given, or format is not int')

            # average bytes-per-second contribution of this result
            throughput += count * value_size / duration / len(
                self.scenario.results)

        return int(throughput)

    def add_aggregation(self):
        """ Append the current aggregation (config, plot, table) to the report """
        # count reports added to a report file
        self.reports_added += 1

        fig_name = 'fig' + str(self.reports_added) + '.png'

        # filter results from scenario config before dumping to the report

        with open(self.main_file, 'a+') as outfile:
            # refer the figure in the report
            outfile.write("\n # Report {0} \n".format(str(self.reports_added)))

            # add benchmark config
            outfile.write('**Benchmark config:** \n')
            outfile.write('```yaml \n')
            yaml.dump(self.scenario.config, outfile, default_flow_style=False)
            outfile.write('\n```')

        # check if more than one output was collected
        if sum(map(len, self.aggregator.throughput)) > 1:
            # create a bar plot
            self._bar_plot(fig_name)

            # insert bar plot into the report
            with open(self.main_file, 'a+') as outfile:
                outfile.write(
                    "\n![Fig: throughput vs parameter]({0})".format(fig_name))

        # add the table of the data sets
        self._add_table()

    @staticmethod
    def humanize_bitrate(value_in_bytes):
        ''' Shortens large values of bitrate to KiB/s or MiB/s

        Returns (scaled_value, divisor, unit_name).
        '''

        byte, kbyte, mbyte = 'Byte/s', 'KiB/s', 'MiB/s'
        dim = {byte: 1, kbyte: 1024, mbyte: 1048576}
        if value_in_bytes > dim[mbyte]:
            return value_in_bytes / dim[mbyte], dim[mbyte], mbyte

        if value_in_bytes > dim[kbyte]:
            return value_in_bytes / dim[kbyte], dim[kbyte], kbyte

        return value_in_bytes, dim[byte], byte

    def humanize_bytes(self, key, value):
        """ Render @value human-readably when @key denotes a byte quantity """
        if key in BYTE_KEYS:
            return humanize.naturalsize(value, binary=True)
        return value

    def _bar_plot(self, fig_name):
        """ Render a grouped bar plot of throughput and save it as @fig_name """
        # define range from prime parameter
        prime_parameter = re.sub('[\n|...]', '',
                                 yaml.dump(self.aggregator.benchmark.prime.id))
        second_parameter = re.sub(
            '[\n|...]', '', yaml.dump(self.aggregator.benchmark.second.id))

        ticks_labels = self.aggregator.benchmark.prime.range
        ticks_labels = [
            self.humanize_bytes(prime_parameter, tick) for tick in ticks_labels
        ]

        # at first results are plotted vs counting number of samples
        rng = [i for i, tmp in enumerate(ticks_labels)]

        # number of data sets combined in the figure
        if len(self.aggregator.throughput) == 0:
            raise InvalidBenchmarkResult("results are empty")

        max_throughput = 0
        for thr in self.aggregator.throughput:
            try:
                max_throughput = max(max_throughput, max(thr))
            except TypeError:
                # throughput is a flat list of numbers, not a list of lists
                max_throughput = max(self.aggregator.throughput)
        max_throughput, reduce_times, dim_name = self.humanize_bitrate(
            max_throughput)

        # figure settings
        n_plots = len(
            self.aggregator.throughput[0])  # number of plots in the figure
        n_samples = len(rng)  # number of samples for each data set
        width = rng[-1] / (n_samples * n_plots + 1)  # bar width
        gap = width / 10  # gap between bars
        diff_y = 0.1  # minimal relative difference in throughput between neighboring bars

        # create figure
        fig, ax = plt.subplots()

        # limit number of ticks to the number of samples
        plt.xticks(rng)

        # substitute tick labels
        ax.set_xticklabels(ticks_labels)

        # define color cycle
        # NOTE(review): set_color_cycle was removed in matplotlib 2.0
        # (set_prop_cycle is the replacement) — confirm the pinned version.
        ax.set_color_cycle(
            ['blue', 'red', 'green', 'yellow', 'black', 'brown'])

        ax.set_xlabel(prime_parameter)
        ax.set_ylabel('throughput, %s' % dim_name)

        # loop over data sets
        for i, th in enumerate(self.aggregator.throughput):
            # define plot label
            legend = " "
            if self.aggregator.benchmark.second.id:
                value = self.aggregator.benchmark.second.range[i]
                legend = "%s=%s" % (second_parameter,
                                    self.humanize_bytes(
                                        second_parameter, value))

            # add bar plot to the figure
            th_humanized = [t / reduce_times for t in th]
            ax.bar(rng, th_humanized, width, label=legend)

            # add space for a bar label on top of the plot
            plt.ylim(ymax=max_throughput * 1.3)

            lgd = ax.legend(loc='upper left', bbox_to_anchor=(1, 1))

            # add labels to bars
            for j, v in enumerate(th):
                text_x = rng[j]
                text_y = max(0, v / reduce_times)
                va = 'bottom'  # alignment of the bar label
                ax.text(text_x,
                        text_y,
                        ' %s/s ' % humanize.naturalsize(v, gnu=True),
                        color='black',
                        fontweight='bold',
                        rotation=90,
                        ha='center',
                        va=va)

            # shift bars for the next plot
            rng = [x + gap + width for x in rng]

        # save figure (legend is drawn outside the axes, so keep it in bounds)
        plt.savefig(os.path.join(self.directory, fig_name),
                    bbox_extra_artists=(lgd, ),
                    bbox_inches='tight')
        plt.close()

    def _add_table(self):
        """ Add table with data """

        with open(self.main_file, 'a+') as outfile:
            # create a table
            prime_parameter = re.sub(
                '[\n|.]', '', yaml.dump(self.aggregator.benchmark.prime.id))
            second_parameter = re.sub(
                '[\n|.]', '', yaml.dump(self.aggregator.benchmark.second.id))
            outfile.write('\n ### Throughput: \n')

            # add titles to the columns
            row_title = '| %s | ' % prime_parameter
            row_line = '|---|'
            for item in self.aggregator.benchmark.second.range:
                if self.aggregator.benchmark.second.id:
                    if second_parameter in BYTE_KEYS:
                        item = humanize.naturalsize(item, binary=True)
                    row_title += '%s = %s |' % (second_parameter, item)
                else:
                    row_title += ' |'
                row_line += '---|'
            outfile.write("%s \n%s \n " % (row_title, row_line))
            # fill in the table
            for row, val in enumerate(self.aggregator.benchmark.prime.range):
                if prime_parameter in BYTE_KEYS:
                    val = humanize.naturalsize(val)
                row_values = '| %s |' % val
                for col, _ in enumerate(
                        self.aggregator.benchmark.second.range):
                    row_values += '%s/s |' % str(
                        humanize.naturalsize(
                            self.aggregator.throughput[col][row], binary=True))
                outfile.write('%s \n' % row_values)

    def add_timeplot(self):
        """ Add timeplots to the report """

        files = self._plot_per_interval()

        if files:
            with open(self.timeplots_collection, 'a+') as outfile:
                outfile.write("\n ## Timeplot %s \n" %
                              (str(self.timeplots_added)))
                outfile.write('\n**Config:**\n```yaml \n')

                yaml.dump(self.scenario.config,
                          outfile,
                          default_flow_style=False)
                yaml.dump({'results': self.scenario.results},
                          outfile,
                          default_flow_style=False)
                outfile.write('\n```')
                outfile.write("\n _____________ \n")
                for file in files:
                    outfile.write("\n![Fig]({0}) \n".format(file))

    def _plot_per_interval(self):
        """ Create timeplots

        :returns: list of figure file names written to self.directory
        :raises InvalidBenchmarkResult: when a result's duration is invalid
        """
        file_names = []  # list of the output files

        # time_unit_literal represents the time unit for aggregation of the results
        time_unit_literal = self.scenario.result_output
        time_unit = TIME_UNITS.get(time_unit_literal)

        for result in self.scenario.results:
            # duration of the benchmarking
            try:
                duration = float(result['duration'])
            except (KeyError, TypeError, ValueError):
                raise InvalidBenchmarkResult('duration format is not valid')

            # per_interval represents number of operations per time unit
            per_interval = result.get('perinterval')

            # plot number of operations vs time if per_interval is not empty
            if per_interval:
                # define time samples
                max_time = min(int(duration), len(per_interval))
                time_line = [i for i in range(time_unit, max_time + time_unit)]

                plt.figure()
                plt.plot(time_line,
                         per_interval[:len(time_line)],
                         'bo--',
                         label=self.timeplots_added)
                # time_unit_literal is assumed to be of the form 'per_<unit>';
                # strip the 4-char prefix for axis labels.
                plt.xlabel('time, ' + time_unit_literal[4:])
                plt.ylabel('operations per ' + time_unit_literal[4:])

                # define file name of the figure
                file_name = 'plot_per_interval_{0}.png'.format(
                    str(self.timeplots_added))
                file = '{0}/{1}'.format(self.directory, file_name)

                # save figure to file
                plt.savefig(file)
                plt.close()

                # add the file name to the list of files
                file_names.append(file_name)

                # increment timeplot count
                self.timeplots_added += 1
        return file_names
Example No. 15
 def scenario_load(self, scenario_name, repo):
     """Fetch a scenario from the database and return it."""
     loaded = Scenario(scenario_name, repo=repo, db=self.db)
     loaded.load()
     return loaded