Example #1
    def initialize(self):
        super(Radosbench, self).initialize()

        logger.info('Pausing for 60s for idle monitoring.')
        with monitoring.monitor("%s/idle_monitoring" % self.run_dir):
            time.sleep(60)

        common.sync_files('%s/*' % self.run_dir, self.out_dir)
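Every example on this page uses monitoring.monitor as a context manager that collects system statistics into a directory for the duration of the with block. The helper itself is not shown anywhere on this page, so the following is only a minimal stand-in sketch, assuming the collector is an external process such as collectl; the collector choice and its flags are illustrative, not CBT's actual implementation.

import contextlib
import os
import subprocess

@contextlib.contextmanager
def monitor(out_dir):
    """Start a stats collector, yield to the benchmark, stop it on exit."""
    os.makedirs(out_dir, exist_ok=True)
    # Illustrative collector: collectl writes its samples under out_dir
    # (-f sets the output location, -P selects plot-friendly output).
    proc = subprocess.Popen(['collectl', '-f', out_dir, '-P'],
                            stderr=subprocess.DEVNULL)
    try:
        yield
    finally:
        proc.terminate()
        proc.wait()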
Example #2
    def mkpools(self):
        with monitoring.monitor("%s/pool_monitoring" % self.run_dir):
            if self.pool_per_proc:  # allow use of a separate storage pool per process
                for i in range(self.concurrent_procs):
                    for node in settings.getnodes('clients').split(','):
                        node = node.rpartition("@")[2]
                        self.cluster.rmpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
                        self.cluster.mkpool('rados-bench-%s-%s' % (node, i), self.pool_profile, 'radosbench')
            else:  # the default behavior is to use a single Ceph storage pool for all rados bench processes
                self.cluster.rmpool('rados-bench-cbt', self.pool_profile)
                self.cluster.mkpool('rados-bench-cbt', self.pool_profile, 'radosbench')
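The per-process pool-naming scheme above is easy to reproduce standalone. A minimal sketch, assuming two client entries in the user@host form that settings.getnodes('clients') returns; the host names and process count are made up for illustration:

# Assumed inputs; the real values come from CBT's settings and config.
clients = 'ceph@client1,ceph@client2'
concurrent_procs = 2

for i in range(concurrent_procs):
    for node in clients.split(','):
        node = node.rpartition('@')[2]          # strip the 'user@' prefix
        print('rados-bench-%s-%s' % (node, i))
# prints: rados-bench-client1-0, rados-bench-client2-0,
#         rados-bench-client1-1, rados-bench-client2-1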
Example #3
    def _run(self, mode, run_dir, out_dir, max_objects, runtime):
        # We'll always drop caches for rados bench
        self.dropcaches()

        # Build the optional --concurrent-ios argument; default to an empty
        # string so the command template below never sees an undefined name.
        concurrent_ops_str = ''
        if self.concurrent_ops:
            concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops

        rados_version = self.get_rados_version()

        # Max Objects
        max_objects_str = ''
        if max_objects:
            if rados_version < 10:
                raise ValueError('max_objects not supported by rados_version < 10')
            max_objects_str = '--max-objects %s' % max_objects

        # Operation type
        op_type = mode
        if mode == 'prefill':
            op_type = 'write'

        if op_type == 'write':
            op_size_str = '-b %s' % self.op_size
        else:
            op_size_str = ''

        # Write to OMAP
        write_omap_str = ''
        if self.write_omap:
            if rados_version < 10:
                raise ValueError('write_omap not supported by rados_version < 10')
            write_omap_str = '--write-omap'

        run_dir = os.path.join(self.run_dir, run_dir)
        common.make_remote_dir(run_dir)

        # dump the cluster config
        self.cluster.dump_config(run_dir)

        # Run the backfill testing thread if requested (but not for prefill)
        if mode != 'prefill' and 'recovery_test' in self.cluster.config:
            recovery_callback = self.recovery_callback
            self.cluster.create_recovery_test(run_dir, recovery_callback)

        # Run rados bench
        with monitoring.monitor(run_dir) as monitor:
            logger.info('Running radosbench %s test.' % mode)
            ps = []
            for i in range(self.concurrent_procs):
                out_file = '%s/output.%s' % (run_dir, i)
                objecter_log = '%s/objecter.%s.log' % (run_dir, i)
                if self.pool_per_proc:
                    # support previous behavior of 1 storage pool per rados process
                    pool_name_cmd = 'rados-bench-`{fqdn_cmd}`-{i}'
                    pool_name = pool_name_cmd.format(fqdn_cmd=common.get_fqdn_cmd(), i=i)
                    run_name = ''
                else:
                    # default behavior is to use a single storage pool
                    pool_name = self.pool
                    run_name_fmt = '--run-name {object_set_id}`{fqdn_cmd}`-{i}'
                    run_name = run_name_fmt.format(
                        object_set_id=self.object_set_id,
                        fqdn_cmd=common.get_fqdn_cmd(),
                        i=i)
                rados_bench_cmd_fmt = \
                    '{cmd} -c {conf} -p {pool} bench {op_size_arg} {duration} ' \
                    '{op_type} {concurrent_ops_arg} {max_objects_arg} ' \
                    '{write_omap_arg} {run_name} --no-cleanup ' \
                    '2> {stderr} > {stdout}'
                rados_bench_cmd = rados_bench_cmd_fmt.format(
                    cmd=self.cmd_path_full,
                    conf=self.tmp_conf,
                    pool=pool_name,
                    op_size_arg=op_size_str,
                    duration=runtime,
                    op_type=op_type,
                    concurrent_ops_arg=concurrent_ops_str,
                    max_objects_arg=max_objects_str,
                    write_omap_arg=write_omap_str,
                    run_name=run_name,
                    stderr=objecter_log,
                    stdout=out_file)
                p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
                ps.append(p)
            for p in ps:
                p.wait()

        # If we were doing recovery, wait until it's done (but not for prefill).
        if mode != 'prefill' and 'recovery_test' in self.cluster.config:
            self.cluster.wait_recovery_done()

        # Finally, get the historic ops
        self.cluster.dump_historic_ops(run_dir)

        out_dir = os.path.join(self.out_dir, out_dir)
        common.sync_files('%s/*' % run_dir, out_dir)
        self.analyze(out_dir)
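For context, a hypothetical pair of calls into this method, assuming an initialized Radosbench instance named bench; the mode strings come from the code above, while the directory names, object count, and runtime are illustrative values only.

# Prefill the pool first (recovery testing is skipped for this mode),
# then run the timed write workload.
bench._run(mode='prefill', run_dir='prefill', out_dir='prefill',
           max_objects=100000, runtime=90)
bench._run(mode='write', run_dir='write', out_dir='write',
           max_objects=None, runtime=300)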
Example #4
from uuid import uuid4

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String

# project-local modules
import main
import monitoring
import onosCore
import producer
import simpleChecking

# database configuration
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///MIRAI4.db"
db = SQLAlchemy(app)


# instantiate the producer, checking, monitoring, and ONOS helper objects
t = producer.producer()
checking = simpleChecking.check()
truechecking = main.mainclass()
monitor = monitoring.monitor()
clear = onosCore.test()
token = 1

# database model
class Mirai4(db.Model):
    # table name and column definitions
    __tablename__ = 'mirai4'
    id = db.Column(Integer, primary_key=True)
    containerName = db.Column(String(120))
    result = db.Column(String(50))
    code = db.Column(String(120))
    newcontainerName = db.Column(String(120))
    token = db.Column(String(120))
    Benchmark("evadeV-5-3", "examples/evade-monitoring.nm", "N=5,RADIUS=3", "Pmax=? [F<=12 \"crash\"]"),
    Benchmark("evadeV-6-3", "examples/evade-monitoring.nm", "N=6,RADIUS=3", "Pmax=? [F<=12 \"crash\"]"),
    Benchmark("refuelA-12-50", "examples/refuel.nm", "N=12,ENERGY=50", "Pmax=? [F<=12 \"empty\"]"),
    Benchmark("refuelB-12-50","examples/refuelB.nm", "N=12,ENERGY=50", "Pmax=? [F<=12 \"empty\"]")
]

if __name__ == "__main__":
    # Wait for termination, never crash.
    sp.set_settings(["--signal-timeout", "100000"])
    parser = argparse.ArgumentParser(description="Run experiments with premise.")
    parser.add_argument("--number-traces", default=50, type=int, help="How many traces to run")
    parser.add_argument("--trace-length", default=500, type=int, help="How long should the traces be?")
    parser.add_argument("--promptness-deadline", default=1000, type=int, help="How long may one iteration take at most?")
    parser.add_argument("--verbose", action='store_true', help="Enable extra output")
    args = parser.parse_args()

    nr_traces = args.number_traces
    trace_length = args.trace_length
    promptness_deadline = args.promptness_deadline  # in ms
    configurations = [monitoring.UnfoldingOptions(exact_arithmetic=True),
                      monitoring.UnfoldingOptions(exact_arithmetic=False),
                      monitoring.ForwardFilteringOptions(exact_arithmetic=True, convex_hull_reduction=False),
                      monitoring.ForwardFilteringOptions(exact_arithmetic=True, convex_hull_reduction=True)]
    for benchmark in benchmarks:
        for config in configurations:
            print(f"Running {benchmark.name} with {str(config)}")
            try:
                monitoring.monitor(benchmark.modelpath, benchmark.risk_def,
                                   benchmark.constants, trace_length, config,
                                   verbose=args.verbose,
                                   promptness_deadline=promptness_deadline,
                                   simulator_seed=range(nr_traces),
                                   model_id=benchmark.name)
            except RuntimeWarning:
                print("Skipped (likely, the folder exists)")
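Note the seeding strategy here: the script passes simulator_seed=range(nr_traces), i.e. the fixed seeds 0..nr_traces-1, so repeated runs replay identical traces; Example #6 below instead draws fresh 64-bit seeds from Python's random module unless --seed is given.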
Example #6
def main():
    parser = argparse.ArgumentParser(
        description="Run premise simulating from model.")
    methodgroup = parser.add_mutually_exclusive_group(required=True)
    methodgroup.add_argument('--filtering', action='store_true')
    methodgroup.add_argument('--unfolding', action='store_true')
    numericsgroup = parser.add_mutually_exclusive_group(required=True)
    numericsgroup.add_argument('--exact', action='store_true')
    numericsgroup.add_argument('--float', action='store_false', dest="exact")
    parser.add_argument("--name",
                        default="default_name",
                        help="Name for stat output")
    parser.add_argument("--model", required=True, help="Path to models")
    parser.add_argument("--constants",
                        default="",
                        type=str,
                        help="Constant definition string")
    parser.add_argument("--risk",
                        required=True,
                        help="Property defining the state risk")
    parser.add_argument("--number-traces",
                        default=50,
                        type=int,
                        help="How many traces to run")
    parser.add_argument("--trace-length",
                        default=500,
                        type=int,
                        help="How long should the traces be?")
    parser.add_argument("--promptness-deadline",
                        default=1000,
                        type=int,
                        help="How long may one iteration take at most?")
    parser.add_argument("--verbose",
                        action='store_true',
                        help="Enable extra output")
    parser.add_argument("--seed",
                        help="Set a random seed for reproducible experiments")
    parser.add_argument("--no-convexhull-reduction",
                        help="Disable Convexhull Reduction",
                        dest="convexhull",
                        action='store_false')
    args = parser.parse_args()

    trace_length = args.trace_length
    promptness_deadline = args.promptness_deadline  # in ms
    if args.seed is not None:
        random.seed(args.seed)
    seed = [random.getrandbits(64) for _ in range(args.number_traces)]

    if args.filtering:
        options = monitoring.ForwardFilteringOptions(
            exact_arithmetic=args.exact, convex_hull_reduction=args.convexhull)
    elif args.unfolding:
        options = monitoring.UnfoldingOptions(exact_arithmetic=args.exact)
    else:
        raise RuntimeError("Unknown method!")

    monitoring.monitor(args.model, args.risk, args.constants, trace_length,
                       options, args.verbose, seed, promptness_deadline,
                       args.name)
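One detail worth noting in the argument setup: --exact and --float write to the same destination, so the required mutually exclusive group collapses into a single boolean args.exact. A throwaway parser demonstrates the effect:

import argparse

p = argparse.ArgumentParser()
g = p.add_mutually_exclusive_group(required=True)
g.add_argument('--exact', action='store_true')
g.add_argument('--float', action='store_false', dest='exact')

print(p.parse_args(['--exact']).exact)  # True
print(p.parse_args(['--float']).exact)  # False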