Example #1
def detect_interlacing(path):
    a = Invocation('ffmpeg -vf idet -vframes 100 -an -f rawvideo -y /dev/null -i {0}')
    a(path)
    a.run()
    if a.returncode or a.exited:
        return False
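    # ffmpeg's idet filter writes its statistics to stderr; Invocation
    # appears to expose the process's (stdout, stderr) pair under .stdout,
    # so index 1 selects stderr here.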
    result = a.stdout[1].split('\n')
    for line in result:
        if line.startswith('[Parsed_idet_'):
            match = re.search('TFF:([0-9]+) BFF:([0-9]+) Progressive:([0-9]+) Undetermined:([0-9]+)', line)
            if match is None:
                return False
            tff = float(match.group(1))
            bff = float(match.group(2))
            progressive = float(match.group(3))
            undetermined = float(match.group(4))
            total = tff + bff + progressive + undetermined
            if total == 0:
                return False
            tff = tff / total
            bff = bff / total
            progressive = progressive / total
            undetermined = undetermined / total
            if undetermined < 0.05 and progressive < 0.8:
                if tff >= 0.8 or bff >= 0.8:
                    # It's probably interlaced.
                    return True
    return False
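
All of the detect_* examples on this page rely on an Invocation helper that is not shown here. A minimal, hypothetical sketch of such a wrapper (the real class may differ) is given below; note that .stdout would then hold the (stdout, stderr) pair of the process, which matches the a.stdout[0] and a.stdout[1] accesses in these examples:

import shlex
import subprocess

class Invocation:
    # Hypothetical minimal stand-in for the helper used by these examples.
    def __init__(self, template):
        self.template = template
        self.args = ()
        self.returncode = None
        self.exited = False  # assumed to flag abnormal termination, e.g. by signal

    def __call__(self, *args):
        # Remember the arguments to substitute into the command template.
        self.args = args

    def run(self):
        cmd = shlex.split(self.template.format(*self.args))
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        self.stdout = proc.communicate()  # (stdout, stderr) pair
        self.returncode = proc.returncode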
Example #2
def detect_plaintext(path):
    a = Invocation('file -b -e elf -e tar -e compress -e cdf -e apptype -i {0}')
    a(path)
    a.run()
    if a.returncode or a.exited:
        return None
    result = a.stdout[0]
    if result.startswith('text/x-') or result == 'text/plain':
        return {
            'type': result.split(';')[0],
            'metadata': None,
            'processor_state': None,
            'flags': None
        }
    return None
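
For reference, file -b ... -i prints a MIME string such as "text/x-python; charset=us-ascii" (the exact type varies with the version of file), which is why the type is cut at the first ';'. A hypothetical call, with an illustrative file name:

# print(detect_plaintext('detect.py'))
# -> {'type': 'text/x-python', 'metadata': None,
#     'processor_state': None, 'flags': None}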
Example #3
def detect_imagemagick(path):
    a = Invocation('identify -verbose {0}')
    a(path)
    a.run()
    try:
        result = a.stdout[0].split('\n')
        # Get mime type and dimensions
        mimetype = None
        metadata = None
        for line in result:
            line = line.lstrip(' ')
            if line.startswith('Mime type: '):
                mimetype = line[11:]
            if line.startswith('Geometry: '):
                match = re.search(r'(\d+)x(\d+)', line)
                if match:
                    metadata = { 'dimensions': { 'width': int(match.group(1)), 'height': int(match.group(2)) } }
        if mimetype in [ 'image/png', 'image/jpeg', 'image/svg+xml' ]:
            return {
                'type': mimetype,
                'metadata': metadata,
                'processor_state': None,
                'flags': None
            }
        # Check for other formats
        for line in result:
            line = line.lstrip(' ')
            if line == 'Format: XCF (GIMP image)':
                return {
                    'type': 'image/x-gimp-xcf',
                    'metadata': metadata,
                    'processor_state': None,
                    'flags': None
                }

        return {
            'type': 'image',
            'metadata': metadata,
            'processor_state': None,
            'flags': None
        }
    except Exception:
        return None
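
The parser above keys on fields printed by identify -verbose; abridged, the relevant part of its output looks roughly like this (exact fields vary with the ImageMagick version):

  Format: PNG (Portable Network Graphics)
  Geometry: 640x480+0+0
  Mime type: image/png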
Example #4
def get_invocations(benchmark: Benchmark):
    """
    Returns a list of invocations that invoke the tool for the given benchmark.
    It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the tool can take part. For each track an invocation with default settings has to be provided and in addition, an optimized setting (e.g., the fastest engine and/or solution technique for this benchmark) can be specified. Only information about the model type, the property type and the state space size are allowed to be used to tweak the parameters.
   
    If this benchmark is not supported, an empty list has to be returned.
    """
    # decide whether we want to use storm-dft (which supports galileo fault trees without repair)
    use_storm_dft = False
    if benchmark.is_galileo():
        has_repair = False
        for p in benchmark.get_file_parameters():
            if p["name"] == "R" and p["value"] is True:
                has_repair = True
        if not has_repair:
            use_storm_dft = True

    # Gather the precision settings for the corresponding track
    track_settings = dict()
    track_comments = dict()
    track_settings['correct'] = ' --exact '
    track_comments['correct'] = 'Use exact arithmetic with rationals.'
    track_settings[
        'floating-point-correct'] = ' --exact floats --general:precision 1e-20 '
    track_comments[
        'floating-point-correct'] = 'Use exact arithmetic with floats. The precision needs to be set to increase precision when printing the result to stdout.'
    track_settings['epsilon-correct'] = ' --sound --precision 1e-6 '
    track_comments['epsilon-correct'] = 'Use sound model checking methods.'
    track_settings['probably-epsilon-correct'] = ' --sound --precision 5e-2 '
    track_comments['probably-epsilon-correct'] = 'Use sound model checking.'
    track_settings['often-epsilon-correct'] = ' --timebounded:precision 1e-3 '
    track_comments[
        'often-epsilon-correct'] = 'Use potentially unsound but fast solution methods. Use default precision (1e-6) everywhere except for time-bounded queries, for which solution methods give epsilon guarantees.'
    track_settings[
        'often-epsilon-correct-10-min'] = ' --signal-timeout 60 --general:precision 1e-12 --gmm++:precision 1e-12 --native:precision 1e-12 --minmax:precision 1e-12 --timebounded:precision 1e-6 ' + (
            "" if use_storm_dft else "--lra:precision 1e-12 ")
    track_comments[
        'often-epsilon-correct-10-min'] = 'Only force termination 60 seconds after receiving SIGTERM. Use potentially unsound but fast solution methods. Use a high precision to make sure that we make use of the 10 minutes. Time-bounded queries cannot be answered that precisely due to numerics.'

    invocations = []

    for trackId in track_settings:

        if not is_benchmark_supported(benchmark, trackId):
            continue
        # Check whether this is a job for storm-dft
        if use_storm_dft:
            # We now have to obtain the correct property.
            # Unfortunately, this is necessary because the galileo files do not contain any information about the property.
            # The code below might easily break if we pick a different benchmark set.
            benchmark_settings = "--dftfile {} ".format(
                benchmark.get_galileo_filename())
            if benchmark.is_time_bounded_probabilistic_reachability():
                time_bound = 1
                for p in benchmark.get_parameters():
                    if p["name"] == "TIME_BOUND":
                        time_bound = p["value"]
                benchmark_settings += "--timebound {} --max".format(time_bound)
            elif benchmark.is_unbounded_expected_time():
                benchmark_settings += "--expectedtime --min"

            benchmark_settings += track_settings[trackId]
            default_inv = Invocation()
            default_inv.track_id = trackId
            default_inv.identifier = "default"
            default_inv.note = "Use Storm-dft with the requested property. " + track_comments[
                trackId]
            default_inv.add_command(
                "~/storm/build/bin/storm-dft {}".format(benchmark_settings))
            invocations.append(default_inv)
            continue  # with next trackId

        # Gather options that are needed for this particular benchmark for any invocation of Storm
        preprocessing_steps = []
        benchmark_settings = ""
        if (benchmark.is_prism()
                or benchmark.is_prism_ma()) and not benchmark.is_pta():
            benchmark_settings = "--prism {} --prop {} {}".format(
                benchmark.get_prism_program_filename(),
                benchmark.get_prism_property_filename(),
                benchmark.get_property_name())
            if benchmark.get_open_parameter_def_string() != "":
                benchmark_settings += " --constants {}".format(
                    benchmark.get_open_parameter_def_string())
            if benchmark.is_ctmc():
                benchmark_settings += " --prismcompat"
        else:
            # For jani input, it might be the case that preprocessing is necessary using moconv
            moconv_options = []
            features = benchmark.get_jani_features()
            for f in [
                    "arrays", "derived-operators", "functions",
                    "state-exit-rewards"
            ]:
                if f in features:
                    features.remove(f)
            if "nondet-selection" in features:
                moconv_options.append("--remove-disc-nondet")
                features.remove("nondet-selection")
            if len(features) != 0:
                print("Unsupported jani feature(s): {}".format(features))
            if benchmark.is_pta():
                moconv_options.append("--digital-clocks")
                if benchmark.get_model_short_name() == "wlan-large":
                    # This is actually a stochastic timed automaton. Distributions have to be unrolled first
                    moconv_options.append(" --unroll-distrs")
            if len(moconv_options) != 0:
                preprocessing_steps.append(
                    "~/modest/modest convert {} {} --output {} --overwrite".
                    format(benchmark.get_janifilename(),
                           " ".join(moconv_options),
                           "converted_" + benchmark.get_janifilename()))
                if benchmark.get_open_parameter_def_string() != "":
                    preprocessing_steps[-1] += " --experiment {}".format(
                        benchmark.get_open_parameter_def_string())
                benchmark_settings = "--jani {} --janiproperty {}".format(
                    "converted_" + benchmark.get_janifilename(),
                    benchmark.get_property_name())
            else:
                benchmark_settings = "--jani {} --janiproperty {}".format(
                    benchmark.get_janifilename(),
                    benchmark.get_property_name())
                if benchmark.get_open_parameter_def_string() != "":
                    benchmark_settings += " --constants {}".format(
                        benchmark.get_open_parameter_def_string())

        benchmark_settings += track_settings[trackId]
        benchmark_settings += " --ddlib sylvan --sylvan:maxmem 6114 --sylvan:threads 4"
        benchmark_comment = "Use sylvan as the library for DDs, restricted to 6GB of memory and 4 threads. " + track_comments[
            trackId]

        # default settings
        default_inv = Invocation()
        default_inv.identifier = "default"
        # default_inv.note = benchmark_comment
        default_inv.track_id = trackId
        for prep in preprocessing_steps:
            default_inv.add_command(prep)
        default_inv.add_command(
            "~/storm/build/bin/storm {}".format(benchmark_settings))
        invocations.append(default_inv)

        # specific settings
        # For each benchmark, Storm-static selects the best config among sparse, hybrid, ddbisim and exact (the same for all tracks).
        # We obtained this via previous experiments:
        best_configs = dict()
        best_configs["beb.4-8-7.LineSeized"] = "hybrid"
        best_configs["beb.5-16-15.LineSeized"] = "N/A"
        best_configs["bitcoin-attack.20-6.P_MWinMax"] = "ddbisim"
        best_configs["bluetooth.1.time"] = "ddbisim"
        best_configs["cabinets.3-2-true.Unavailability"] = "sparse"
        best_configs["cabinets.3-2-true.Unreliability"] = "ddbisim"
        best_configs["cluster.128-2000-20.premium_steady"] = "sparse"
        best_configs["cluster.128-2000-20.qos1"] = "hybrid"
        best_configs["cluster.64-2000-20.below_min"] = "hybrid"
        best_configs["consensus.4-4.disagree"] = "sparse"
        best_configs["consensus.4-4.steps_min"] = "sparse"
        best_configs["consensus.6-2.disagree"] = "ddbisim"
        best_configs["consensus.6-2.steps_min"] = "ddbisim"
        best_configs["coupon.15-4-5.collect_all_bounded"] = "ddbisim"
        best_configs["coupon.15-4-5.exp_draws"] = "ddbisim"
        best_configs["coupon.9-4-5.collect_all_bounded"] = "ddbisim"
        best_configs["coupon.9-4-5.exp_draws"] = "ddbisim"
        best_configs["crowds.5-20.positive"] = "ddbisim"
        best_configs["crowds.6-20.positive"] = "ddbisim"
        best_configs["csma.3-4.all_before_max"] = "hybrid"
        best_configs["csma.3-4.time_max"] = "hybrid"
        best_configs["csma.4-2.all_before_max"] = "hybrid"
        best_configs["csma.4-2.time_max"] = "hybrid"
        best_configs["dpm.4-8-5.PmaxQueuesFullBound"] = "N/A"
        best_configs["dpm.6-6-5.PminQueue1Full"] = "sparse"
        best_configs["eajs.5-250-11.ExpUtil"] = "ddbisim"
        best_configs["eajs.6-300-13.ExpUtil"] = "ddbisim"
        best_configs["echoring.100.MaxOffline1"] = "sparse"
        best_configs["egl.10-2.messagesB"] = "ddbisim"
        best_configs["egl.10-2.unfairA"] = "hybrid"
        best_configs["egl.10-8.messagesB"] = "ddbisim"
        best_configs["egl.10-8.unfairA"] = "hybrid"
        best_configs["elevators.b-11-9.goal"] = "sparse"
        best_configs["embedded.8-12.actuators"] = "exact"
        best_configs["embedded.8-12.up_time"] = "exact"
        best_configs["exploding-blocksworld.10.goal"] = "N/A"
        best_configs["firewire-pta.30-5000.eventually"] = "sparse"
        best_configs["firewire.false-36-800.deadline"] = "ddbisim"
        best_configs["fms.8.productivity"] = "N/A"
        best_configs["ftpp.2-2-true.Unavailability"] = "N/A"
        best_configs["ftwc.8-5.TimeMax"] = "sparse"
        best_configs["ftwc.8-5.TimeMin"] = "sparse"
        best_configs["haddad-monmege.100-0.7.exp_steps"] = "exact"
        best_configs["haddad-monmege.100-0.7.target"] = "exact"
        best_configs["hecs.false-1-1.Unreliability"] = "sparse"
        best_configs["hecs.false-2-2.Unreliability"] = "sparse"
        best_configs["hecs.false-3-2.Unreliability"] = "N/A"
        best_configs["herman.15.steps"] = "ddbisim"
        best_configs["kanban.5.throughput"] = "hybrid"
        best_configs["majority.2100.change_state"] = "sparse"
        best_configs["mapk_cascade.4-30.activated_time"] = "sparse"
        best_configs["mapk_cascade.4-30.reactions"] = "hybrid"
        best_configs["nand.40-4.reliable"] = "hybrid"
        best_configs["nand.60-4.reliable"] = "hybrid"
        best_configs[
            "oscillators.8-10-0.1-1-0.1-1.0.power_consumption"] = "sparse"
        best_configs["oscillators.8-10-0.1-1-0.1-1.0.time_to_synch"] = "sparse"
        best_configs["pacman.100.crash"] = "hybrid"
        best_configs["pacman.60.crash"] = "hybrid"
        best_configs["philosophers.16-1.MaxPrReachDeadlock"] = "hybrid"
        best_configs["philosophers.16-1.MaxPrReachDeadlockTB"] = "hybrid"
        best_configs["philosophers.16-1.MinExpTimeDeadlock"] = "hybrid"
        best_configs["philosophers.20-1.MaxPrReachDeadlock"] = "hybrid"
        best_configs["philosophers.20-1.MaxPrReachDeadlockTB"] = "N/A"
        best_configs["philosophers.20-1.MinExpTimeDeadlock"] = "N/A"
        best_configs["pnueli-zuck.10.live"] = "hybrid"
        best_configs["pnueli-zuck.5.live"] = "hybrid"
        best_configs["polling.18-16.s1_before_s2"] = "hybrid"
        best_configs["rabin.10.live"] = "hybrid"
        best_configs["readers-writers.40.exp_time_many_requests"] = "sparse"
        best_configs["readers-writers.40.prtb_many_requests"] = "hybrid"
        best_configs["rectangle-tireworld.11.goal"] = "exact"
        best_configs["resource-gathering.1300-100-100.expgold"] = "ddbisim"
        best_configs["resource-gathering.1300-100-100.expsteps"] = "sparse"
        best_configs["resource-gathering.1300-100-100.prgoldgem"] = "hybrid"
        best_configs["sms.3-true.Unavailability"] = "hybrid"
        best_configs["sms.3-true.Unreliability"] = "sparse"
        best_configs["speed-ind.2100.change_state"] = "sparse"
        best_configs["stream.1000.exp_buffertime"] = "sparse"
        best_configs["stream.1000.pr_underrun"] = "sparse"
        best_configs["stream.1000.pr_underrun_tb"] = "sparse"
        best_configs["tireworld.45.goal"] = "N/A"
        best_configs["triangle-tireworld.441.goal"] = "N/A"
        best_configs["vgs.5-10000.MaxPrReachFailedTB"] = "N/A"
        best_configs["vgs.5-10000.MinExpTimeFailed"] = "N/A"
        best_configs["wlan-large.2.E_or"] = "sparse"
        best_configs["wlan-large.2.P_max"] = "sparse"
        best_configs["wlan.4-0.cost_min"] = "hybrid"
        best_configs["wlan.4-0.sent"] = "hybrid"
        best_configs["wlan.5-0.cost_min"] = "hybrid"
        best_configs["wlan.5-0.sent"] = "hybrid"
        best_configs["wlan.6-0.cost_min"] = "hybrid"
        best_configs["wlan.6-0.sent"] = "hybrid"
        best_configs["zenotravel.4-2-2.goal"] = "hybrid"
        best_configs["zeroconf-pta.200.incorrect"] = "exact"
        best_configs["zeroconf.1000-8-false.correct_max"] = "sparse"
        best_configs["zeroconf.1000-8-false.correct_min"] = "sparse"

        try:
            config = best_configs[benchmark.get_identifier()]
        except KeyError:
            print(
                "Unable to find best config for {}. Is this a new benchmark?".
                format(benchmark.get_identifier()))
            config = "N/A"

        if config in ["N/A", "sparse"]:
            # This is like the default config and thus does not need a rerun
            continue
        elif config == "hybrid":
            benchmark_settings += " --engine hybrid"
        elif config == "ddbisim":
            benchmark_settings += " --engine dd-to-sparse --bisimulation"
        elif config == "exact":
            if trackId in ["correct", "floating-point-correct"]:
                benchmark_settings += " --engine sparse"
            else:
                benchmark_settings += " --engine sparse --exact"
        else:
            assert False, "Unhandled config"

        specific_inv = Invocation()
        specific_inv.identifier = "specific"
        specific_inv.track_id = trackId
        for prep in preprocessing_steps:
            specific_inv.add_command(prep)
        specific_inv.add_command(
            "~/storm/build/bin/storm {}".format(benchmark_settings))
        invocations.append(specific_inv)

    return invocations
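
A minimal, hypothetical driver for the invocations returned above (the benchmark loader is an assumption; the real QCOMP scripts may provide it differently):

def print_invocation_plan(benchmarks):
    # Print which track/identifier pairs would be executed per benchmark.
    for benchmark in benchmarks:
        for inv in get_invocations(benchmark):
            print(benchmark.get_identifier(), inv.track_id, inv.identifier)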
Example #5
def get_invocations(benchmark: Benchmark):
    """
    Returns a list of invocations that invoke the tool for the given benchmark.
    It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the tool can take part. For each track an invocation with default settings has to be provided and in addition, an optimized setting (e.g., the fastest engine and/or solution technique for this benchmark) can be specified. Only information about the model type, the property type and the state space size are allowed to be used to tweak the parameters.
   
    If this benchmark is not supported, an empty list has to be returned.
    """

    if not is_benchmark_supported(benchmark):
        return []

    # Gather options that are needed for this particular benchmark for any invocation of Storm
    preprocessing_steps = []
    benchmark_settings = ""
    if (benchmark.is_prism() or benchmark.is_prism_ma()) and not benchmark.is_pta():
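        # NOTE: this format string has only two placeholders, so the property
        # name passed as the third argument is silently ignored by format().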
        benchmark_settings = "{} {}".format(benchmark.get_prism_program_filename(), benchmark.get_prism_property_filename(), benchmark.get_property_name())
        if benchmark.get_open_parameter_def_string() != "":
            benchmark_settings += " -const {}".format(benchmark.get_open_parameter_def_string())
    else:
        # For jani input, it might be the case that preprocessing is necessary using moconv
        moconv_options = []
        features = benchmark.get_jani_features()
        for f in ["arrays", "derived-operators", "functions", "state-exit-rewards"]:
            if f in features: features.remove(f)
        if "nondet-selection" in features:
            moconv_options.append("--remove-disc-nondet")
            features.remove("nondet-selection")
        if len(features) != 0:
            print("Unsupported jani feature(s): {}".format(features))
        if benchmark.is_pta():
            moconv_options.append("--digital-clocks")

        if len(moconv_options) != 0:
            preprocessing_steps.append("mono /modest/moconv.exe {} {} --output {} --overwrite".format(benchmark.get_janifilename(), " ".join(moconv_options), "converted_" + benchmark.get_janifilename()))
            if benchmark.get_open_parameter_def_string() != "":
                preprocessing_steps[-1] += " --experiment {}".format(benchmark.get_open_parameter_def_string())
            benchmark_settings = "--jani {} --janiproperty {}".format("converted_" + benchmark.get_janifilename(), benchmark.get_property_name())
        else:
            benchmark_settings = "--jani {} --janiproperty {}".format(benchmark.get_janifilename(), benchmark.get_property_name())
            if benchmark.get_open_parameter_def_string() != "":
                benchmark_settings += " --constants {}".format(benchmark.get_open_parameter_def_string())

    invocations = []


    # default settings
    default_inv = Invocation()
    default_inv.identifier = "default"
    default_inv.track_id = "epsilon-correct"
    if len(preprocessing_steps) != 0:
        for prep in preprocessing_steps:
            default_inv.add_command(prep)
    default_inv.add_command("~/stamina/stamina/bin/stamina {}".format(benchmark_settings))
    invocations.append(default_inv)

    # specific settings. NOTE: only information about model type, property type and state space size (via benchmark.get_num_states_tweak()) may be used for tweaking
    specific_inv = Invocation()
    specific_inv.identifier = "specific"
    specific_inv.track_id = "epsilon-correct"
    if len(preprocessing_steps) != 0:
        for prep in preprocessing_steps:
            specific_inv.add_command(prep)
    specific_inv.add_command("~/stamina/stamina/bin/stamina {}".format(benchmark_settings))
    invocations.append(specific_inv)

    #### TODO: add default and specific invocations for other track_ids 'correct', 'probably-epsilon-correct', 'often-epsilon-correct', 'often-epsilon-correct-10-min'
    ### remember that different tracks have different precisions

    return invocations
Example #6
def get_invocations(benchmark: Benchmark):
    """
    Returns a list of invocations that invoke the tool for the given benchmark.
    It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the tool can take part. For each track an invocation with default settings has to be provided and in addition, an optimized setting (e.g., the fastest engine and/or solution technique for this benchmark) can be specified. Only information about the model type, the property type and the state space size are allowed to be used to tweak the parameters.
   
    If this benchmark is not supported, an empty list has to be returned.
    """

    if not is_benchmark_supported(benchmark):
        return []

    # Gather options that are needed for this particular benchmark for any invocation of Storm
    preprocessing_steps = []
    benchmark_settings = ""
    epsilon = "1e-3"
    if benchmark.is_prism():
        # set parameters
        # set --graphsolver-iterative-tolerance to epsilon, the maximal difference allowed for this track
        benchmark_settings = "--model-input-files {} --model-input-type prism --property-input-files {} --property-input-names {} --translate-messages false --value-floating-point-output-native true --graphsolver-iterative-stop-criterion relative --graphsolver-iterative-tolerance {}".format(
            benchmark.get_prism_program_filename(),
            benchmark.get_prism_property_filename(),
            benchmark.get_property_name(), epsilon)
    else:
        # put properties in separate files
        benchmark_settings = "--model-input-files {} --model-input-type jani --property-input-names {} --translate-messages false --value-floating-point-output-native true --graphsolver-iterative-stop-criterion relative --graphsolver-iterative-tolerance {}".format(
            benchmark.get_janifilename(), benchmark.get_property_name(),
            epsilon)
    if benchmark.get_open_parameter_def_string() != "":
        benchmark_settings += " --const {}".format(
            benchmark.get_open_parameter_def_string())

    memsize = "10240m"
    invocations = []

    # default settings
    default_inv = Invocation()
    default_inv.identifier = "default"
    default_inv.track_id = "often-epsilon-correct"
    if len(preprocessing_steps) != 0:
        for prep in preprocessing_steps:
            default_inv.add_command(prep)
    default_inv.add_command(
        "java -Xms{} -Xmx{} -jar ./epmc-standard.jar check {}".format(
            memsize, memsize, benchmark_settings))
    invocations.append(default_inv)

    #if (benchmark.is_ctmc() or benchmark.is_dtmc()):
    #    for tId in ["floating-point-correct", "epsilon-correct", "often-epsilon-correct"]:
    #        # specific settings. NOTE: only information about model type, property type and state space size (via benchmark.get_num_states_tweak()) may be used for tweaking
    #        specific_inv = Invocation()
    #        specific_inv.identifier = "specific"
    #        specific_inv.track_id = tId
    #        if len(preprocessing_steps) != 0:
    #            for prep in preprocessing_steps:
    #                specific_inv.add_command(prep)
    #        if tId == "floating-point-correct":
    #            epsilon = "1e-14"
    #        if tId == "epsilon-correct":
    #            epsilon = "1e-6"
    #        if tId == "often-epsilon-correct":
    #            epsilon = "1e-3"
    #        specific_inv.add_command("java -Xms{} -Xmx{} -jar ./epmc-qcomp.jar check {} --graph-solver-stopping-criterion relative --graphsolver-iterative-tolerance {} --engine on-the-fly-eliminator".format(memsize, memsize, benchmark_settings, epsilon))
    #        invocations.append(specific_inv)

    #### TODO: add default and specific invocations for other track_ids 'correct', 'floating-point-correct', 'probably-epsilon-correct', 'often-epsilon-correct', 'often-epsilon-correct-10-min'
    ### remember that different tracks have different precisions

    return invocations
Example #7
def detect_ffprobe(path):
    a = Invocation('ffprobe -print_format json -loglevel quiet -show_format -show_streams {0}')
    a(path)
    a.run()
    if a.returncode or a.exited:
        return None
    result = json.loads(a.stdout[0])

    audio_streams = 0
    video_streams = 0
    image_streams = 0
    subtitle_streams = 0
    font_streams = 0
    # We shouldn't penalize people for unknown streams, I just figured we could make a note of it
    unknown_streams = 0

    metadata = dict()
    state = dict()
    flags = dict()
    state['streams'] = list()
    index = 0

    for stream in result["streams"]:
        s = detect_stream(stream)
        if s is None:
            continue
        # Set up some metadata
        if s['metadata'] is not None:
            if 'duration' in s['metadata']:
                metadata['duration'] = s['metadata']['duration']
            if 'dimensions' in s['metadata']:
                metadata['dimensions'] = s['metadata']['dimensions']
        t = s['type']
        if not s or not t:
            unknown_streams += 1
        else:
            state['streams'].append({
                'type': t,
                'info': s['processor_state'],
                'index': index
            })
            if t.startswith('image'):
                image_streams += 1
            elif t == 'video':
                video_streams += 1
                flags = s['flags']
            elif t == 'audio':
                audio_streams += 1
            elif t == 'subtitle':
                subtitle_streams += 1
            elif t == 'font':
                font_streams += 1
            else:
                unknown_streams += 1
        index += 1
    metadata = ffprobe_addExtraMetadata(metadata, result)
    if audio_streams == 1 and video_streams == 0:
        metadata['has_audio'] = True
        metadata['has_video'] = False
        state['has_audio'] = True
        state['has_video'] = False
        return {
            'type': 'audio',
            'processor_state': state,
            'metadata': metadata,
            'flags': None
        }
    if video_streams > 0:
        metadata['has_audio'] = audio_streams > 0
        metadata['has_video'] = True
        metadata['has_subtitles'] = subtitle_streams > 0
        state['has_audio'] = audio_streams > 0
        state['has_video'] = True
        state['has_fonts'] = font_streams > 0
        state['has_subtitles'] = subtitle_streams > 0
        if subtitle_streams > 0:
            metadata = addSubtitleInfo(metadata, state)
        if detect_interlacing(path):
            state['interlaced'] = True
        return {
            'type': 'video',
            'processor_state': state,
            'metadata': metadata,
            'flags': flags
        }
    return None
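
Shape of a typical result for a clip with one video and one audio stream (all values illustrative):

# detect_ffprobe('clip.mp4') might return, roughly:
# {'type': 'video',
#  'metadata': {'duration': ..., 'dimensions': ..., 'has_audio': True,
#               'has_video': True, 'has_subtitles': False},
#  'processor_state': {'streams': [...], 'has_audio': True, 'has_video': True, ...},
#  'flags': {...}}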
Example #8
def get_invocations(benchmark: Benchmark):
    """
    Returns a list of invocations that invoke the tool for the given benchmark.
    It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the tool can take part. For each track an invocation with default settings has to be provided and in addition, an optimized setting (e.g., the fastest engine and/or solution technique for this benchmark) can be specified. Only information about the model type, the property type and the state space size are allowed to be used to tweak the parameters.
   
    If this benchmark is not supported, an empty list has to be returned.
    """

    if not is_benchmark_supported(benchmark):
        return []

    # Gather options that are needed for this particular benchmark for any invocation of PRISM
    benchmark_instance = get_prism_invocation_model_prop_instance(benchmark)

    invocations = []

    basic_args = "{}".format(prism_mem_args)

    # epsilon-correct (all models but PTAs), default settings
    if (benchmark.get_model_type() != "pta"):
        # Use interval iteration generally (or uniformisation for time-bounded CTMCs)
        default_args = "-ii"
        # Choose engine heuristically
        default_args += " -heuristic speed"
        # Required precision (default anyway)
        default_args += " -e 1e-6"
        # Usual II settings when there is plenty of memory
        default_args += " -ddextraactionvars 100"
        # Increase maxiters (since QComp has a timeout anyway)
        default_args += " -maxiters 1000000"
        default_inv = Invocation()
        default_inv.identifier = "default"
        default_inv.track_id = "epsilon-correct"
        default_inv.add_command(prism_bin + " " + basic_args + " " +
                                default_args + " " + benchmark_instance)
        invocations.append(default_inv)

    # epsilon-correct (all models but PTAs), specific settings
    if (benchmark.get_model_type() != "pta"):
        # Choose method/engine
        # Use interval iteration generally (or uniformisation for time-bounded CTMCs)
        if benchmark.get_model_short_name() == "haddad-monmege":
            specific_args = "-exact"
        elif (benchmark.get_num_states_tweak() is None
              or benchmark.get_num_states_tweak() >= 20000000):
            specific_args = "-ii -mtbdd"
        else:
            specific_args = "-ii -heuristic speed"
        # Required precision (default anyway)
        specific_args += " -e 1e-6"
        # Usual II settings when there is plenty of memory
        specific_args += " -ddextraactionvars 100"
        # Increase maxiters (since QComp has a timeout anyway)
        specific_args += " -maxiters 1000000"
        specific_inv = Invocation()
        specific_inv.identifier = "specific"
        specific_inv.track_id = "epsilon-correct"
        specific_inv.add_command(prism_bin + " " + basic_args + " " +
                                 specific_args + " " + benchmark_instance)
        invocations.append(specific_inv)

    # often-epsilon-correct (all models), default settings
    if (True):
        # Choose engine heuristically
        default_args = "-heuristic speed"
        # Required precision (just use default 1e-6, as agreed for QComp'19)
        default_args += " -e 1e-6"
        # Increase maxiters (since QComp has a timeout anyway)
        default_args += " -maxiters 1000000"
        default_inv = Invocation()
        default_inv.identifier = "default"
        default_inv.track_id = "often-epsilon-correct"
        default_inv.add_command(prism_bin + " " + basic_args + " " +
                                default_args + " " + benchmark_instance)
        invocations.append(default_inv)

    # often-epsilon-correct (all models), specific settings
    if (True):
        # Choose method/engine
        if benchmark.get_model_short_name() == "haddad-monmege":
            specific_args = "-exact"
        elif (benchmark.get_num_states_tweak() is None
              or benchmark.get_num_states_tweak() >= 20000000):
            specific_args = "-mtbdd"
        else:
            specific_args = "-heuristic speed"
        # Required precision (just use default 1e-6, as agreed for QComp'19)
        specific_args += " -e 1e-6"
        # Increase maxiters (since QComp has a timeout anyway)
        specific_args += " -maxiters 1000000"
        specific_inv = Invocation()
        specific_inv.identifier = "specific"
        specific_inv.track_id = "often-epsilon-correct"
        specific_inv.add_command(prism_bin + " " + basic_args + " " +
                                 specific_args + " " + benchmark_instance)
        invocations.append(specific_inv)

    # probably-epsilon-correct (all models but PTAs), default settings
    if (benchmark.get_model_type() != "pta"):
        # Use interval iteration generally (or uniformisation for time-bounded CTMCs)
        if (benchmark.get_model_type() == "ctmc"
                and benchmark.is_time_bounded_probabilistic_reachability()):
            default_args = ""
        else:
            default_args = "-ii -e 5e-2"
        # Choose engine heuristically
        default_args += " -heuristic speed"
        # Usual II settings when there is plenty of memory
        default_args += " -ddextraactionvars 100"
        # Increase maxiters (since QComp has a timeout anyway)
        default_args += " -maxiters 1000000"
        default_inv = Invocation()
        default_inv.identifier = "default"
        default_inv.track_id = "probably-epsilon-correct"
        default_inv.add_command(prism_bin + " " + basic_args + " " +
                                default_args + " " + benchmark_instance)
        invocations.append(default_inv)

    # probably-epsilon-correct (all models but PTAs), specific settings
    if (benchmark.get_model_type() != "pta"):
        # Choose method/engine
        # Use interval iteration generally (or uniformisation for time-bounded CTMCs)
        if benchmark.get_model_short_name() == "haddad-monmege":
            specific_args = "-exact"
        elif (benchmark.get_model_type() == "ctmc"
              and benchmark.is_time_bounded_probabilistic_reachability()):
            specific_args = ""
        elif (benchmark.get_num_states_tweak() is None
              or benchmark.get_num_states_tweak() >= 20000000):
            specific_args = "-ii -e 5e-2 -mtbdd"
        else:
            specific_args = "-ii -e 5e-2 -heuristic speed"
        # Usual II settings when there is plenty of memory
        specific_args += " -ddextraactionvars 100"
        # Increase maxiters (since QComp has a timeout anyway)
        specific_args += " -maxiters 1000000"
        specific_inv = Invocation()
        specific_inv.identifier = "specific"
        specific_inv.track_id = "probably-epsilon-correct"
        specific_inv.add_command(prism_bin + " " + basic_args + " " +
                                 specific_args + " " + benchmark_instance)
        invocations.append(specific_inv)

    return invocations
Example #9
def add_invocations(invocations, track_id, default_cmd):
    default_inv = Invocation()
    default_inv.identifier = "default"
    default_inv.track_id = track_id
    default_inv.add_command(default_cmd)
    invocations.append(default_inv)
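
Hypothetical usage of this helper (tool command and file names illustrative):

invocations = []
add_invocations(invocations, "epsilon-correct",
                "./mytool --epsilon 1e-6 model.prism model.props")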
Example #10
def get_invocations(benchmark: Benchmark):
    """
    Returns a list of invocations that invoke the tool for the given benchmark.
    It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the tool can take part. For each track an invocation with default settings has to be provided and in addition, an optimized setting (e.g., the fastest engine and/or solution technique for this benchmark) can be specified. Only information about the model type, the property type and the state space size are allowed to be used to tweak the parameters.

    If this benchmark is not supported, an empty list has to be returned.
    """

    if not is_benchmark_supported(benchmark):
        return []

    prec = dict()
    prec["epsilon-correct"] = "0.000001"
    prec["probably-epsilon-correct"] = "0.05"
    prec["often-epsilon-correct"] = "0.001"
    prec["often-epsilon-correct-10-min"] = "0.001"

    result = []

    for track in prec.keys():

        benchmark_settings = "./pet.sh reachability --precision {} --relative-error --only-result -m {} -p {} --property {}".format(
            prec[track],
            benchmark.get_prism_program_filename(),
            benchmark.get_prism_property_filename(),
            benchmark.get_property_name(),
        )
        if benchmark.get_open_parameter_def_string() != "":
            benchmark_settings += " --const {}".format(
                benchmark.get_open_parameter_def_string())
        if ("haddad" in benchmark.get_prism_program_filename()
                or "gathering" in benchmark.get_prism_program_filename()):
            benchmark_settings = "./fix-syntax " + benchmark_settings

        # default settings PET eps-corr
        default_inv = Invocation()
        default_inv.identifier = "default"
        default_inv.note = "Default settings."
        default_inv.track_id = track
        default_inv.add_command(benchmark_settings)

        result += [default_inv]

        if (track == "epsilon-correct" or benchmark.get_model_type() == "ctmc"
                or "haddad" in benchmark.get_prism_program_filename()
                or "csma" in benchmark.get_prism_program_filename()
                or "wlan" in benchmark.get_prism_program_filename()
                or "gathering" in benchmark.get_prism_program_filename()):
            # SMC is only probably-epsilon-correct; it cannot handle CTMCs, and haddad-monmege cannot be parsed by it
            continue
        if benchmark.get_num_states_tweak() is None:
            # need this info
            continue

        smc_settings = "./smc.sh {} {} -prop {} -heuristic RTDP_ADJ -RTDP_ADJ_OPTS 1 -colourParams S:{},Av:10,e:{},d:0.05,p:0.05,post:64".format(
            benchmark.get_prism_program_filename(),
            benchmark.get_prism_property_filename(),
            benchmark.get_property_name(),
            benchmark.get_num_states_tweak(),
            prec[track],
        )
        if benchmark.get_open_parameter_def_string() != "":
            smc_settings += " -const {}".format(
                benchmark.get_open_parameter_def_string())
        if ("haddad" in benchmark.get_prism_program_filename()
                or "gathering" in benchmark.get_prism_program_filename()):
            smc_settings = "./fix-syntax " + smc_settings

        # SMC invocations
        SMC_inv = Invocation()
        SMC_inv.identifier = "specific"
        SMC_inv.note = "Statistical model checking with limited information (no transition probabilities)"
        SMC_inv.track_id = track
        SMC_inv.add_command(smc_settings)

        result += [SMC_inv]

    return result
Example #11
def get_invocations(benchmark: Benchmark):
    """
    Returns a list of invocations that invoke the tool for the given benchmark.
    It can be assumed that the current directory is the directory from which
    execute_invocations.py is executed.

    For QCOMP 2020, this should return a list of invocations for all tracks
    in which the tool can take part. For each track an invocation
    with default settings has to be provided and in addition, an optimized
    setting (e.g., the fastest engine and/or solution technique for this
    benchmark) can be specified. Only information about the model type,
    the property type and the state space size are allowed to be used to
    tweak the parameters.

    If this benchmark is not supported, an empty list has to be returned.
    """
    #
    if not is_benchmark_supported(benchmark):
        return []
    #
    OUR_TRACKS = {
        # track-id : relative error
        "probably-epsilon-correct": 5e-2,
        "often-epsilon-correct": 1e-3,
        "often-epsilon-correct-10-min": 1e-99,  # run till judgement day
    }
    TWEAKS = "--unsafe-scheduling --fixed-batches"
    # Some Unreliability queries need other tweaks in some tracks
    IS_UNREL = "Unreliability" in benchmark.get_property_name()
    STWEAKS = {
        "probably-epsilon-correct": {
            "cabinets.3-2-true.jani": "--unsafe-scheduling",
        },
        "often-epsilon-correct": {
            "cabinets.3-2-true.jani": "--unsafe-scheduling",
            "hecs.false-1-1.jani": "--unsafe-scheduling",
            "hecs.false-2-2.jani": "--unsafe-scheduling",
            "hecs.false-3-2.jani": "--unsafe-scheduling",
        },
        "often-epsilon-correct-10-min": {
            "cabinets.3-2-true.jani": "--unsafe-scheduling",
            "hecs.false-1-1.jani": "--unsafe-scheduling",
            "hecs.false-2-2.jani": "--unsafe-scheduling",
            "hecs.false-3-2.jani": "--unsafe-scheduling",
        },
    }
    JVM = "java -Xmx6G -XX:+UseParallelGC"
    RNG = "-s 0"  # seed 0 for reproducibility
    PROP = "--prop " + benchmark.get_property_name()
    PARAMS = ""
    for p in benchmark.get_parameters():
        PARAMS += " --def " + p["name"] + " " + str(p["value"])
    CALL = " ".join([JVM, "-jar DFTRES/DFTRES.jar", RNG, PROP, PARAMS])
    invocations = []
    for track, precision in OUR_TRACKS.items():
        err = "--relErr " + str(precision)
        for invID, tweaks in [("default", ""), ("specific", TWEAKS)]:
            i = Invocation()
            i.identifier = invID
            i.track_id = track
            fname = benchmark.get_janifilename()
            CHECKIT = IS_UNREL and invID == "specific"
            if CHECKIT and track in STWEAKS and fname in STWEAKS[track]:
                tweaks = STWEAKS[track][fname]
            i.add_command(" ".join([CALL, err, tweaks, fname]))
            invocations.append(i)
    return invocations
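
For a given track, the loop above assembles command lines of roughly this shape (file name, property and parameter illustrative):

# java -Xmx6G -XX:+UseParallelGC -jar DFTRES/DFTRES.jar -s 0 \
#     --prop Unreliability --def N 3 --relErr 0.001 cabinets.3-2-true.jani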
Example #12
def add_invocations(invocations, track_id, default_cmd, specific_cmd):
	default_inv = Invocation()
	default_inv.identifier = "default"
	default_inv.track_id = track_id
	default_inv.add_command(default_cmd)
	invocations.append(default_inv)
	if default_cmd != specific_cmd:
		specific_inv = Invocation()
		specific_inv.identifier = "specific"
		specific_inv.track_id = track_id
		specific_inv.add_command(specific_cmd)
		invocations.append(specific_inv)
Example #13
def get_invocations(benchmark: Benchmark):
    """
    Returns a list of invocations that invoke the tool for the given benchmark.
    It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the tool can take part. For each track an invocation with default settings has to be provided and in addition, an optimized setting (e.g., the fastest engine and/or solution technique for this benchmark) can be specified. Only information about the model type, the property type and the state space size are allowed to be used to tweak the parameters.
   
    If this benchmark is not supported, an empty list has to be returned.
    """
    # decide whether we want to use storm-dft (which supports galileo fault trees without repair)
    use_storm_dft = False
    if benchmark.is_galileo():
        has_repair = False
        for p in benchmark.get_file_parameters():
            if p["name"] == "R" and p["value"] is True:
                has_repair = True
        if not has_repair:
            use_storm_dft = True

    # Gather the precision settings for the corresponding track
    track_settings = dict()
    track_comments = dict()
    track_settings['correct'] = ' --exact '
    track_comments['correct'] = 'Use exact arithmetic with rationals.'
    track_settings[
        'floating-point-correct'] = ' --exact floats --general:precision 1e-20 '
    track_comments[
        'floating-point-correct'] = 'Use exact arithmetic with floats. The precision needs to be set to increase precision when printing the result to stdout.'
    track_settings['epsilon-correct'] = ' --sound --precision 1e-6 '
    track_comments['epsilon-correct'] = 'Use sound model checking methods.'
    track_settings['probably-epsilon-correct'] = ' --sound --precision 5e-2 '
    track_comments['probably-epsilon-correct'] = 'Use sound model checking.'
    track_settings['often-epsilon-correct'] = ' --timebounded:precision 1e-3 '
    track_comments[
        'often-epsilon-correct'] = 'Use potentially unsound but fast solution methods. Use default precision (1e-6) everywhere except for time-bounded queries, for which solution methods give epsilon guarantees.'
    track_settings[
        'often-epsilon-correct-10-min'] = ' --signal-timeout 60 --general:precision 1e-12 --gmm++:precision 1e-12 --native:precision 1e-12 --minmax:precision 1e-12 --timebounded:precision 1e-6 ' + (
            "" if use_storm_dft else "--lra:precision 1e-12 ")
    track_comments[
        'often-epsilon-correct-10-min'] = 'Only force termination 60 seconds after receiving SIGTERM. Use potentially unsound but fast solution methods. Use a high precision to make sure that we make use of the 10 minutes. Time-bounded queries cannot be answered that precisely due to numerics.'

    invocations = []

    for trackId in track_settings:

        if not is_benchmark_supported(benchmark, trackId):
            continue
        # Check whether this is a job for storm-dft
        if use_storm_dft:
            # We now have to obtain the correct property.
            # Unfortunately, this is necessary because the galileo files do not contain any information about the property.
            # The code below might easily break if we pick a different benchmark set.
            benchmark_settings = "--dftfile {} ".format(
                benchmark.get_galileo_filename())
            if benchmark.is_time_bounded_probabilistic_reachability():
                time_bound = 1
                for p in benchmark.get_parameters():
                    if p["name"] == "TIME_BOUND":
                        time_bound = p["value"]
                benchmark_settings += "--timebound {} --max".format(time_bound)
            elif benchmark.is_unbounded_expected_time():
                benchmark_settings += "--expectedtime --min"

            benchmark_settings += track_settings[trackId]
            default_inv = Invocation()
            default_inv.track_id = trackId
            default_inv.identifier = "default"
            default_inv.note = "Use Storm-dft with the requested property. " + track_comments[
                trackId]
            default_inv.add_command(
                "~/storm/build/bin/storm-dft {}".format(benchmark_settings))
            invocations.append(default_inv)
            continue  # with next trackId

        # Gather options that are needed for this particular benchmark for any invocation of Storm
        preprocessing_steps = []
        benchmark_settings = ""
        if (benchmark.is_prism()
                or benchmark.is_prism_ma()) and not benchmark.is_pta():
            benchmark_settings = "--prism {} --prop {} {}".format(
                benchmark.get_prism_program_filename(),
                benchmark.get_prism_property_filename(),
                benchmark.get_property_name())
            if benchmark.get_open_parameter_def_string() != "":
                benchmark_settings += " --constants {}".format(
                    benchmark.get_open_parameter_def_string())
            if benchmark.is_ctmc():
                benchmark_settings += " --prismcompat"
        else:
            # For jani input, it might be the case that preprocessing is necessary using moconv
            moconv_options = []
            features = benchmark.get_jani_features()
            for f in [
                    "arrays", "derived-operators", "functions",
                    "state-exit-rewards"
            ]:
                if f in features:
                    features.remove(f)
            if "nondet-selection" in features:
                moconv_options.append("--remove-disc-nondet")
                features.remove("nondet-selection")
            if len(features) != 0:
                print("Unsupported jani feature(s): {}".format(features))
            if benchmark.is_pta():
                moconv_options.append("--digital-clocks")
                if benchmark.get_model_short_name() == "wlan-large":
                    # This is actually a stochastic timed automaton. Distributions have to be unrolled first
                    moconv_options.append(" --unroll-distrs")
            if len(moconv_options) != 0:
                preprocessing_steps.append(
                    "~/modest/modest convert {} {} --output {} --overwrite".
                    format(benchmark.get_janifilename(),
                           " ".join(moconv_options),
                           "converted_" + benchmark.get_janifilename()))
                if benchmark.get_open_parameter_def_string() != "":
                    preprocessing_steps[-1] += " --experiment {}".format(
                        benchmark.get_open_parameter_def_string())
                benchmark_settings = "--jani {} --janiproperty {}".format(
                    "converted_" + benchmark.get_janifilename(),
                    benchmark.get_property_name())
            else:
                benchmark_settings = "--jani {} --janiproperty {}".format(
                    benchmark.get_janifilename(),
                    benchmark.get_property_name())
                if benchmark.get_open_parameter_def_string() != "":
                    benchmark_settings += " --constants {}".format(
                        benchmark.get_open_parameter_def_string())

        benchmark_settings += track_settings[trackId]
        benchmark_settings += " --engine portfolio --ddlib sylvan --sylvan:maxmem 6114 --sylvan:threads 4"
        benchmark_comment = "Use Storm with the portfolio engine. Use sylvan as the library for DDs, restricted to 6GB of memory and 4 threads. " + track_comments[
            trackId]
        # default settings
        default_inv = Invocation()
        default_inv.identifier = "default"
        # Apparently, a note is not needed
        # default_inv.note = benchmark_comment
        default_inv.track_id = trackId
        for prep in preprocessing_steps:
            default_inv.add_command(prep)
        default_inv.add_command(
            "~/storm/build/bin/storm {}".format(benchmark_settings))
        invocations.append(default_inv)

        # specific settings. NOTE: only information about model type, property type and state space size (via benchmark.get_num_states_tweak()) may be used for tweaking
        # Omitted because there is no significant benefit
        # if benchmark.get_num_states_tweak() is not None:
        #    specific_inv = Invocation()
        #    specific_inv.identifier = "specific"
        #    specific_inv.track_id = trackId
        #    for prep in preprocessing_steps:
        #        specific_inv.add_command(prep)
        #    specific_inv.add_command("~/storm/build/bin/storm {} --hints:states {}".format(benchmark_settings, benchmark.get_num_states_tweak()))
        #    invocations.append(specific_inv)

    return invocations