Example #1
def test_failures(source, raw_model):
    # Check for unused labels
    raw_model1 = copy.deepcopy(raw_model)
    raw_model1['environment processes']['c1/p2']['labels']['unused_label'] = {
        'declaration': 'int x'
    }
    with pytest.raises(RuntimeError):
        CollectionDecoder(logging, dict()).parse_event_specification(
            source, json.loads(json.dumps(raw_model1)), ProcessCollection())
    # Check for unused actions
    raw_model2 = copy.deepcopy(raw_model)
    raw_model2['environment processes']['c1/p2']['actions']['new'] = {
        'comment': 'Test',
        "statements": []
    }
    with pytest.raises(RuntimeError):
        CollectionDecoder(logging, dict()).parse_event_specification(
            source, json.loads(json.dumps(raw_model2)), ProcessCollection())

    # TODO: check for an unused recursive subprocess
    raw_model3 = copy.deepcopy(raw_model)
    raw_model3['environment processes']['c1/p2']['actions']['test'] = {
        'comment': 'Test',
        "process": "(<read> | <read>).{test}"
    }
    with pytest.raises(RuntimeError):
        CollectionDecoder(logging, dict()).parse_event_specification(
            source, json.loads(json.dumps(raw_model3)), ProcessCollection())

    raw_model4 = copy.deepcopy(raw_model)
    raw_model4['environment processes']['c1/p1'][
        'process'] = '(!register_c1p1).{activate[%unknown_label%]}'
    with pytest.raises(RuntimeError):
        CollectionDecoder(logging, dict()).parse_event_specification(
            source, json.loads(json.dumps(raw_model4)), ProcessCollection())
Example #2
def __select_processes_and_models(logger, conf, interfaces, collection):
    chosen = ProcessCollection()

    # Import necessary kernel models
    logger.info("First, add relevant models of kernel functions")
    __import_kernel_models(logger, conf, interfaces, collection, chosen)

    for category in interfaces.categories:
        uncalled_callbacks = interfaces.uncalled_callbacks(category)
        logger.debug("There are {} callbacks in category {!r}".format(len(uncalled_callbacks), category))

        if uncalled_callbacks:
            logger.info("Try to find processes to call callbacks from category {!r}".format(category))
            new = __choose_processes(logger, conf, interfaces, category, chosen, collection)

            # Sanity check
            logger.info("Check again how many callbacks are not called still in category {!r}".format(category))
            uncalled_callbacks = interfaces.uncalled_callbacks(category)
            if uncalled_callbacks and not conf.get('ignore missed callbacks', True):
                raise RuntimeError("There are callbacks from category {!r} which are not called at all in the "
                                   "model: {}".format(category, ', '.join(map(str, uncalled_callbacks))))
            elif uncalled_callbacks:
                logger.warning("There are callbacks from category {!r} which are not called at all in the "
                               "model: {}. Disable option 'ignore missed callbacks' in intermediate model "
                               "configuration properties if you would like to terminate.".
                               format(category, ', '.join(map(str, uncalled_callbacks))))
            logger.info("Added process {!r} have unmatched signals, need to find factory or registration "
                        "and deregistration functions".format(new.name))
            __establish_signal_peers(logger, conf, interfaces, new, chosen, collection)
        else:
            logger.info("Ignore interface category {!r}, since it does not have callbacks to call".format(category))

    return chosen
Example #3
def model_preset():
    source = source_preset()
    raw_model = raw_model_preset()
    parser = CollectionDecoder(logging, dict())
    model = parser.parse_event_specification(source, raw_model,
                                             ProcessCollection())
    model.establish_peers()
    return model
Example #4
def test_export_model(source, model):
    raw1 = json.dumps(model, cls=CollectionEncoder)
    new_model = CollectionDecoder(logging, dict()).parse_event_specification(
        source, json.loads(raw1), ProcessCollection())
    raw2 = json.dumps(new_model, cls=CollectionEncoder)

    raw1 = json.loads(raw1)
    raw2 = json.loads(raw2)
    _compare_models(raw1, raw2)
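
Example #4 relies on a _compare_models helper that is not shown on this page. The sketch below is only an illustration of what such a comparison might check, assuming it works on the two serialized dictionaries; the _compare_models_sketch name and the compared keys are assumptions based on the specification layout visible in the later examples, not the project's actual implementation.

def _compare_models_sketch(raw1, raw2):
    # Hypothetical check: both serialized models describe the same process names
    for section in ("functions models", "environment processes"):
        assert set(raw1.get(section, {})) == set(raw2.get(section, {}))
    # Hypothetical check: the entry process survives the export/import round trip
    assert ("main process" in raw1) == ("main process" in raw2)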
Example #5
def test_requirements_field(source, raw_model):
    test_raw_model = copy.deepcopy(raw_model)
    assert 'c1/p1' in test_raw_model['environment processes']['c1/p2'][
        'actions']['register_c1p2']['require']['processes']

    # Incorrect process
    test_raw_model['environment processes']['c1/p2']['actions']['register_c1p2']['require']['processes']['c5/p4'] =\
        dict()
    with pytest.raises(ValueError):
        CollectionDecoder(logging, dict()).parse_event_specification(
            source, json.loads(json.dumps(test_raw_model)),
            ProcessCollection())

    # Missing action
    test_raw_model = copy.deepcopy(raw_model)
    test_raw_model['environment processes']['c1/p2']['actions']['register_c1p2']['require']['actions'] = \
        {'c1/p1': ['goaway']}
    with pytest.raises(ValueError):
        CollectionDecoder(logging, dict()).parse_event_specification(
            source, json.loads(json.dumps(test_raw_model)),
            ProcessCollection())
Example #6
def test_savepoint_uniqueness(source, raw_model):
    raw_model = copy.deepcopy(raw_model)

    # Add two savepoints with the same name
    assert 'p2s1' in raw_model['environment processes']['c1/p2']['actions'][
        'register_c1p2']['savepoints']
    new_sp = dict(raw_model["environment processes"]['c1/p2']['actions']
                  ['register_c1p2']['savepoints']['p2s1'])
    raw_model['environment processes']['c2/p1']['actions']['register_c2p1'][
        'savepoints']['p2s1'] = new_sp

    # Expect an error
    with pytest.raises(ValueError):
        CollectionDecoder(logging, dict()).parse_event_specification(
            source, json.loads(json.dumps(raw_model)), ProcessCollection())
Example #7
File: __init__.py Project: naumushv/klever
    def generate_environment(self):
        """
        Main function of EMG plugin.

        Plugin generates an environment model for the verification task.

        :return: None
        """
        self.logger.info("Start environment model generator {}".format(
            self.id))

        # Initialization of EMG
        self.logger.info("Import results of source analysis")
        sa = create_source_representation(self.logger, self.conf,
                                          self.abstract_task_desc)

        # Generate processes
        self.logger.info("Generate processes of an environment model")
        collection = ProcessCollection()
        reports = generate_processes(self.logger, self.conf, collection,
                                     self.abstract_task_desc, sa)

        # Send data to the server
        self.logger.info("Send data about generated instances to the server")

        report(self.logger, 'patch', {
            'identifier': self.id,
            'data': reports
        }, self.mqs['report files'], self.vals['report id'],
               get_or_die(self.conf, "main working directory"))
        self.logger.info("An intermediate environment model has been prepared")

        # Import additional aspect files
        translate_intermediate_model(self.logger, self.conf,
                                     self.abstract_task_desc, sa, collection)
        self.logger.info(
            "An environment model has been generated successfully")
Example #8
def _model_factory(model_class):
    """The function allows to build a model with provided processes."""
    files = ['test.c']
    functions = {
        'f1': "static int f1(struct test *)",
        'f2': "static void f2(struct test *)"
    }
    source = Source(files, [], dict())
    for name, declaration_str in functions.items():
        new = Function(name, declaration_str)
        new.definition_file = files[0]
        source.set_source_function(new, files[0])
    spec = {
        "functions models": {
            "f1": model_class.f1_model,
            "f2": model_class.f2_model,
        },
        "environment processes": model_class.environment_models,
        "main process": model_class.entry
    }
    collection = CollectionDecoder(logging, dict()).parse_event_specification(source,
                                                                              json.loads(json.dumps(spec)),
                                                                              ProcessCollection())
    return collection
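
The model_class argument of _model_factory is expected to provide raw process descriptions as class attributes (f1_model, f2_model, environment_models, entry). The class below is a minimal hypothetical sketch of such an argument; the attribute contents are placeholders modelled on the fixtures in the later examples, not descriptions taken from the project.

class MinimalModelClass:
    # Hypothetical function model descriptions for f1 and f2
    f1_model = {"comment": "Model of f1.", "process": "<call>",
                "actions": {"call": {"comment": "", "statements": []}}}
    f2_model = {"comment": "Model of f2.", "process": "<call>",
                "actions": {"call": {"comment": "", "statements": []}}}
    # No environment processes in this sketch
    environment_models = {}
    # Hypothetical entry process description
    entry = {"comment": "Entry process.", "process": "<main>",
             "actions": {"main": {"comment": "", "statements": []}}}

collection = _model_factory(MinimalModelClass)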
Example #9
    def generate_environment(self):
        """
        Main function of EMG plugin.

        Plugin generates an environment model for the verification task.

        :return: None
        """
        self.logger.info("Start environment model generator {!r}".format(
            self.id))

        # Initialization of EMG
        self.logger.info("Import results of source analysis")
        sa = create_source_representation(self.logger, self.conf,
                                          self.abstract_task_desc)

        # Generate processes
        self.logger.info("Generate processes of an environment model")
        collection = ProcessCollection()
        generate_processes(self.logger, self.conf, collection,
                           self.abstract_task_desc, sa)
        self.logger.info("An intermediate environment model has been prepared")

        # Import additional aspect files
        program_fragment = self.abstract_task_desc['fragment']
        abstract_task = self.abstract_task_desc
        self.abstract_task_desc = list()
        used_attributed_names = set()
        data_report = {"type": "EMG", "envmodel_attrs": {}, "UDEMSes": {}}
        images = []
        for number, model in enumerate(
                decompose_intermediate_model(self.logger, self.conf,
                                             collection)):
            model.name = str(number)
            if model.attributed_name in used_attributed_names:
                raise ValueError(
                    f"The model with name '{model.attributed_name}' has been already been generated"
                )
            else:
                used_attributed_names.add(model.attributed_name)
            new_description = translate_intermediate_model(
                self.logger, self.conf, copy.deepcopy(abstract_task), sa,
                model, data_report["UDEMSes"], program_fragment, images)

            new_description["environment model attributes"] = model.attributes
            new_description["environment model pathname"] = model.name
            data_report["envmodel_attrs"][model.name] = json.dumps(
                model.attributes, ensure_ascii=True, sort_keys=True, indent=2)
            self.abstract_task_desc.append(new_description)
            self.logger.info(
                f"An environment model '{model.attributed_name}' has been generated successfully"
            )

        if len(self.abstract_task_desc) == 0:
            raise ValueError('There are no generated environment models')

        self.logger.info("Send data report to the server")
        report(self.logger, 'patch', {
            'identifier': self.id,
            'data': data_report
        }, self.mqs['report files'], self.vals['report id'],
               get_or_die(self.conf, "main working directory"))

        # Send images only for full-weight decisions. Bridge fails to accept them for lightweight decisions, and
        # they would be deleted for such decisions anyway, so there is no sense in sending them.
        if self.conf['weight'] == "0":
            self.logger.info("Send images to the server")
            for name, dot_file, image_file in images:
                report_image(self.logger, self.id, name, dot_file, image_file,
                             self.mqs['report files'], self.vals['report id'],
                             self.conf['main working directory'])
Example #10
    def parse_event_specification(self, source, raw):
        """
        Parse process descriptions and create corresponding objects to populate the collection.

        :param source: Source code collection.
        :param raw: Dictionary with content of JSON file.
        :return: ProcessCollection
        """
        collection = ProcessCollection()

        self.logger.info(
            "Import processes from provided event categories specification")
        raise_exc = []
        if "functions models" in raw:
            self.logger.info("Import processes from 'kernel model'")
            for name_list, process_desc in raw["functions models"].items():
                names = name_list.split(", ")
                for name in names:
                    self.logger.debug(
                        "Import process which models {!r}".format(name))

                    # Set some default values
                    category = "functions models"
                    try:
                        process = self._import_process(source, name, category,
                                                       process_desc)
                        collection.models[str(process)] = process
                    except Exception as err:
                        self.logger.warning("Cannot parse {!r}: {}".format(
                            name, str(err)))
                        raise_exc.append(name)
        if "environment processes" in raw:
            self.logger.info("Import processes from 'environment processes'")
            for name, process_desc in raw["environment processes"].items():
                self.logger.debug(
                    "Import environment process {!r}".format(name))

                # This simplifies parsing of event specifications for Linux, but it could be avoided by adding
                # categories to the corresponding specifications.
                if '/' in name:
                    category, name = name.split('/')
                else:
                    category = None

                try:
                    process = self._import_process(source, name, category,
                                                   process_desc)
                    if process in collection.environment:
                        raise ValueError(
                            "There is an already imported process {!r} in intermediate environment model"
                            .format(str(process)))
                    collection.environment[str(process)] = process
                except Exception as err:
                    self.logger.warning("Cannot parse {!r}: {}".format(
                        name, str(err)))
                    raise_exc.append(name)

        if "main process" in raw and isinstance(raw["main process"], dict):
            self.logger.info("Import main process")
            try:
                entry_process = self._import_process(source, "entry",
                                                     "entry process",
                                                     raw["main process"])
                collection.entry = entry_process
            except Exception as err:
                self.logger.warning("Cannot main process: {}".format(str(err)))
                raise_exc.append('entry')
        else:
            collection.entry = None

        if raise_exc:
            raise RuntimeError(
                "Some specifications cannot be parsed, inspect log to find problems with: {}"
                .format(', '.join(raise_exc)))

        return collection
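
For reference, the raw dictionary handled by parse_event_specification above may contain up to three sections. The sketch below only illustrates their shape, following the fixture layout shown in the later examples; the concrete process descriptions are placeholders, not specifications from the project.

raw_sketch = {
    # Keys may list several modelled function names separated by ", " (see the loop above)
    "functions models": {},
    # Environment process names may be prefixed with a category as "category/name"
    "environment processes": {
        "c1/p1": {"comment": "", "process": "(!register_c1p1)",
                  "actions": {"register_c1p1": {"parameters": []}}}
    },
    # Optional; must be a dictionary describing the entry process
    "main process": {"comment": "", "process": "<main>",
                     "actions": {"main": {"comment": "", "statements": []}}}
}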
Example #11
    def make_scenarios(self, abstract_task_desc, collection, source,
                       specifications):
        """
        This generator reads a manually prepared environment model description. Some of its processes are simply
        added to the already generated model, while generated processes with the same names are replaced by the new
        manually prepared ones. A user can obtain an automatically generated model by setting the corresponding
        translator option, modify it, and rerun EMG to generate a model with the desired properties without
        modifying any specifications.

        :param abstract_task_desc: Abstract task dictionary.
        :param collection: ProcessCollection.
        :param source: Source collection.
        :param specifications: dictionary with merged specifications.
        :return: None
        """
        self.conf.setdefault("enforce replacement", True)

        # Import Specifications
        all_instance_maps = specifications.get("manual event models", [])
        fragment_name = abstract_task_desc['fragment']
        descriptions = None
        for imap in all_instance_maps:
            if fragment_name in imap.get('fragments', []):
                self.logger.info(
                    f"Found model for the fragment '{fragment_name}'")
                descriptions = imap.get("model", None)

                contains = ', '.join([
                    i for i in ("functions models", "environment processes",
                                "main process")
                    if i in descriptions and descriptions[i]
                ])
                self.logger.debug(f"The model contains sections: '{contains}'")

        # Import manual process
        if descriptions and ("functions models" in descriptions
                             or "environment processes" in descriptions
                             or "main process" in descriptions):

            parser = CollectionDecoder(self.logger, self.conf)
            manual_processes = parser.parse_event_specification(
                source, descriptions, ProcessCollection())

            # Decide on process replacements
            or_entry = collection.entry
            if manual_processes.entry and (
                    not collection.entry
                    or self.conf.get("enforce replacement")):
                if self.conf.get("keep entry functions") and collection.entry:
                    for or_decl in collection.entry.declarations:
                        if or_decl in manual_processes.entry.declarations:
                            manual_processes.entry.declarations[or_decl] = {
                                **manual_processes.entry.declarations[or_decl],
                                **collection.entry.declarations[or_decl]
                            }
                        else:
                            manual_processes.entry.declarations[
                                or_decl] = collection.entry.declarations[
                                    or_decl]
                    for or_def in collection.entry.definitions:
                        if or_def in manual_processes.entry.definitions:
                            manual_processes.entry.definitions[or_def] = {
                                **manual_processes.entry.definitions[or_def],
                                **collection.entry.definitions[or_def]
                            }
                        else:
                            manual_processes.entry.definitions[
                                or_def] = collection.entry.definitions[or_def]

                or_entry = manual_processes.entry

            # Replace rest processes
            for current, manual in ((collection.models,
                                     manual_processes.models),
                                    (collection.environment,
                                     manual_processes.environment)):
                for key in manual:
                    if key not in current or self.conf.get(
                            "enforce replacement"):
                        current[key] = manual[key]

            collection.entry = or_entry
            collection.establish_peers()
        else:
            self.logger.info(
                "There is no specification for {!r} or it has invalid format".
                format(fragment_name))
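
The "manual event models" specification consulted by make_scenarios above is a list of entries with "fragments" and "model" keys, as the lookups in the method show. The entry below is only an illustration of that shape; the fragment name is hypothetical and the model sections are left empty.

manual_event_model_entry = {
    "fragments": ["example_fragment"],  # hypothetical fragment name
    "model": {
        "functions models": {},
        "environment processes": {},
        "main process": {}
    }
}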
Example #12
    def make_scenarios(self, abstract_task_desc, collection, source, specifications):
        """
        Make scenario models according to a custom implementation.

        :param abstract_task_desc: Abstract task dictionary.
        :param collection: ProcessCollection.
        :param source: Source collection.
        :param specifications: dictionary with merged specifications.
        :return: None
        """
        # Get instance maps if possible
        instance_maps = sortedcontainers.SortedDict()
        all_instance_maps = specifications.get("instance maps", dict())
        self.conf.setdefault("action comments", DEFAULT_COMMENTS)
        self.conf.setdefault("callback comment", "Invoke callback {0} from {1}.")

        # Get fragment name
        task_name = abstract_task_desc['fragment']

        # Check availability of an instance map for it
        for imap in all_instance_maps.get('instance maps', []):
            if task_name in imap.get('fragments', []):
                instance_maps = imap.get('instance map', sortedcontainers.SortedDict())

        self.logger.info("Import interface categories specification")
        interfaces = InterfaceCollection()
        import_specification(self.logger, self.conf, interfaces, source, specifications["interface specifications"])

        self.logger.info("Import event categories specification")
        decoder = ExtendedProcessDecoder(self.logger, self.conf)
        abstract_processes = decoder.parse_event_specification(source, specifications["event specifications"],
                                                               ExtendedProcessCollection())

        # Remove deleted models
        deleted_models = [func for func in abstract_processes.models if func in source.source_functions and
                          interfaces.is_removed_function(func)]
        if deleted_models:
            self.logger.info("Found deleted models: {}".format(', '.join(deleted_models)))

            for name in deleted_models:
                del abstract_processes.models[name]

        # Now check that we have all necessary interface specifications
        unspecified_functions = [func for func in abstract_processes.models if func in source.source_functions and
                                 func not in [i.name for i in interfaces.function_interfaces]]
        if unspecified_functions:
            raise RuntimeError("You need to specify interface specifications for the following function models: {}"
                               .format(', '.join(unspecified_functions)))

        chosen_processes = process_specifications(self.logger, self.conf, interfaces, abstract_processes)

        self.logger.info("Generate processes from abstract ones")
        instance_maps, data = generate_instances(self.logger, self.conf, source, interfaces, chosen_processes,
                                                 instance_maps)

        # Dump to disk instance map
        instance_map_file = 'instance map.json'
        self.logger.info("Dump information on chosen instances to file {!r}".format(instance_map_file))
        with open(instance_map_file, "w", encoding="utf-8") as fd:
            fd.writelines(ujson.dumps(instance_maps, ensure_ascii=False, sort_keys=True, indent=4,
                                      escape_forward_slashes=False))

        puredecoder = CollectionDecoder(self.logger, self.conf)
        new_pure_collection = puredecoder.parse_event_specification(source, ujson.loads(data), ProcessCollection())
        collection.environment.update(new_pure_collection.environment)
        collection.models.update(new_pure_collection.models)
        collection.establish_peers()
Example #13
    def _remove_unused_processes(self, model: ProcessCollection):
        deleted = model.remove_unused_processes()
        deleted_names = ', '.join(map(str, deleted))
        self.logger.info(
            f"The following processes were deleted from the model '{model.attributed_name}':"
            f" {deleted_names}")
Example #14
    def _factory_iterator(self, processes_to_scenarios: dict,
                          model: ProcessCollection):
        selector = self.strategy(self.logger, self.conf,
                                 processes_to_scenarios, model)
        for batch, related_process in selector():
            new = ProcessCollection(batch.name)
            new.attributes = copy.deepcopy(batch.attributes)
            original_name = batch.attributed_name

            # Do sanity check to catch several savepoints in a model
            sp_scenarios = {
                s
                for s in batch.non_models
                if isinstance(s, Scenario) and s.savepoint
            }
            assert len(sp_scenarios) < 2

            # Set entry process
            if related_process and related_process in batch.environment and batch.environment[related_process] and\
                    batch.environment[related_process].savepoint:
                # There is an environment process with a savepoint
                new.entry = self._process_from_scenario(
                    batch.environment[related_process],
                    model.environment[related_process])
                del batch.environment[related_process]
                new.rename_notion(related_process, str(new.entry))

                # Move declarations and definitions
                if model.entry:
                    new.extend_model_name(str(model.entry), 'Removed')
                    new.copy_declarations_to_init(model.entry)
            elif batch.entry:
                # The entry process has a scenario
                new.entry = self._process_from_scenario(
                    batch.entry, model.entry)
            elif model.entry:
                # Keep as is
                new.entry = self._process_copy(model.entry)
            else:
                new.entry = None

            # Add models if no scenarios provided
            for function_model in model.models:
                if not batch.models.get(function_model):
                    batch.models[function_model] = None

            for attr in ('models', 'environment'):
                batch_collection = getattr(batch, attr)
                collection = getattr(new, attr)
                for key in getattr(model, attr):
                    if key in batch_collection:
                        if batch_collection[key]:
                            collection[key] = self._process_from_scenario(
                                batch_collection[key],
                                getattr(model, attr)[key])
                        else:
                            collection[key] = self._process_copy(
                                getattr(model, attr)[key])
                    else:
                        self.logger.debug(
                            f"Skip process '{key}' in '{new.attributed_name}'")
                        new.copy_declarations_to_init(
                            getattr(model, attr)[key])

            new.establish_peers()
            self._remove_unused_processes(new)

            if new.consistent:
                if new.attributed_name != original_name:
                    self.logger.info("Reduced batch {!r} to {!r}".format(
                        original_name, new.attributed_name))

                # Add missing attributes to the model
                for process_name in model.non_models:
                    added_attributes = []
                    if process_name not in new.attributes:
                        added_attributes.append(process_name)
                        new.extend_model_name(process_name, 'base')
                    added_attributes = ', '.join(added_attributes)
                    self.logger.debug(
                        f"Add to model '{new.attributed_name}' the following attributes: '{added_attributes}'"
                    )

                yield new
            else:
                self.logger.debug(
                    f"Obtained model '{new.attributed_name}' is inconsistent")
Example #15
def specific_model():
    c1p1 = {
        "comment": "Category 1, process 1.",
        "process": "(!register_c1p1).(deregister_c1p1)",
        "actions": {
            "register_c1p1": {
                "parameters": []
            },
            "deregister_c1p1": {
                "parameters": []
            }
        }
    }
    c1p2 = {
        "comment": "Category 1, process 2.",
        "process": "(!register_c1p1).{level_one}",
        "actions": {
            "level_one": {
                "process": "<a>.({level_two} | {level_three}) | {finish}",
                "comment": ""
            },
            "level_two": {
                "process": "(<b> | <c>).{finish}",
                "comment": ""
            },
            "level_three": {
                "process": "<d>.{finish}",
                "comment": ""
            },
            "finish": {
                "process": "(deregister_c1p1)"
            },
            "register_c1p1": {
                "parameters": []
            },
            "deregister_c1p1": {
                "parameters": []
            },
            "a": {
                "comment": "",
                "statements": [],
                "condition": []
            },
            "b": {
                "comment": "",
                "statements": [],
                "condition": []
            },
            "c": {
                "comment": "",
                "statements": [],
                "condition": []
            },
            "d": {
                "comment": "",
                "statements": [],
                "condition": []
            }
        }
    }
    c2p1 = {
        "comment": "Category 2, process 1.",
        "process": "(!register_c2p1).{level_one}",
        "actions": {
            "level_one": {
                "process": "<a>.<b>"
            },
            "register_c2p1": {
                "parameters": []
            },
            "a": {
                "comment": "",
                "statements": [],
                "condition": []
            },
            "b": {
                "comment": "",
                "statements": [],
                "condition": []
            }
        }
    }
    entry = {
        "identifier": "entryy",
        "comment": "Entry process.",
        "process": "<a>.(<b> | <c>)",
        "actions": {
            "a": {
                "comment": "",
                "statements": [],
                "condition": []
            },
            "b": {
                "comment": "",
                "statements": [],
                "condition": []
            },
            "c": {
                "comment": "",
                "statements": [],
                "condition": []
            }
        }
    }
    spec = {
        "name": 'test_model',
        "functions models": {},
        "environment processes": {
            "c1/p1": c1p1,
            "c1/p2": c1p2,
            "c2/p1": c2p1
        },
        "main process": entry
    }
    collection = CollectionDecoder(logging, dict()).parse_event_specification(
        source_preset(), json.loads(json.dumps(spec)), ProcessCollection())
    return collection
Example #16
def model_with_savepoint_requirements():
    c1p1 = {
        "comment": "",
        "process": "(!register).({x} | <a>).{y}",
        "actions": {
            "x": {
                "comment": "",
                "process": "<b>.(<c> | <d>).{y}"
            },
            "y": {
                "comment": "",
                "process": "<e>.<f> | <g>"
            },
            "a": {
                "comment": "",
                "statements": []
            },
            "b": {
                "comment": "",
                "statements": []
            },
            "c": {
                "comment": "",
                "statements": []
            },
            "d": {
                "comment": "",
                "statements": []
            },
            "e": {
                "comment": "",
                "statements": []
            },
            "f": {
                "comment": "",
                "statements": []
            },
            "g": {
                "comment": "",
                "statements": []
            },
            "register": {
                "parameters": [],
                "savepoints": {
                    "s1": {
                        "statements": [],
                        "require": {
                            "actions": {
                                "c1/p1": ["b", "c", "g"]
                            }
                        }
                    },
                    "s2": {
                        "statements": [],
                        "require": {
                            "actions": {
                                "c1/p1": ["e"]
                            }
                        }
                    },
                    "s3": {
                        "statements": [],
                        "require": {
                            "actions": {
                                "c1/p1": ["a"]
                            }
                        }
                    }
                }
            }
        }
    }
    c1p2 = {
        "comment": "",
        "process": "(!register).(<b>.(<c> | <d>) | <a>).(<e>.<f> | <g>)",
        "actions": {
            "a": {
                "comment": "",
                "statements": []
            },
            "b": {
                "comment": "",
                "statements": []
            },
            "c": {
                "comment": "",
                "statements": []
            },
            "d": {
                "comment": "",
                "statements": []
            },
            "e": {
                "comment": "",
                "statements": []
            },
            "f": {
                "comment": "",
                "statements": []
            },
            "g": {
                "comment": "",
                "statements": []
            },
            "register": {
                "parameters": [],
                "savepoints": {
                    "s4": {
                        "statements": [],
                        "require": {
                            "actions": {
                                "c1/p2": ["b", "c", "g"]
                            }
                        }
                    },
                    "s5": {
                        "statements": [],
                        "require": {
                            "actions": {
                                "c1/p2": ["e"]
                            }
                        }
                    },
                    "s6": {
                        "statements": [],
                        "require": {
                            "actions": {
                                "c1/p2": ["a"]
                            }
                        }
                    }
                }
            }
        }
    }
    c1p3 = {
        "comment": "",
        "process": "(!register).({level_one} | (unregister))",
        "actions": {
            "level_one": {
                "comment":
                "",
                "process":
                "<probe>.(<success>.{level_two} | <fail>).<remove>.(unregister)"
            },
            "level_two": {
                "comment": "",
                "process":
                "(<read> | <write>).{level_two} | <remove>.{level_one}"
            },
            "register": {
                "parameters": [],
                "savepoints": {
                    "s7": {
                        "statements": [],
                        "require": {
                            "actions": {
                                "c1/p3": ["probe"]
                            }
                        }
                    },
                    "s8": {
                        "statements": [],
                        "require": {
                            "actions": {
                                "c1/p3": ["probe", "remove", "read"]
                            }
                        }
                    },
                    "s9": {
                        "statements": [],
                        "require": {
                            "processes": {
                                "c1/p1": True
                            },
                            "actions": {
                                "c1/p3": ["probe", "fail"],
                                "c1/p1": ["b", "c", "g"]
                            }
                        }
                    }
                }
            },
            "unregister": {
                "parameters": []
            },
            "probe": {
                "comment": "",
                "statements": []
            },
            "remove": {
                "comment": "",
                "statements": []
            },
            "success": {
                "comment": "",
                "statements": []
            },
            "fail": {
                "comment": "",
                "statements": []
            },
            "read": {
                "comment": "",
                "statements": []
            },
            "write": {
                "comment": "",
                "statements": []
            }
        }
    }
    entry = {
        "comment": "Entry process.",
        "process": "<a>.(<b>.(<d> | <e>) | <c>)",
        "actions": {
            "a": {
                "comment": "",
                "statements": [],
                "condition": [],
                "savepoints": {
                    "s10": {
                        "statements": []
                    },
                    "s11": {
                        "statements": [],
                        "require": {
                            "actions": {
                                "entry_point/main": ["d"]
                            }
                        }
                    },
                    "s12": {
                        "statements": [],
                        "require": {
                            "processes": {
                                "c1/p1": True
                            },
                            "actions": {
                                "c1/p1": ["b", "c", "g"]
                            }
                        }
                    },
                }
            },
            "b": {
                "comment": "",
                "statements": [],
                "condition": []
            },
            "c": {
                "comment": "",
                "statements": [],
                "condition": []
            },
            "d": {
                "comment": "",
                "statements": [],
                "condition": []
            },
            "e": {
                "comment": "",
                "statements": [],
                "condition": []
            }
        }
    }
    spec = {
        "functions models": {},
        "environment processes": {
            "c1/p1": c1p1,
            "c1/p2": c1p2,
            "c1/p3": c1p3
        },
        "main process": entry
    }
    collection = CollectionDecoder(logging, dict()).parse_event_specification(
        source_preset(), json.loads(json.dumps(spec)), ProcessCollection())
    return collection