Example #1
    def load_meta_data(self, path=None, recursively=True):
        """Load meta data of state machine model from the file system

        The meta data of the state machine model is loaded from the file system and stored in the meta property of the
        model. Existing meta data is removed. The meta data of the root state and its children is also loaded.

        :param str path: Optional path to the directory containing the meta data file. If not given, the path is
            derived from the state machine's path on the file system
        """
        meta_data_path = path if path is not None else self.state_machine.file_system_path

        if meta_data_path:
            path_meta_data = os.path.join(meta_data_path,
                                          storage.FILE_NAME_META_DATA)

            try:
                tmp_meta = storage.load_data_file(path_meta_data)
            except ValueError:
                tmp_meta = {}
        else:
            tmp_meta = {}

        # JSON returns a dict, which must be converted to a Vividict
        tmp_meta = Vividict(tmp_meta)

        if recursively:
            root_state_path = None if not path else os.path.join(
                path, self.root_state.state.state_id)
            self.root_state.load_meta_data(root_state_path)

        if tmp_meta:
            # assign the meta data to the state
            self.meta = tmp_meta
            self.meta_signal.emit(MetaSignalMsg("load_meta_data", "all", True))
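
The try/except fallback around storage.load_data_file recurs throughout these examples; a minimal sketch of that pattern, assuming the storage module imported in the test examples below (the helper name is hypothetical):

import os

from rafcon.core.storage import storage  # as imported in the test examples below

def load_meta_or_empty(directory, file_name):
    """Return the parsed meta data dict, or {} if the file cannot be loaded."""
    try:
        # the examples above catch ValueError raised by storage.load_data_file
        return storage.load_data_file(os.path.join(directory, file_name))
    except ValueError:
        return {}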
Example #2
    def load_meta_data(self, path=None):
        """Load meta data of state model from the file system

        The meta data of the state model is loaded from the file system and stored in the meta property of the model.
        Existing meta data is removed. The meta data of all state elements (data ports, outcomes, etc.) is also
        loaded, as it is stored in the same file as the meta data of the state.

        This is called either in the __init__ of a new state model or when a state model for a container state is
        created, which then calls load_meta_data for all its children.

        :param str path: Optional file system path to the directory containing the meta data file. If not given, the
            path is derived from the state's path on the file system
        :return: True if the meta data file was loaded, False otherwise
        :rtype: bool
        """
        # TODO: for an execution state this method is called again and again for each hierarchy level; still?? check it!
        if not path:
            path = self.state.file_system_path
        if path is None:
            self.meta = Vividict({})
            return False
        path_meta_data = os.path.join(path, storage.FILE_NAME_META_DATA)

        # TODO: Should be removed with next minor release
        if not os.path.exists(path_meta_data):
            logger.debug("Because meta data was not found in {0} use backup option {1}"
                         "".format(path_meta_data, os.path.join(path, storage.FILE_NAME_META_DATA_OLD)))
            path_meta_data = os.path.join(path, storage.FILE_NAME_META_DATA_OLD)
            # TODO: use the following logger message to debug the meta data load process and to avoid possibly
            # repetitive loads
            # if not os.path.exists(path_meta_data):
            #     logger.info("path not found {0}".format(path_meta_data))

        try:
            tmp_meta = storage.load_data_file(path_meta_data)
        except ValueError as e:
            # only log if the element is not newly generated (those have no meta data file yet)
            if not path.startswith(constants.RAFCON_TEMP_PATH_STORAGE) and not os.path.exists(os.path.dirname(path)):
                logger.debug("Meta data of {0} was not loaded properly because '{1}'.".format(self, e))
            tmp_meta = {}

        # JSON returns a dict, which must be converted to a Vividict
        tmp_meta = Vividict(tmp_meta)

        if tmp_meta:
            self._parse_for_element_meta_data(tmp_meta)
            # assign the meta data to the state
            self.meta = tmp_meta
            self.meta_signal.emit(MetaSignalMsg("load_meta_data", "all", True))
            return True
        else:
            return False
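
The Vividict conversion matters because the model writes nested keys without creating intermediate levels first; a hypothetical illustration of the autovivification behavior, assuming the Vividict class imported by the module above:

from rafcon.utils.vividict import Vividict  # import path as used in RAFCON (assumption)

meta = Vividict({})
# intermediate levels are created on access instead of raising KeyError
meta['gui']['editor']['size'] = (800, 600)
print(meta['gui']['editor']['size'])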
Example #3
def test_restore_session(gui):
    from rafcon.core.storage import storage

    # first run
    open_state_machines = {'list_of_hash_path_tab_page_number_tuple': [], 'selected_sm_page_number': None}
    trigger_gui_signals_first_run(gui, open_state_machines)

    gui.restart()

    final_open_state_machines = {'list_of_hash_path_tab_page_number_tuple': [], 'selected_sm_page_number': None}
    trigger_gui_signals_second_run(gui, final_open_state_machines)

    # test selection, page number and path
    # TODO find out whether there is a proper hash value test
    # TODO find out why core and gui hashes are changing !!! not even fully deterministic !!!
    # TODO find out why the dirty flag is sometimes wrong when AUTO_BACKUP is enabled in parallel
    #      (connected to direct storing while opening)
    assert open_state_machines['selected_sm_page_number'] == final_open_state_machines['selected_sm_page_number']

    final_tuple_list = final_open_state_machines['list_of_hash_path_tab_page_number_tuple']

    assert open_state_machines['selection_state_machine'] == final_open_state_machines['selection_state_machine']

    order_of_pages_to_be_dirty = [False, True, False, False, True, False, True]
    for index, sm_tuple in enumerate(open_state_machines['list_of_hash_path_tab_page_number_tuple']):
        assert index == sm_tuple[PAGE_NUMBER_INDEX]
        if final_tuple_list[index][CORE_HASH_INDEX] != sm_tuple[CORE_HASH_INDEX]:
            print("CORE hashes for page {4} are not equal: {0} != {1}, path: {2}, dirty: {3}"
                  .format(final_tuple_list[index][CORE_HASH_INDEX], sm_tuple[CORE_HASH_INDEX],
                          sm_tuple[PATH_INDEX], sm_tuple[MARKED_DIRTY_INDEX], sm_tuple[PAGE_NUMBER_INDEX]))
            if sm_tuple[PATH_INDEX]:
                sm_file_path = join(sm_tuple[PATH_INDEX], storage.STATEMACHINE_FILE)
                if exists(sm_tuple[PATH_INDEX]) and exists(sm_file_path):
                    print("sm_file_path: ", sm_file_path)
                    print(storage.load_data_file(join(sm_tuple[PATH_INDEX], storage.STATEMACHINE_FILE)))
                else:
                    print("does not exist sm_file_path ", sm_file_path)
            else:
                print("state machine was NOT stored")
        assert final_tuple_list[index][CORE_HASH_INDEX] == sm_tuple[CORE_HASH_INDEX]
        assert final_tuple_list[index][PATH_INDEX] == sm_tuple[PATH_INDEX]
        if final_tuple_list[index][GUI_HASH_INDEX] != sm_tuple[GUI_HASH_INDEX]:
            print("GUI hashes for page {4} are not equal: {0} != {1}, path: {2}, dirty: {3}"
                  .format(final_tuple_list[index][GUI_HASH_INDEX], sm_tuple[GUI_HASH_INDEX],
                          sm_tuple[PATH_INDEX], sm_tuple[MARKED_DIRTY_INDEX], sm_tuple[PAGE_NUMBER_INDEX]))
        assert final_tuple_list[index][GUI_HASH_INDEX] == sm_tuple[GUI_HASH_INDEX]
        assert final_tuple_list[index][PAGE_NUMBER_INDEX] == sm_tuple[PAGE_NUMBER_INDEX]
        # pages 1, 4 and 6 are expected to be dirty; pages 0, 2, 3 and 5 are not (see order_of_pages_to_be_dirty)
        if final_tuple_list[index][MARKED_DIRTY_INDEX] != sm_tuple[MARKED_DIRTY_INDEX]:
            print("MARKED_DIRTY flags for page {4} are not equal: {0} != {1}, path: {2}, dirty: {3}"
                  .format(final_tuple_list[index][MARKED_DIRTY_INDEX], sm_tuple[MARKED_DIRTY_INDEX],
                          sm_tuple[PATH_INDEX], sm_tuple[MARKED_DIRTY_INDEX], sm_tuple[PAGE_NUMBER_INDEX]))
        assert final_tuple_list[index][MARKED_DIRTY_INDEX] == sm_tuple[MARKED_DIRTY_INDEX]
        if final_tuple_list[index][MARKED_DIRTY_INDEX] != order_of_pages_to_be_dirty[index]:
            print("Dirty flag for page {4} differs from the expected value: {0} != {1}, path: {2}, dirty: {3}"
                  .format(final_tuple_list[index][MARKED_DIRTY_INDEX], order_of_pages_to_be_dirty[index],
                          sm_tuple[PATH_INDEX], sm_tuple[MARKED_DIRTY_INDEX], sm_tuple[PAGE_NUMBER_INDEX]))
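
The tuple index constants used in the loop above (CORE_HASH_INDEX, GUI_HASH_INDEX, PATH_INDEX, PAGE_NUMBER_INDEX, MARKED_DIRTY_INDEX) are defined elsewhere in the test module; a hypothetical layout consistent with the assertions, for readability only:

# hypothetical index layout; the actual values live in the test module and may differ
CORE_HASH_INDEX = 0
GUI_HASH_INDEX = 1
PATH_INDEX = 2
PAGE_NUMBER_INDEX = 3
MARKED_DIRTY_INDEX = 4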
Example #4
    def load_meta_data(self, path=None):
        """Load meta data of state model from the file system

        The meta data of the state model is loaded from the file system and stored in the meta property of the model.
        Existing meta data is removed. The meta data of all state elements (data ports, outcomes, etc.) is also
        loaded, as it is stored in the same file as the meta data of the state.

        This is called either in the __init__ of a new state model or when a state model for a container state is
        created, which then calls load_meta_data for all its children.

        :param str path: Optional file system path to the directory containing the meta data file. If not given, the
            path is derived from the state's path on the file system
        :return: True if the meta data file was loaded, False otherwise
        :rtype: bool
        """
        if not path:
            path = self.state.file_system_path
        if path is None:
            self.meta = Vividict({})
            return False
        path_meta_data = os.path.join(path, storage.FILE_NAME_META_DATA)
        try:
            tmp_meta = storage.load_data_file(path_meta_data)
        except ValueError as e:
            if not path.startswith(constants.RAFCON_TEMP_PATH_STORAGE) and not os.path.exists(os.path.dirname(path)):
                logger.debug("Meta data of {0} was not loaded properly because '{1}'.".format(self, e))
            tmp_meta = {}

        # JSON returns a dict, which must be converted to a Vividict
        tmp_meta = Vividict(tmp_meta)

        if tmp_meta:
            self._parse_for_element_meta_data(tmp_meta)
            # assign the meta data to the state
            self.meta = tmp_meta
            self.meta_signal.emit(MetaSignalMsg("load_meta_data", "all", True))
            return True
        else:
            return False
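
A hedged usage sketch of the method above; state_model is a hypothetical stand-in for an instance of the state model class this method belongs to:

# state_model is a hypothetical state model instance
if state_model.load_meta_data():  # path is derived from the state's file_system_path
    print("meta data loaded:", state_model.meta)
else:
    print("no meta data file was loaded")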
Example #5
def recover_state_machine_from_backup(sm_path, pid=None, full_path_dirty_lock=None):

    if full_path_dirty_lock is None:
        full_path_dirty_lock = find_dirty_lock_file_for_state_machine_path(sm_path)
    try:
        auto_backup_meta = storage.load_data_file(os.path.join(sm_path, FILE_NAME_AUTO_BACKUP))
    except ValueError:
        auto_backup_meta = {}
    last_save_file_system_path = None
    if 'last_saved' in auto_backup_meta and 'file_system_path' in auto_backup_meta['last_saved']:
        last_save_file_system_path = auto_backup_meta['last_saved']['file_system_path']
    elif pid is not None:  # state machines with old backup format -> backward compatibility check
        reduced_path = sm_path.replace(os.path.join(MY_RAFCON_TEMP_PATH, pid, 'runtime_backup'), '')
        if os.path.isdir(reduced_path) and reduced_path.split(os.path.sep)[1] != 'tmp':
            last_save_file_system_path = reduced_path

    # check if already open -> TODO: in the future, backups have to be integrated better to avoid this
    if last_save_file_system_path is not None \
            and core_singletons.state_machine_manager.is_state_machine_open(last_save_file_system_path):
        logger.info("Backup state machine is already open by another feature {0}".format(auto_backup_meta))
        move_dirty_lock_file(full_path_dirty_lock, sm_path)
        return

    state_machine = storage.load_state_machine_from_path(sm_path)

    # move dirty lock file
    move_dirty_lock_file(full_path_dirty_lock, sm_path)

    import rafcon.gui.singleton as gui_singletons
    gui_singletons.state_machine_manager.add_state_machine(state_machine)

    # TODO: check this gui wait again
    # avoids models not being generated and state machines being open without the root state selected
    import rafcon.gui.utils
    rafcon.gui.utils.wait_for_gui()
    sm_m = gui_singletons.state_machine_manager_model.state_machines[state_machine.state_machine_id]
    assert sm_m.state_machine is state_machine

    # correct backup instance and sm storage path -> TODO: improve add_state_machine to reduce complexity here
    # correct the path after add_state_machine because meta data should be loaded from the backup path
    with sm_m.storage_lock:
        sm_m.state_machine._file_system_path = last_save_file_system_path

        # fix auto backup meta data
        if sm_m.auto_backup:
            if last_save_file_system_path is None:
                del sm_m.auto_backup.meta['last_saved']
            else:
                sm_m.auto_backup.meta['last_saved']['file_system_path'] = sm_m.state_machine.file_system_path

    # set dirty flag -> TODO: think about making this more reliable; still not fully sure the flag is right
    # backward compatibility check
    if 'last_backup' in auto_backup_meta and 'marked_dirty' in auto_backup_meta['last_backup']:
        state_machine.marked_dirty = auto_backup_meta['last_backup']['marked_dirty']
    else:
        state_machine.marked_dirty = True  # backward compatibility

    return sm_m
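
A hedged usage sketch of the function above; the backup directory and pid below are hypothetical stand-ins:

# hypothetical backup location inside RAFCON's temp storage
backup_path = '/tmp/rafcon_tmp/1234/runtime_backup/my_state_machine'
sm_m = recover_state_machine_from_backup(backup_path, pid='1234')
if sm_m is not None:  # None means the state machine was already open elsewhere
    print(sm_m.state_machine.file_system_path, sm_m.state_machine.marked_dirty)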
Example #6
    def load_and_set_file_content(self, file_system_path):
        """Implements the abstract method of the ExternalEditor class."""
        semantic_data = load_data_file(os.path.join(file_system_path, storage.SEMANTIC_DATA_FILE))
        self.model.state.semantic_data = semantic_data
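
A hedged sketch of how the hook might be exercised; editor and the folder path are hypothetical stand-ins:

# editor is a hypothetical instance of the class defining the method above
editor.load_and_set_file_content('/path/to/state_folder')
print(editor.model.state.semantic_data)  # now reflects the stored SEMANTIC_DATA_FILE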
Example #7
def test_restore_session(caplog):
    from rafcon.core.storage import storage

    change_in_gui_config = {
        'AUTO_BACKUP_ENABLED': False,
        'HISTORY_ENABLED': False,
        'SESSION_RESTORE_ENABLED': True
    }

    # first run
    libraries = {
        "ros": join(testing_utils.EXAMPLES_PATH, "libraries", "ros_libraries"),
        "turtle_libraries": join(testing_utils.EXAMPLES_PATH, "libraries", "turtle_libraries"),
        "generic": join(testing_utils.LIBRARY_SM_PATH, "generic")
    }
    testing_utils.run_gui(gui_config=change_in_gui_config, libraries=libraries)
    try:
        open_state_machines = {'list_of_hash_path_tab_page_number_tuple': [], 'selected_sm_page_number': None}
        trigger_gui_signals_first_run(open_state_machines)
    finally:
        testing_utils.close_gui(force_quit=False)
        testing_utils.shutdown_environment(caplog=caplog,
                                           expected_warnings=0,
                                           expected_errors=0)

    # second run
    libraries = {
        "ros": join(testing_utils.EXAMPLES_PATH, "libraries", "ros_libraries"),
        "turtle_libraries": join(testing_utils.EXAMPLES_PATH, "libraries", "turtle_libraries"),
        "generic": join(testing_utils.LIBRARY_SM_PATH, "generic")
    }
    testing_utils.run_gui(gui_config=change_in_gui_config, libraries=libraries)

    try:
        final_open_state_machines = {'list_of_hash_path_tab_page_number_tuple': [], 'selected_sm_page_number': None}
        trigger_gui_signals_second_run(final_open_state_machines)
    finally:
        testing_utils.close_gui()

    print(open_state_machines)
    print(final_open_state_machines)

    # test selection, page number and path
    # TODO find out whether there is a proper hash value test
    # TODO find out why core and gui hashes are changing !!! not even fully deterministic !!!
    # TODO find out why the dirty flag is sometimes wrong when AUTO_BACKUP is enabled in parallel
    #      (connected to direct storing while opening)
    assert open_state_machines['selected_sm_page_number'] == final_open_state_machines['selected_sm_page_number']

    final_tuple_list = final_open_state_machines['list_of_hash_path_tab_page_number_tuple']

    assert open_state_machines['selection_state_machine'] == final_open_state_machines['selection_state_machine']

    order_of_pages_to_be_dirty = [False, True, False, False, True, False, True]
    for index, sm_tuple in enumerate(open_state_machines['list_of_hash_path_tab_page_number_tuple']):
        assert index == sm_tuple[PAGE_NUMBER_INDEX]
        if final_tuple_list[index][CORE_HASH_INDEX] != sm_tuple[CORE_HASH_INDEX]:
            print("CORE hashes for page {4} are not equal: {0} != {1}, path: {2}, dirty: {3}"
                  .format(final_tuple_list[index][CORE_HASH_INDEX], sm_tuple[CORE_HASH_INDEX],
                          sm_tuple[PATH_INDEX], sm_tuple[MARKED_DIRTY_INDEX], sm_tuple[PAGE_NUMBER_INDEX]))
            if sm_tuple[PATH_INDEX]:
                sm_file_path = join(sm_tuple[PATH_INDEX], storage.STATEMACHINE_FILE)
                if exists(sm_tuple[PATH_INDEX]) and exists(sm_file_path):
                    print("sm_file_path: ", sm_file_path)
                    print(storage.load_data_file(join(sm_tuple[PATH_INDEX], storage.STATEMACHINE_FILE)))
                else:
                    print("sm_file_path does not exist: ", sm_file_path)
            else:
                print("state machine was NOT stored")
        assert final_tuple_list[index][CORE_HASH_INDEX] == sm_tuple[CORE_HASH_INDEX]
        assert final_tuple_list[index][PATH_INDEX] == sm_tuple[PATH_INDEX]
        if final_tuple_list[index][GUI_HASH_INDEX] != sm_tuple[GUI_HASH_INDEX]:
            print("GUI hashes for page {4} are not equal: {0} != {1}, path: {2}, dirty: {3}"
                  .format(final_tuple_list[index][GUI_HASH_INDEX], sm_tuple[GUI_HASH_INDEX],
                          sm_tuple[PATH_INDEX], sm_tuple[MARKED_DIRTY_INDEX], sm_tuple[PAGE_NUMBER_INDEX]))
        assert final_tuple_list[index][GUI_HASH_INDEX] == sm_tuple[GUI_HASH_INDEX]
        assert final_tuple_list[index][PAGE_NUMBER_INDEX] == sm_tuple[PAGE_NUMBER_INDEX]
        # pages 1, 4 and 6 are expected to be dirty; pages 0, 2, 3 and 5 are not (see order_of_pages_to_be_dirty)
        if final_tuple_list[index][MARKED_DIRTY_INDEX] != sm_tuple[MARKED_DIRTY_INDEX]:
            print("MARKED_DIRTY flags for page {4} are not equal: {0} != {1}, path: {2}, dirty: {3}"
                  .format(final_tuple_list[index][MARKED_DIRTY_INDEX], sm_tuple[MARKED_DIRTY_INDEX],
                          sm_tuple[PATH_INDEX], sm_tuple[MARKED_DIRTY_INDEX], sm_tuple[PAGE_NUMBER_INDEX]))
        assert final_tuple_list[index][MARKED_DIRTY_INDEX] == sm_tuple[MARKED_DIRTY_INDEX]
        if final_tuple_list[index][MARKED_DIRTY_INDEX] != order_of_pages_to_be_dirty[index]:
            print("Dirty flag for page {4} differs from the expected value: {0} != {1}, path: {2}, dirty: {3}"
                  .format(final_tuple_list[index][MARKED_DIRTY_INDEX], order_of_pages_to_be_dirty[index],
                          sm_tuple[PATH_INDEX], sm_tuple[MARKED_DIRTY_INDEX], sm_tuple[PAGE_NUMBER_INDEX]))
        # TODO: consider adding an assert here, too -> maybe the implementation of this check is bad (the tabs look OK)

    testing_utils.shutdown_environment(caplog=caplog,
                                       expected_warnings=0,
                                       expected_errors=0)
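
The try/finally structure above guarantees that the GUI is closed and the environment is shut down even when an assertion fails mid-run; a condensed sketch of that pattern, using the testing_utils helpers from the test (the test body function is hypothetical):

testing_utils.run_gui(gui_config=change_in_gui_config, libraries=libraries)
try:
    exercise_the_gui()  # hypothetical test body
finally:
    testing_utils.close_gui()
    testing_utils.shutdown_environment(caplog=caplog, expected_warnings=0, expected_errors=0)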