Example #1
 def tearDown(self):
     SQLManager.instance().get_session().close()
     SQLManager.instance().drop_all()
     # PathManager.dir_content_remove("outdir")
     shutil.rmtree("outdir", ignore_errors=True)
     OptionManager._drop()
     SQLManager._drop()
Example #2
    def run(self):
        """
        Get the DAG then execute it.

        The database is set up here if the workflow-side models have not been created yet.

        The DAG is obtained via the :meth:`~.wopmars.framework.parsing.Parser.Parser.parse` method of the parser and then pruned by the
        :meth:`~.wopmars.framework.management.WorkflowManager.WorkflowManager.get_dag_to_exec` method, which sets the right DAG to be executed.
        Then, :meth:`~.wopmars.framework.management.WorkflowManager.WorkflowManager.execute_from` is called with no argument to start from the origin nodes.
        """

        # This create_all is supposed to only create workflow-management side models (called "wom_*")
        SQLManager.instance().create_all()

        # if OptionManager.instance()["--cleanup-metadata"]:
        #     Logger.instance().info("Deleting WoPMaRS history...")
        #     SQLManager.instance().drop_table_content_list(SQLManager.wopmars_history_tables)

        # The following lines create the types 'input' and 'output' in the db if they don't exist.
        self.__session.get_or_create(TypeInputOrOutput,
                                     defaults={"is_input": True},
                                     is_input=True)
        self.__session.get_or_create(TypeInputOrOutput,
                                     defaults={"is_input": False},
                                     is_input=False)
        self.__session.commit()
        # Get the DAG representing the whole workflow
        self.__dag_tools = self.__parser.parse()
        # Build the DAG that will actually be executed, according to the options given
        self.get_dag_to_exec()
        # Aitor has removed this command that removed everything before forceall
        # if OptionManager.instance()["--forceall"] and not OptionManager.instance()["--dry-run"]:
        #     self.erase_output()
        # Start the execution at the root nodes
        self.execute_from()
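The get_or_create calls above are not part of a stock SQLAlchemy session; they come from the session wrapper that SQLManager returns. For reference, the usual recipe behind such a method looks roughly like the following sketch (a hypothetical standalone helper, not wopmars' actual implementation):

# Generic get_or_create recipe for a SQLAlchemy-style session (sketch for reference;
# the wopmars session exposes an equivalent method, whose exact signature may differ).
def get_or_create(session, model, defaults=None, **filters):
    instance = session.query(model).filter_by(**filters).first()
    if instance is not None:
        return instance                # row already present
    params = dict(filters)
    params.update(defaults or {})      # e.g. defaults={"is_input": True}
    instance = model(**params)
    session.add(instance)              # committed later by the caller
    return instance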
Example #3
 def tearDown(self):
     SQLManager.instance().get_session().close()
     SQLManager.instance().drop_all()
     shutil.rmtree(os.path.join(self.test_path, "outdir"),
                   ignore_errors=True)
     OptionManager._drop()
     SQLManager._drop()
Example #4
    def setUp(self):
        OptionManager.initial_test_setup()  # Set tests arguments
        SQLManager.instance().create_all()  # Create database with tables

        [
            SQLManager.instance().get_session().add(
                FooBase(name="foo " + str(i))) for i in range(10000)
        ]
        SQLManager.instance().get_session().commit()
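The list comprehension above is used only for its side effect of adding rows. With the same session, the setup can also be written with add_all, as done in Example #19 further down; a minimal equivalent sketch (same FooBase test model assumed):

# Equivalent bulk insert using add_all (same FooBase test model as above)
session = SQLManager.instance().get_session()
session.add_all([FooBase(name="foo " + str(i)) for i in range(10000)])
session.commit()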
Example #5
    def setUp(self):

        OptionManager.initial_test_setup()  # Set tests arguments
        SQLManager.instance().create_all()  # Create database with tables
        self.test_path = PathManager.get_test_path()

        self.__s_path_to_example_definition_file_finishing = os.path.join(
            self.test_path, "resource/wopfile/example_def_file1.yml")
        self.__s_path_to_example_definition_file_that_end_with_error = os.path.join(
            self.test_path, "resource/wopfile/example_def_file5_never_ready.yml")

        self.__workflow_manager = WorkflowManager()
Example #6
 def setUp(self):
     OptionManager.initial_test_setup()  # Set tests arguments
     SQLManager.instance().create_all()  # Create database with tables
     session = SQLManager.instance().get_session()
     session.get_or_create(TypeInputOrOutput,
                           defaults={"is_input": True},
                           is_input=True)
     session.get_or_create(TypeInputOrOutput,
                           defaults={"is_input": False},
                           is_input=False)
     session.commit()
     self.__test_path = PathManager.get_test_path()
     # self.__test_path = PathManager.get_package_path()
     self.__parser = Parser()
Example #7
    def setUp(self):

        OptionManager.initial_test_setup()  # Set tests arguments
        SQLManager.instance().create_all()  # Create database with tables

        self.__local_session = SQLManager.instance().get_session()

        SQLManager()
        self.__t1 = ConcurrentCommitingThread()
        self.__t2 = ConcurrentCommitingThread()
        self.__t3 = ConcurrentCommitingThread()

        self.__t4 = ConcurrentRollBackingThread()
        self.__t5 = ConcurrentRollBackingThread()
        self.__t6 = ConcurrentRollBackingThread()
Example #8
    def setUp(self):

        OptionManager.initial_test_setup()  # Set tests arguments
        SQLManager.instance().create_all()  # Create database with tables

        session = SQLManager.instance().get_session()
        session.get_or_create(TypeInputOrOutput, defaults={"is_input": True}, is_input=True)
        session.get_or_create(TypeInputOrOutput, defaults={"is_input": False}, is_input=False)
        session.commit()
        self.__session = SQLManager.instance().get_session()
        self.__reader = Reader()

        self.__testdir_path = PathManager.get_test_path()

        # The good -------------------------------:

        self.__example_def_file1_path = os.path.join(self.__testdir_path, "resource/wopfile/example_def_file1.yml")
        self.__example_def_file3_path = os.path.join(self.__testdir_path, "resource/wopfile/example_def_file3.yml")

        # The ugly (malformed file) --------------------:

        self.__s_example_definition_file_duplicate_rule = os.path.join(self.__testdir_path, "resource/wopfile/example_def_file_duplicate_rule.yml")

        self.__list_f_to_exception_init = [
            os.path.join(self.__testdir_path, s_path) for s_path in [
                "resource/wopfile/example_def_file_wrong_yaml.yml",
                "resource/wopfile/example_def_file_duplicate_rule.yml",
                "resource/wopfile/example_def_file_wrong_grammar.yml",
                "resource/wopfile/example_def_file_wrong_grammar2.yml",
                "resource/wopfile/example_def_file_wrong_grammar3.yml",
                "resource/wopfile/example_def_file_wrong_grammar4.yml"
                ]
        ]

        # The bad (invalid file) ----------------------:

        self.__list_s_to_exception_read = [
            os.path.join(self.__testdir_path, s_path) for s_path in [
                "resource/wopfile/example_def_file1.yml",
                "resource/wopfile/example_def_file_wrong_content2.yml",
                "resource/wopfile/example_def_file_wrong_content3.yml",
                "resource/wopfile/example_def_file_wrong_content4.yml",
                "resource/wopfile/example_def_file_wrong_content5.yml",
                "resource/wopfile/example_def_file_wrong_class_name.yml",
                "resource/wopfile/example_def_file_wrong_rule.yml",
            ]
        ]
Example #9
    def run(self):
        thread_session = SQLManager.instance().get_session()
        for i in range(1000):
            foo = FooBase(name="string " + str(i))
            thread_session.add(foo)
        thread_session.rollback()

        thread_session.close()
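This run body builds 1000 FooBase rows and then discards them with rollback(). The committing counterpart used in Example #7 is not shown on this page; a hypothetical sketch of it would only differ in the final call:

import threading

class ConcurrentCommitingThreadSketch(threading.Thread):
    """Hypothetical sketch of a committing counterpart to the thread above."""
    def run(self):
        thread_session = SQLManager.instance().get_session()
        for i in range(1000):
            thread_session.add(FooBase(name="string " + str(i)))
        thread_session.commit()   # commit instead of rollback
        thread_session.close()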
Example #10
    def is_this_tool_wrapper_already_executed(tool_wrapper):
        """
        Check whether this tool wrapper has already been executed.

        The conditions are:
            - The tool_wrapper outputs exist
            - The tool_wrapper outputs are more recent than its inputs
            - If the tool_wrapper exists in the database, it has the same parameter values

        :param tool_wrapper: The tool_wrapper to be tested
        :type tool_wrapper: :class:`~.wopmars.models.ToolWrapper.ToolWrapper`
        """
        session = SQLManager.instance().get_session()

        # Get latest tool_wrapper
        # same_rule_list = session.query(ToolWrapper).filter(ToolWrapper.tool_python_path == tool_wrapper.tool_python_path)\
        #     .filter(ToolWrapper.execution_id != tool_wrapper.execution_id).all()
        # i = 0
        # while i < len(same_rule_list):
        #     same = False
        #     # two tool_wrapper are equals if they have the same parameters, the same file names and path
        #     # and the same table names and models
        #     if same_rule_list[i] == tool_wrapper and \
        #             same_rule_list[i].does_output_exist() and \
        #             same_rule_list[i].is_output_more_recent_than_input():
        #             same = True
        #     if not same:
        #         del same_rule_list[i]
        #     else:
        #         i += 1

        # # The elements of the list have been removed if none fit the conditions
        # return bool(same_rule_list)

        # return tool_wrapper == tool_wrapper_old \
        #        and tool_wrapper_old.does_output_exist() \
        #        and tool_wrapper_old.is_output_more_recent_than_input()

        is_already_executed = False  # Default, not executed

        # Check if output of tool_wrapper exist and output is more recent than input
        is_already_executed = tool_wrapper.output_file_exists() and tool_wrapper.output_table_exists() \
                           and tool_wrapper.is_output_more_recent_than_input()

        tool_wrapper_old = session.query(ToolWrapper).filter(ToolWrapper.tool_python_path == tool_wrapper.tool_python_path)\
            .filter(ToolWrapper.execution_id != tool_wrapper.execution_id)\
            .order_by(ToolWrapper.id.desc()).first()

        if tool_wrapper_old is not None:
            # The tool_wrapper exists in the database: also require the same parameter values
            is_already_executed = is_already_executed and (tool_wrapper == tool_wrapper_old)

        return is_already_executed
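In short, the decision above combines an output check with an optional comparison against the latest stored run. A condensed restatement as a sketch, using the same ToolWrapper calls:

# Condensed restatement of the decision above (sketch only)
def already_executed(tool_wrapper, tool_wrapper_old):
    outputs_ok = (tool_wrapper.output_file_exists()
                  and tool_wrapper.output_table_exists()
                  and tool_wrapper.is_output_more_recent_than_input())
    if tool_wrapper_old is None:       # no earlier run stored in the database
        return outputs_ok
    return outputs_ok and tool_wrapper == tool_wrapper_old   # same parameters, files and tables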
Example #11
    def get_execution_tables():
        """
        Return the TableInputOutputInformation objects of the current (latest) execution.

        :return: ResultSet TableInputOutputInformation objects
        """
        session = SQLManager.instance().get_session()
        execution_id = session.query(func.max(ToolWrapper.execution_id))
        return session.query(TableInputOutputInformation).filter(
            TableInputOutputInformation.toolwrapper_id == ToolWrapper.id
        ).filter(ToolWrapper.execution_id == execution_id).all()
Example #12
    def setUp(self):

        self.test_path = PathManager.get_test_path()
        OptionManager.initial_test_setup()  # Set tests arguments
        SQLManager.instance().create_all()  # Create database with tables

        self.__local_session = SQLManager.instance().get_session()
        try:
            for i in range(10):
                self.__local_session.add(FooBase(name="testIODB " + str(i)))
            self.__local_session.commit()
        except Exception as e:
            self.__local_session.rollback()
            self.__local_session.close()
            raise e

        self.__io_base_existing = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        self.__io_base_existing.set_table(FooBase)
        self.__io_base_existing2 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        self.__io_base_existing2.set_table(FooBase)
        self.__io_base_existing3 = TableInputOutputInformation(model_py_path="FooBase2", table_key="FooBase2", table_name="FooBase2")
        self.__io_base_existing3.set_table(FooBase2)
Example #13
    def set_tables_properties(tables):
        """
        Import the models of the current execution and then associate models with TableInputOutputInformation objects.

        :param tables: the TableInputOutputInformation objects whose table properties need to be set.
        :type tables: ResultSet(TableInputOutputInformation)
        """
        # import models to avoid reference errors between models when dealing with them
        TableInputOutputInformation.import_models(
            set([t.model_py_path for t in tables]))

        for table in tables:
            # keep track of the models used in static variable of TableInputOutputInformation
            TableInputOutputInformation.tablemodelnames.add(
                table.model_py_path)
            # Associate model with the TableInputOutputInformation object
            mod = importlib.import_module(table.model_py_path)
            table_model = eval("mod." + table.model_py_path.split(".")[-1])
            table.set_table(table_model)
            # keep track of table names used in static variable of TableInputOutputInformation
            TableInputOutputInformation.tablenames.add(
                table_model.__tablename__)
            SQLManager.instance().get_session().add(table)
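The eval call above resolves the class whose name is the last component of model_py_path inside the freshly imported module. The same lookup can be written without eval, roughly as in this sketch:

import importlib

def resolve_model_class(model_py_path):
    # Sketch: import the module named by model_py_path and fetch the class whose
    # name matches the last path component, mirroring the eval-based lookup above.
    module = importlib.import_module(model_py_path)
    return getattr(module, model_py_path.split(".")[-1])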
Example #14
 def test_clear_history(self):
     cmd_line = [
         "python", "-D", self.__db_url, "-w", self.__example_def_file1,
         "-v", "-d", self.test_path
     ]
     cmd_line_clear_history = [
         "python", "-D", self.__db_url, "-w", self.__example_def_file1,
         "-v", "-d", self.test_path, "--cleanup-metadata"
     ]
     with self.assertRaises(SystemExit):
         WopMars().run(cmd_line)
     with self.assertRaises(SystemExit):
         WopMars().run(cmd_line_clear_history)
     session = SQLManager.instance().get_session()
     self.assertEqual(session.query(Execution).count(), 0)
Example #15
    def test_init(self):
        try:
            my_dag = DAG(self.__set_tool)

            dag_from_base = DAG(set(SQLManager.instance().get_session().query(ToolWrapper).all()))
            self.assertEqual(my_dag, dag_from_base)

            # Verifying that the nodes are correctly sorted
            self.assertTrue(self.__toolwrapper_fourth in my_dag.successors(self.__toolwrapper_third))
            self.assertTrue(self.__toolwrapper_fourth in my_dag.successors(self.__toolwrapper_second))
            self.assertTrue(self.__toolwrapper_second in my_dag.successors(self.__toolwrapper_first))
            self.assertTrue(self.__toolwrapper_third in my_dag.successors(self.__toolwrapper_first))
        except:
            # beware, if a test_bak inside the try block fails, 2 exceptions are raised
            raise AssertionError('Should not raise exception')
Example #16
 def create_triggers(cls):
     """
     Create INSERT, UPDATE and DELETE triggers on the models created by the user in order to record the modification time (mtime_epoch_millis).
     """
     stmt_list = ["INSERT", "UPDATE", "DELETE"]
     for user_table_name in Base.metadata.tables:
         if user_table_name[:4] != "wom_":
             for statement in stmt_list:
                 data={"statement": str(statement), "user_table_name": user_table_name, "wom_table_name": "wom_{}".format(cls.__qualname__)}
                 if SQLManager.instance().__dict__['d_database_config']['db_connection'] == 'sqlite':
                     sql_trigger = "CREATE TRIGGER IF NOT EXISTS {user_table_name}_{statement} " \
                           "AFTER {statement} ON {user_table_name} BEGIN UPDATE {wom_table_name} " \
                           "SET mtime_epoch_millis = CAST((julianday('now') - 2440587.5)*86400000 AS INTEGER), " \
                           "mtime_human = datetime('now', 'localtime') " \
                                   "WHERE table_name = '{user_table_name}'; END;".format(**data)
                 # elif SQLManager.instance().__dict__['d_database_config']['db_connection'] == 'mysql':
                 #     sql_trigger = "CREATE TRIGGER IF NOT EXISTS {table_key}_{statement} AFTER {statement} " \
                 #       "ON {table_key} for each row UPDATE wom_table_modification_time SET " \
                 #                   "mtime_epoch_millis = ROUND(UNIX_TIMESTAMP(CURTIME(4)) * 1000) " \
                 #       "WHERE table_key = '{table_key}';".format(**data)
                 #     obj_ddl = DDL(sql_trigger)
                 #     SQLManager.instance().create_trigger(Base.metadata.tables[table_key], obj_ddl)
                 # elif SQLManager.instance().__dict__['d_database_config']['db_connection'] == 'postgresql':
                 #     sql_trigger = """
                 #         CREATE OR REPLACE FUNCTION {table_key}_{statement}() RETURNS TRIGGER AS ${table_key}_{statement}$
                 #         BEGIN
                 #         UPDATE wom_table_modification_time SET mtime_epoch_millis = extract(epoch from now())*1000 WHERE table_key = '{table_key}';
                 #         RETURN NULL; -- result is ignored since this is an AFTER trigger
                 #         END;
                 #         ${table_key}_{statement}$ LANGUAGE plpgsql;
                 #         DROP TRIGGER IF EXISTS {table_key}_{statement} ON "{table_key}";
                 #         CREATE TRIGGER {table_key}_{statement} AFTER INSERT ON "{table_key}"
                 #         FOR EACH ROW EXECUTE PROCEDURE {table_key}_{statement}();
                 #         """.format(**data)
                 obj_ddl = DDL(sql_trigger)
                 SQLManager.instance().create_trigger(Base.metadata.tables[user_table_name], obj_ddl)
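For reference, with a hypothetical user table named "snp" and the INSERT statement, the SQLite template above renders to roughly the DDL below (in these examples the method is called as TableModificationTime.create_triggers(), so cls.__qualname__ gives wom_TableModificationTime):

# Rendered form of the SQLite trigger template above (illustration only; the user table "snp" is hypothetical)
rendered_sqlite_trigger = (
    "CREATE TRIGGER IF NOT EXISTS snp_INSERT AFTER INSERT ON snp "
    "BEGIN UPDATE wom_TableModificationTime "
    "SET mtime_epoch_millis = CAST((julianday('now') - 2440587.5)*86400000 AS INTEGER), "
    "mtime_human = datetime('now', 'localtime') "
    "WHERE table_name = 'snp'; END;"
)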
Example #17
    def __eq__(self, other):
        """
        Two TableInputOutputInformation objects are equal if their table attributes belong to the same class and if the associated tables
        have the same content.

        :param other: TableInputOutputInformation
        :return: boolean: True if the table attributes are the same, False if not
        """
        session = SQLManager.instance().get_session()
        if self.model_py_path != other.model_py_path or self.table_key != other.table_key:
            return False
        try:
            self_results = set(session.query(self.__table).all())
            other_results = set(session.query(other.get_table()).all())
            if self_results != other_results:
                return False
        except Exception as e:
            session.rollback()
            raise e
        return True
Example #18
 def __init__(self):
     """
     The parser will give the DAG which will be executed.
     The queue_exec is the thread pool. It will contain the tool threads waiting to be executed. Each tool
     should appear only once in the queue.
     The list_queue_buffer will be filled with the tool threads that the WorkflowManager couldn't execute.
     The count_exec is a counter that keeps track of the number of tools currently being executed.
     The dag_tools will contain the DAG representing the workflow.
     The dag_to_exec is basically the same DAG as dag_tools, or a subgraph of it, depending on the --since or --until
     options given by the user.
     The session attribute is used to reuse the session without calling SQLManager again.
     """
     self.__parser = Parser()
     self.__queue_exec = UniqueQueue()
     self.__list_queue_buffer = []
     self.__count_exec = 0
     self.__dag_tools = None
     self.__dag_to_exec = None
     self.__already_runned = set()
     self.__session = SQLManager.instance().get_session()
Example #19
    def test_are_inputs_ready(self):
        self.assertTrue(self.__toolwrapper_ready.are_inputs_ready())
        self.assertFalse(self.__toolwrapper_not_ready.are_inputs_ready())

        SQLManager.instance().get_session().add_all([FooBase(name="test_bak " + str(i)) for i in range(5)])
        SQLManager.instance().get_session().commit()

        t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        t1.set_table(FooBase)
        t1.model_declarative_meta = FooBase
        t1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        toolwrapper_ready2 = FooWrapper2(rule_name="rule2")
        toolwrapper_ready2.relation_toolwrapper_to_tableioinfo.append(t1)
        self.assertTrue(toolwrapper_ready2.are_inputs_ready())
        # this test does not work with mysql and postgresql
        if SQLManager.instance().engine.url.drivername not in ['mysql', 'postgresql']:
            SQLManager.instance().drop(FooBase.__tablename__)
            self.assertFalse(toolwrapper_ready2.are_inputs_ready())
Example #20
    def is_ready(self):
        """
        A TableInputOutputInformation object is ready if its table exists and contains entries.

        :return: bool if the table is ready
        """
        session = SQLManager.instance().get_session()
        try:
            results = session.query(self.__table).first()
            if results is None:
                Logger.instance().debug("The table " + self.table_key +
                                        " is empty.")
                return False
        except OperationalError as e:
            Logger.instance().debug("The table " + self.__table.__tablename__ +
                                    " doesn't exist.")
            return False
        except Exception as e:
            session.rollback()
            raise e
            # toodo LG twthread
        return True
Example #21
    def get_set_toolwrappers():
        """
        Ask the database for toolwrappers of the current execution.

        The current execution is defined as the one with the highest id (the id is auto-incrementing).

        :return: Set([ToolWrapper]) the set of toolwrappers of the current execution.
        """
        session = SQLManager.instance().get_session()
        set_toolwrappers = set()
        try:
            # query asking the db for the highest execution id
            execution_id = session.query(func.max(ToolWrapper.execution_id))
            Logger.instance().debug(
                "Getting toolwrappers of the current execution. id = " +
                str(execution_id.one()[0]))
            set_toolwrappers = set(
                session.query(ToolWrapper).filter(
                    ToolWrapper.execution_id == execution_id).all())
        except NoResultFound as e:
            raise e
        return set_toolwrappers
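The filter above compares execution_id against the query object itself, which SQLAlchemy turns into a subquery. An equivalent, more explicit form of the same lookup (sketch, same session, func and ToolWrapper as above):

# Equivalent explicit form of the "current execution" filter above (sketch)
max_execution_id = session.query(func.max(ToolWrapper.execution_id)).scalar()
current_toolwrappers = set(
    session.query(ToolWrapper)
    .filter(ToolWrapper.execution_id == max_execution_id).all())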
Example #22
    def iterate_wopfile_yml_dic_and_insert_rules_in_db(self, wopfile_path):
        """
        Read the given file and insert the rules of the workflow into the database.

        The definition file is assumed to be properly formed. The validation of the content of the definition is done
        during the instantiation of the tools.

        :param wopfile_path: String containing the path to the definition file.
        :type wopfile_path: str
        :raise WopMarsException: The content is not validated
        """
        self.load_wopfile_as_yml_dic(wopfile_path)

        session = SQLManager.instance().get_session()

        # The dict_workflow_definition is assumed to be well formed
        try:
            # The same execution entry for the whole workflow-related database entries.
            time_unix_ms, time_human = get_current_time()
            execution = Execution(started_at=time_human)
            # get the types database entries that should have been created previously
            input_entry = session.query(TypeInputOrOutput).filter(
                TypeInputOrOutput.is_input == True).one()
            output_entry = session.query(TypeInputOrOutput).filter(
                TypeInputOrOutput.is_input == False).one()
            tool_wrapper_set = set()
            # Encounter a rule block
            for yml_key_level1 in self.__wopfile_yml_dict:
                tool_wrapper_py_path = None
                # the name of the rule is extracted after the "rule" keyword. There shouldn't be a trailing ":" but stripping it costs nothing.
                rule_name_str = yml_key_level1.split()[-1].strip(":")
                Logger.instance().debug(
                    "Encounter rule " + rule_name_str + ": \n" + str(
                        DictUtils.pretty_repr(
                            self.__wopfile_yml_dict[yml_key_level1])))
                # The dict of "input"s, "output"s and "params" is re-initialized for each tool wrapper
                tool_wrapper_inst_dic = dict(dict_input={
                    "file": {},
                    "table": {}
                },
                                             dict_params={},
                                             dict_output={
                                                 "file": {},
                                                 "table": {}
                                             })
                for yml_key_level2 in self.__wopfile_yml_dict[yml_key_level1]:
                    # key_second_step is supposed to be "tool", "input", "output" or "params"
                    # if type(self.__wopfile_yml_dict[rule_header][yml_key_level_2nd]) == dict:
                    if yml_key_level2 in {"input", "output", "params"}:
                        # if it is a dict, then inputs, outputs or params are coming
                        for yml_key_level3 in self.__wopfile_yml_dict[
                                yml_key_level1][yml_key_level2]:
                            if yml_key_level2 == "params":
                                # yml_key = yml_key_level3
                                value = self.__wopfile_yml_dict[
                                    yml_key_level1][yml_key_level2][
                                        yml_key_level3]
                                option_inst = Option(name=yml_key_level3,
                                                     value=value)
                                tool_wrapper_inst_dic["dict_params"][
                                    yml_key_level3] = option_inst
                            else:  # file or table
                                for yml_key_level4 in self.__wopfile_yml_dict[
                                        yml_key_level1][yml_key_level2][
                                            yml_key_level3]:
                                    file_or_table_inst = None
                                    if yml_key_level3 == "file":
                                        # yml_key = yml_key_level4
                                        # str_path_to_file = os.path.join(OptionManager.instance()["--directory"],
                                        #                                 self.__wopfile_yml_dict[rule][
                                        #                                     key_second_step][key_third_step][key])
                                        str_path_to_file = self.__wopfile_yml_dict[
                                            yml_key_level1][yml_key_level2][
                                                yml_key_level3][yml_key_level4]
                                        file_or_table_inst = FileInputOutputInformation(
                                            file_key=yml_key_level4,
                                            path=str_path_to_file)

                                    elif yml_key_level3 == "table":
                                        yml_key = yml_key_level4
                                        modelname = self.__wopfile_yml_dict[
                                            yml_key_level1][yml_key_level2][
                                                yml_key_level3][yml_key]
                                        model_py_path = modelname
                                        table_name = model_py_path.split(
                                            '.')[-1]
                                        file_or_table_inst = TableInputOutputInformation(
                                            model_py_path=model_py_path,
                                            table_key=yml_key_level4,
                                            table_name=table_name)

                                    # all elements of the current rule block are stored in there
                                    # key_second_step is input or output here
                                    # tool_wrapper_inst_dic["dict_" + yml_key_level2][yml_key_level3][yml_key] = obj_created
                                    tool_wrapper_inst_dic["dict_" + yml_key_level2][yml_key_level3][yml_key_level4] \
                                        = file_or_table_inst
                                    Logger.instance().debug("Object " +
                                                            yml_key_level2 +
                                                            " " +
                                                            yml_key_level3 +
                                                            ": " +
                                                            yml_key_level4 +
                                                            " created.")
                    else:
                        # if the step is not a dict, then it is supposed to be the "tool" line
                        tool_wrapper_py_path = self.__wopfile_yml_dict[
                            yml_key_level1][yml_key_level2]
                # At this point, "tool_wrapper_inst_dic" is like this:
                # {
                #     'dict_params': {
                #         'option1': Option('option1', 'valueofoption1')
                #     },
                #     'dict_input': {
                #         'file' : {
                #             'input1': FileInputOutputInformation('input1', 'path/to/input1')
                #         }
                #         'table': {
                #             'table1': TableInputOutputInformation('table1', 'package.of.table1')
                #         }
                #     },
                # }

                # Instantiate the referred class and add it to the set of objects
                tool_wrapper_inst = self.create_tool_wrapper_inst(
                    rule_name_str, tool_wrapper_py_path, tool_wrapper_inst_dic,
                    input_entry, output_entry)
                # Associating a tool_python_path to an execution
                tool_wrapper_inst.relation_toolwrapper_to_execution = execution
                tool_wrapper_set.add(tool_wrapper_inst)
                Logger.instance().debug("Instance tool_python_path: " +
                                        tool_wrapper_py_path + " created.")
                # commit/rollback trick to clean the session - SQLAlchemy bug suspected
                session.commit()
                session.rollback()
                # totodo LucG set_table_properties outside the rules loop to take into account all the models at once
                # (error if one tool has a foreign key referring to a table that is not in its I/O)
            TableInputOutputInformation.set_tables_properties(
                TableInputOutputInformation.get_execution_tables())
            session.commit()
            session.rollback()
            # This command is creating the triggers that will update the modification
            TableModificationTime.create_triggers()
            # This create_all will create all models that have been found in the tool_python_path
            SQLManager.instance().create_all()
            session.add_all(tool_wrapper_set)
            # save all operations done so far.
            session.commit()
            for tool_wrapper in tool_wrapper_set:
                tool_wrapper.is_content_respected()

        except NoResultFound as e:
            session.rollback()
            raise WopMarsException(
                "Error while parsing the configuration file. The database has not been setUp Correctly.",
                str(e))
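To make the nesting walked above explicit, the parsed Wopfile dictionary holds one entry per rule; a single rule looks roughly like the following sketch (hypothetical rule name, tool path, file paths and model; the keys correspond to yml_key_level1 through yml_key_level4):

# Roughly the shape of the parsed Wopfile dictionary for one rule (hypothetical values)
wopfile_yml_dict = {
    "rule rule1": {                                          # yml_key_level1
        "tool": "example.wrapper.FooWrapper1",               # the non-dict "tool" entry
        "input":  {"file": {"input1": "input_file1.txt"}},   # level2 / level3 / level4
        "output": {"file": {"output1": "outdir/output_file1.txt"},
                   "table": {"FooBase": "FooBase"}},
        "params": {"option1": "valueofoption1"},
    }
}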
Example #23
    def test_read(self):

        self.__reader.iterate_wopfile_yml_dic_and_insert_rules_in_db(self.__example_def_file1_path)
        result = set(self.__session.query(ToolWrapper).all())

        input_entry = TypeInputOrOutput(is_input=True)
        output_entry = TypeInputOrOutput(is_input=False)

        f1 = FileInputOutputInformation(file_key="input1", path="resource/input_files/input_file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="outdir/output_file1.txt")
        f2.relation_file_or_tableioinfo_to_typeio = output_entry

        f3 = FileInputOutputInformation(file_key="input1", path="outdir/output_file1.txt")
        f3.relation_file_or_tableioinfo_to_typeio = input_entry

        f3bis = FileInputOutputInformation(file_key="input1", path="outdir/output_file1.txt")
        f3bis.relation_file_or_tableioinfo_to_typeio = input_entry

        f4 = FileInputOutputInformation(file_key="output1", path="outdir/output_file2.txt")
        f4.relation_file_or_tableioinfo_to_typeio = output_entry

        f5 = FileInputOutputInformation(file_key="output1", path="outdir/output_file3.txt")
        f5.relation_file_or_tableioinfo_to_typeio = output_entry

        f6 = FileInputOutputInformation(file_key="output2", path="outdir/output_file4.txt")
        f6.relation_file_or_tableioinfo_to_typeio = output_entry

        f7 = FileInputOutputInformation(file_key="input1", path="outdir/output_file3.txt")
        f7.relation_file_or_tableioinfo_to_typeio = input_entry

        f8 = FileInputOutputInformation(file_key="input2", path="outdir/output_file2.txt")
        f8.relation_file_or_tableioinfo_to_typeio = input_entry

        f9 = FileInputOutputInformation(file_key="output1", path="outdir/output_file5.txt")
        f9.relation_file_or_tableioinfo_to_typeio = output_entry

        f10 = FileInputOutputInformation(file_key="input1", path="outdir/output_file4.txt")
        f10.relation_file_or_tableioinfo_to_typeio = input_entry

        f11 = FileInputOutputInformation(file_key="output1", path="outdir/output_file6.txt")
        f11.relation_file_or_tableioinfo_to_typeio = output_entry

        f12 = FileInputOutputInformation(file_key="input1", path="outdir/output_file1.txt")
        f12.relation_file_or_tableioinfo_to_typeio = input_entry

        f13 = FileInputOutputInformation(file_key="input2", path="outdir/output_file5.txt")
        f13.relation_file_or_tableioinfo_to_typeio = input_entry

        f14 = FileInputOutputInformation(file_key="input3", path="outdir/output_file6.txt")
        f14.relation_file_or_tableioinfo_to_typeio = input_entry

        f15 = FileInputOutputInformation(file_key="output1", path="outdir/output_file7.txt")
        f15.relation_file_or_tableioinfo_to_typeio = output_entry

        t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        t1.relation_file_or_tableioinfo_to_typeio = output_entry

        t1bis = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        t1bis.relation_file_or_tableioinfo_to_typeio = input_entry

        t2 = TableInputOutputInformation(model_py_path="FooBase2", table_key="FooBase2", table_name="FooBase2")
        t2.relation_file_or_tableioinfo_to_typeio = output_entry

        t2bis = TableInputOutputInformation(model_py_path="FooBase2", table_key="FooBase2", table_name="FooBase2")
        t2bis.relation_file_or_tableioinfo_to_typeio = input_entry

        tw1 = FooWrapper4(rule_name="rule1")
        tw1.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        tw2 = FooWrapper5(rule_name="rule2")
        tw2.relation_toolwrapper_to_fileioinfo.extend([f3, f4])
        tw2.relation_toolwrapper_to_tableioinfo.extend([t1])
        tw3 = FooWrapper6(rule_name="rule3")
        tw3.relation_toolwrapper_to_fileioinfo.extend([f3bis, f5, f6])
        tw4 = FooWrapper7(rule_name="rule4")
        tw4.relation_toolwrapper_to_tableioinfo.extend([t1bis, t2])
        tw5 = FooWrapper8(rule_name="rule5")
        tw5.relation_toolwrapper_to_fileioinfo.extend([f8, f7, f9])
        tw6 = FooWrapper9(rule_name="rule6")
        tw6.relation_toolwrapper_to_fileioinfo.extend([f10, f11])
        tw6.relation_toolwrapper_to_tableioinfo.extend([t2bis])
        tw7 = FooWrapper10(rule_name="rule7")
        tw7.relation_toolwrapper_to_fileioinfo.extend([f12, f13, f14, f15])

        expected = set([tw1, tw2, tw3, tw4, tw5, tw6, tw7])

        # The good ------------------------------------:
        self.assertTrue((SetUtils.all_elm_of_one_set_in_one_other(result, expected) and
                         SetUtils.all_elm_of_one_set_in_one_other(expected, result)))

        # The bad -------------------------------------:

        # [self.assertRaises(WopMarsException, self.__reader.iterate_wopfile_yml_dic_and_insert_rules_in_db, file) for file in self.__list_s_to_exception_read]

        SQLManager.instance().get_session().rollback()
Example #24
    def load_one_toolwrapper(self, s_toolwrapper, s_dict_inputs,
                             s_dict_outputs, s_dict_params):
        """
        Method called when the ``tool`` command is used. It is equivalent to the :meth:`~.wopmars.framework.parsing.Reader.Reader.iterate_wopfile_yml_dic_and_insert_rules_in_db` method but creates a workflow
        with only one tool_python_path. The workflow is also stored inside the database.

        :param s_toolwrapper: The name of the tool_python_path (will be imported)
        :type s_toolwrapper: str
        :param s_dict_inputs: A string containing the dict of input files
        :type s_dict_inputs: str
        :param s_dict_outputs: A string containing the dict of output files
        :type s_dict_outputs: str
        :param s_dict_params: A string containing the dict of params
        :type s_dict_params: str

        :raise WopMarsException: There is an error while accessing the database
        """
        session = SQLManager.instance().get_session()
        dict_inputs = dict(eval(s_dict_inputs))
        dict_outputs = dict(eval(s_dict_outputs))
        dict_params = dict(eval(s_dict_params))
        try:
            # The same execution entry for the whole workflow-related database entries.
            time_unix_ms, time_human = get_current_time()
            execution = Execution(started_at=time_human)
            # get the types that should have been created previously
            input_entry = session.query(TypeInputOrOutput).filter(
                TypeInputOrOutput.is_input == True).one()
            output_entry = session.query(TypeInputOrOutput).filter(
                TypeInputOrOutput.is_input == False).one()

            Logger.instance().debug("Loading unique tool_python_path " +
                                    s_toolwrapper)
            dict_dict_dict_elm = dict(dict_input={
                "file": {},
                "table": {}
            },
                                      dict_params={},
                                      dict_output={
                                          "file": {},
                                          "table": {}
                                      })
            for type in dict_inputs:
                if type == "file":
                    for s_input in dict_inputs[type]:
                        obj_created = FileInputOutputInformation(
                            file_key=s_input,
                            path=os.path.join(
                                OptionManager.instance()["--directory"],
                                dict_inputs[type][s_input]))
                        dict_dict_dict_elm["dict_input"][type][
                            s_input] = obj_created
                        Logger.instance().debug("Object input file: " +
                                                s_input + " created.")
                elif type == "table":
                    for s_input in dict_inputs[type]:
                        model_py_path = dict_inputs[type][s_input]
                        table_name = model_py_path.split('.')[-1]
                        obj_created = TableInputOutputInformation(
                            model_py_path=model_py_path,
                            table_key=s_input,
                            table_name=table_name)
                        dict_dict_dict_elm["dict_input"][type][
                            s_input] = obj_created
                        Logger.instance().debug("Object input table: " +
                                                s_input + " created.")
            for type in dict_outputs:
                if type == "file":
                    for s_output in dict_outputs[type]:
                        obj_created = FileInputOutputInformation(
                            file_key=s_output,
                            path=dict_outputs[type][s_output])
                        dict_dict_dict_elm["dict_output"]["file"][
                            s_output] = obj_created
                        Logger.instance().debug("Object output file: " +
                                                s_output + " created.")
                elif type == "table":
                    for s_output in dict_outputs[type]:
                        model_py_path = dict_outputs[type][s_output]
                        table_name = model_py_path.split('.')[-1]
                        obj_created = TableInputOutputInformation(
                            model_py_path=model_py_path,
                            table_key=s_output,
                            table_name=table_name)
                        dict_dict_dict_elm["dict_output"]["table"][
                            s_output] = obj_created
                        Logger.instance().debug("Object output table: " +
                                                s_output + " created.")
            for s_param in dict_params:
                obj_created = Option(name=s_param, value=dict_params[s_param])
                dict_dict_dict_elm["dict_params"][s_param] = obj_created
                Logger.instance().debug("Object option: " + s_param +
                                        " created.")

            # Instantiate the referred class
            wrapper_entry = self.create_tool_wrapper_inst(
                "rule_" + s_toolwrapper, s_toolwrapper, dict_dict_dict_elm,
                input_entry, output_entry)
            wrapper_entry.relation_toolwrapper_to_execution = execution
            Logger.instance().debug("Object tool_python_path: " +
                                    s_toolwrapper + " created.")
            session.add(wrapper_entry)
            session.commit()
            session.rollback()
            TableInputOutputInformation.set_tables_properties(
                TableInputOutputInformation.get_execution_tables())
            # commit /rollback trick to clean the session
            # totodo LucG ask lionel: have you ever had this problem of not being able to run queries and inserts
            # in the same session?
            session.commit()
            session.rollback()
            # if not SQLManager.instance().d_database_config['db_connection'] == 'postgresql':
            # This command will create all the triggers that will create timestamp after modification
            TableModificationTime.create_triggers()
            # This create_all will create all models that have been found in the tool_python_path
            SQLManager.instance().create_all()
            wrapper_entry.is_content_respected()
        except NoResultFound as e:
            session.rollback()
            raise WopMarsException(
                "Error while parsing the configuration file. The database has not been setUp Correctly.",
                str(e))
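Since the three dictionary arguments arrive as strings and are evaluated inside the method, a call would look roughly like the sketch below (hypothetical tool path and file names; "reader" stands for the object this method is defined on, a Reader instance in these examples):

# Hypothetical invocation sketch: the input/output/params dicts are passed as Python-literal strings
reader.load_one_toolwrapper(
    s_toolwrapper="example.wrapper.FooWrapper1",
    s_dict_inputs="{'file': {'input1': 'input_file1.txt'}}",
    s_dict_outputs="{'file': {'output1': 'outdir/output_file1.txt'}}",
    s_dict_params="{'option1': 'valueofoption1'}",
)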
Example #25
    def run_queue(self):
        """
        Call the start() method of all elements of the queue.

        The tools are taken from the queue and their inputs are checked. If they are ready, the tools are started.
        If not, they are put in a buffer list of tools that are not ready or that are ready but do not have the
        necessary resources available.

        The start method is called with a dry argument: if the inputs of the ToolWrapper are the same as in a previous
        execution and the outputs are already available, the dry parameter is set to True and the start method only
        simulates the execution.

        After that, the code checks the state of the workflow and gathers information to see whether the workflow
        is finished, has encountered an error or is still running.

        :raises WopMarsException: The workflow encountered a problem and must stop.
        """

        #
        # # toTODO LucG THIS METHOD IS NOT THREAD-SAFE (peut etre que si, à voir)
        #

        ################################################################################################################
        #
        # Main while
        # If no tools have been added to the queue:
        #  - All tools have been executed and the queue is empty, so nothing happens
        #  - There were remaining tools in the queue but they weren't ready, so they are tested again
        #
        ################################################################################################################

        while not self.__queue_exec.empty():
            Logger.instance().debug("Queue size: " +
                                    str(self.__queue_exec.qsize()))
            Logger.instance().debug("Queue content: " + str([
                "rule: " + tt.get_toolwrapper().rule_name + "->" +
                tt.get_toolwrapper().tool_python_path
                for tt in self.__queue_exec.get_queue_tuple()
            ]))

            ############################################################################################################
            #
            # get the first element of the queue to execute
            #
            ############################################################################################################

            tool_wrapper_thread = self.__queue_exec.get()
            tool_wrapper = tool_wrapper_thread.get_toolwrapper()

            Logger.instance().debug("Current rule: " + tool_wrapper.rule_name +
                                    "->" + tool_wrapper.tool_python_path)
            # check if the predecessors of a rule have been already executed: a rule shouldn't be executed if
            # its predecessors have not been executed yet
            if not self.all_predecessors_have_run(tool_wrapper):
                Logger.instance().debug("Predecessors of rule: " +
                                        tool_wrapper.rule_name +
                                        " have not been executed yet.")

            ############################################################################################################
            #
            # Ready for running, either inputs are ready or dry-run mode is enabled
            #
            ############################################################################################################

            elif tool_wrapper.are_inputs_ready() or OptionManager.instance(
            )["--dry-run"]:
                # the state of inputs (table and file) are set in the db here.
                tool_wrapper.set_args_time_and_size(1)
                Logger.instance().debug("ToolWrapper ready: " +
                                        tool_wrapper.tool_python_path)
                dry = False

                ############################################################################################################
                #
                # Will set to dry (ie. will not execute) if all these conditions are true
                # - not in forceall mode
                # - tool already executed previously
                # - all predecessors of this tool wrapper have already been executed
                #
                ############################################################################################################

                # check if the actual execution of the tool_python_path is necessary
                # every predecessor of the tool_python_path has to be executed (or simulated)
                # will not actually execute (set to dry) if all these conditions hold

                if not OptionManager.instance(
                )["--forceall"] and not OptionManager.instance(
                )["--touch"]:  # if not in forceall option
                    if self.is_this_tool_wrapper_already_executed(
                            tool_wrapper
                    ):  # this tool wrapper already executed
                        # require that no predecessor of this tool wrapper is still waiting to be executed
                        if not bool([
                                tool_wrapper_predecessor
                                for tool_wrapper_predecessor in
                                self.__dag_to_exec.predecessors(tool_wrapper)
                                if tool_wrapper_predecessor.status !=
                                "EXECUTED" and tool_wrapper_predecessor.status
                                != "ALREADY_EXECUTED"
                        ]):
                            Logger.instance().info(
                                "ToolWrapper: {} -> {} seems to have already been run with same parameters."
                                .format(tool_wrapper.rule_name,
                                        tool_wrapper.tool_python_path))
                            dry = True

                # totodo lucg twthread verification des resources
                tool_wrapper_thread.subscribe(self)
                self.__count_exec += 1
                # totodo lucg twthread methode start
                tool_wrapper_thread.set_dry(dry)
                try:
                    # be careful here: the execution of the tool threads is recursive, meaning that calls to this function may
                    # be stacked (run -> notify success -> run(next tool) -> notify success(next tool) -> etc.)
                    # totodo lucg twthread methode start
                    tool_wrapper_thread.run()
                except Exception as e:
                    # as mentioned above, there may be recursive calls to this function, so every exception can
                    # pass here multiple times: this attribute is used for recognizing exceptions that have already been
                    # caught
                    if not hasattr(e, "teb_already_seen"):
                        setattr(e, "teb_already_seen", True)
                        tool_wrapper.set_execution_infos(status="ERROR")
                        self.__session.add(tool_wrapper)
                        self.__session.commit()
                    raise e
            else:
                Logger.instance().debug("ToolWrapper not ready: rule: " +
                                        tool_wrapper.rule_name + " -> " +
                                        str(tool_wrapper.tool_python_path))
                # The buffer contains the ToolWrappers that have inputs which are not ready yet.
                self.__list_queue_buffer.append(tool_wrapper_thread)

        Logger.instance().debug("Buffer: " + str([
            "rule: " + t.get_toolwrapper().rule_name + "->" +
            t.get_toolwrapper().tool_python_path
            for t in self.__list_queue_buffer
        ]))
        Logger.instance().debug("Running rules: " + str(self.__count_exec))

        # There are no more ToolWrappers waiting to be executed.
        # Are there tools currently being executed?
        if self.__count_exec == 0:
            # Are there tools that weren't ready?
            finish_epoch_millis_unix_ms, finish_epoch_millis_datetime = get_current_time(
            )
            if len(self.__list_queue_buffer) == 0:
                # If there is no tool waiting and no tool being executed, the workflow has finished.
                # finished_at = finish_epoch_millis_unix_ms
                # finished_at_strftime = datetime.datetime.fromtimestamp(finished_at/1000).strftime('%Y-%m-%d %H:%M:%S')
                Logger.instance().info(
                    "The workflow has completed. Finished at: {}".format(
                        finish_epoch_millis_datetime))
                self.set_finishing_informations(finish_epoch_millis_datetime,
                                                "FINISHED")
                SQLManager.instance().get_session().close()
                sys.exit(0)
            # only in a multi-threaded environment
            elif not self.check_buffer():
                # If no tool is being executed but some are still waiting for something, the workflow has an issue
                # finished_at = time_unix_ms()
                tw_list = [
                    t.get_toolwrapper() for t in self.__list_queue_buffer
                ]
                if len(tw_list) > 0:
                    input_files_not_ready = tw_list[
                        0].get_input_files_not_ready()
                    self.set_finishing_informations(
                        finish_epoch_millis_datetime, "ERROR")
                    raise WopMarsException(
                        "The workflow has failed.",
                        " The inputs '{}' have failed for this tool '{}'".
                        format(input_files_not_ready[0], tw_list[0].rule_name))
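The tail of this method decides how the workflow ends once the queue is empty; a condensed restatement of that decision as a standalone sketch:

# Condensed restatement of the end-of-queue decision above (sketch only)
def queue_outcome(count_exec, list_queue_buffer, buffer_can_still_run):
    if count_exec != 0:
        return "RUNNING"              # some tools are still being executed
    if len(list_queue_buffer) == 0:
        return "FINISHED"             # nothing running and nothing waiting
    if not buffer_can_still_run:      # corresponds to "not self.check_buffer()"
        return "ERROR"                # tools are waiting on inputs that will never be ready
    return "WAITING"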
Example #26
    def run(argv):
        """
        Entry-point of the program
        """

        # if the command line is malformed, docopt interrupts the program.
        try:
            if argv[1:] == []:  # If no arguments, run the help
                argv.append('-h')
            OptionManager.instance().update(docopt(__doc__, argv=argv[1:]))

        except DocoptExit as SE:
            print("Bad argument in the command line: \n\t" + " ".join(argv) +
                  "\n" + str(SE))
            sys.exit(2)

        try:
            schema_option = Schema({
                '--wopfile':
                Or("Wopfile.yml", str),
                '--database':
                Use(PathManager.check_database_valid_url),
                '-v':
                Or(0, And(int, lambda n: 1 <= n <= 2)),
                '--dot':
                Or(
                    None,
                    And(Use(PathManager.check_valid_path),
                        Use(PathManager.check_pygraphviz))),
                "--log":
                Use(PathManager.check_valid_path),
                # '--printtools': Use(bool),
                "--since":
                Or(None, str),
                "--until":
                Or(None, str),
                "--forceall":
                Use(bool),
                "--dry-run":
                Use(bool),
                "--touch":
                Use(bool),
                "--directory":
                Use(lambda path: pathlib.Path(path).mkdir(parents=True,
                                                          exist_ok=True)),
                "--input":
                Use(DictUtils.str_to_dict),
                "--output":
                Use(DictUtils.str_to_dict),
                "--params":
                Use(DictUtils.str_to_dict),
                "TOOLWRAPPER":
                Or(None, Use(PathManager.is_in_python_path)),
                "tool":
                Use(bool),
                "example":
                Use(bool),
                "--version":
                Use(bool),
                "--cleanup-metadata":
                Use(bool),
            })
            # The option values are validated using schema library
            OptionManager.instance().validate(schema_option)
            os.chdir(OptionManager.instance()["--directory"])

        except SchemaError as schema_msg:
            Logger.instance().debug("\nCommand line Args:" +
                                    str(OptionManager.instance()))
            # regex for the different possible error messages.
            match_open_def = re.match(r"^open\('(.[^\)]+)'\)", str(schema_msg))
            match_dot_def = re.match(r"^check_valid_path\(('.[^\)]+')\)",
                                     str(schema_msg))
            match_wrong_key = re.match(r"^Wrong keys ('.[^\)]+')",
                                       str(schema_msg))
            match_pygraphviz = re.match(r".*dot.*", str(schema_msg))
            print(match_pygraphviz)
            # Check the different regex..
            if match_open_def:
                Logger.instance().error("The file " + match_open_def.group(1) +
                                        " cannot be opened. It may not exist.")
            elif match_dot_def:
                Logger.instance().error("The path " + match_dot_def.group(1) +
                                        " is not valid.")
            elif match_wrong_key:
                # Normally never reached
                Logger.instance().error("The option key " +
                                        match_wrong_key.group(1) +
                                        " is not known.")
            elif match_pygraphviz:
                Logger.instance().error(
                    "The dot file path is not valid or the pygraphviz module is not installed. In the second case, install wopmars with pygraphviz: pip install wopmars[pygraphviz]"
                )
            else:
                # Normally never reached
                Logger.instance().error(
                    "An unknown error has occurred. Message: " +
                    str(schema_msg))
            sys.exit(2)

        Logger.instance().debug("\nCommand line Args:" +
                                str(OptionManager.instance()))

        ############################################################################################
        #
        # Print version to stdout and exit
        #
        ############################################################################################

        if OptionManager.instance()["--version"]:
            print("wopmars {}".format(__version__), file=sys.stdout)
            sys.exit(0)

        ############################################################################################
        #
        # Recursively writes the quickstart example and exits
        #
        ############################################################################################

        if OptionManager.instance()["example"]:
            # ExampleBuilder().build()

            source_path = os.path.join(PathManager.get_package_path(),
                                       "data/example")
            destination_path = "example"

            shutil.rmtree(destination_path, ignore_errors=True)
            shutil.copytree(source_path, destination_path)

            sys.exit(0)

        ############################################################################################
        #
        # Initiates new WorkflowManager instance
        #
        ############################################################################################

        workflow_manager = WorkflowManager()

        ############################################################################################
        #
        # Cleans up executions that were not fully terminated
        #
        ############################################################################################

        SQLManager.instance().clean_up_unexecuted_tool_wrappers()

        ############################################################################################
        #
        # --cleanup-metadata (clear history and exit)
        #
        ############################################################################################

        if OptionManager.instance()["--cleanup-metadata"]:
            Logger.instance().info("Deleting Wopmars history...")
            # Check if sqlite db path exists
            if pathlib.Path(SQLManager.instance().
                            d_database_config['db_database']).is_file():
                SQLManager.instance().clear_wopmars_history()
            if OptionManager.instance()["--cleanup-metadata"]:
                sys.exit(0)

        try:
            workflow_manager.run()
        except WopMarsException as WE:
            Logger.instance().error(str(WE))
            try:
                timestamp_epoch_millis, timestamp_human = get_current_time()
                Logger.instance().error(
                    "The workflow has encountered an error at: {}".format(
                        timestamp_human))
                workflow_manager.set_finishing_informations(
                    timestamp_human, "ERROR")
            except AttributeError:
                SQLManager.instance().get_session().rollback()
                Logger.instance().error(
                    "The execution has not even begun. No informations will be stored in the database."
                )
            except Exception as e:
                Logger.instance().error(
                    "An error occurred during the rollback of the changement of the database which can be now unstable:"
                    + str(e))
            sys.exit(1)
        except Exception as e:
            Logger.instance().error("An unknown error has occurred:\n" +
                                    str(e))
            sys.exit(1)
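
# --- Illustrative sketch, not part of the WopMars example above ---
# A minimal, self-contained demonstration of the docopt/schema validation
# pattern used in this entry point: option values produced by the CLI parser
# are coerced and checked with the `schema` library. The keys and values
# below are assumptions chosen only for the demonstration.
from schema import And, Or, Schema, SchemaError, Use

raw_args = {"-v": 2, "--dry-run": False, "--dot": None}

sketch_schema = Schema({
    '-v': Or(0, And(int, lambda n: 1 <= n <= 2)),  # verbosity: 0, 1 or 2
    '--dry-run': Use(bool),                        # coerce the flag to bool
    '--dot': Or(None, str),                        # optional output path
})

try:
    validated = sketch_schema.validate(raw_args)   # returns the coerced dict
except SchemaError as err:
    print("Invalid command line:", err)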
Beispiel #27
0
    def setUp(self):

        self.test_path = PathManager.get_test_path()
        OptionManager.initial_test_setup()  # Set tests arguments
        SQLManager.instance().create_all()  # Create database with tables

        set_tw_to_add = set()
        self.__session = SQLManager.instance().get_session()

        self.input_entry = TypeInputOrOutput(is_input=True)
        self.output_entry = TypeInputOrOutput(is_input=False)

        # ToolWrappers for the __eq__ test
        opt1 = Option(name="param1", value="1")

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        self.__toolwrapper1 = ToolWrapper(rule_name="rule1")
        self.__toolwrapper1.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        self.__toolwrapper1.relation_toolwrapper_to_option.append(opt1)

        opt1 = Option(name="param1", value="1")

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        self.__toolwrapper2 = ToolWrapper(rule_name="rule2")
        self.__toolwrapper2.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        self.__toolwrapper2.relation_toolwrapper_to_option.append(opt1)

        opt1 = Option(name="param2", value="2")

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        self.__toolwrapper3 = ToolWrapper(rule_name="rule3")
        self.__toolwrapper3.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        self.__toolwrapper3.relation_toolwrapper_to_option.append(opt1)

        # ToolWrappers for content_respected
        opt1 = Option(name="param1", value="2")

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        t1.set_table(FooBase)
        t1.model_declarative_meta = FooBase
        t1.table = t1
        t1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        t2 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        t2.set_table(FooBase)
        t2.model_declarative_meta = FooBase
        t2.table = t2
        t2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        self.__foowrapper_right_content = FooWrapper3(rule_name="rule1")
        self.__foowrapper_right_content.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        self.__foowrapper_right_content.relation_toolwrapper_to_tableioinfo.extend([t1, t2])
        self.__foowrapper_right_content.relation_toolwrapper_to_option.append(opt1)

        opt1 = Option(name="param1", value="String")

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t1.set_table(FooBase)
        t1.model_declarative_meta = FooBase
        t1.table = t1

        t2 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t2.set_table(FooBase)
        t2.model_declarative_meta = FooBase
        t2.table = t2

        self.__foowrapper_wrong_content1 = FooWrapper3(rule_name="rule2")
        self.__foowrapper_wrong_content1.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        self.__foowrapper_wrong_content1.relation_toolwrapper_to_tableioinfo.extend([t1, t2])
        self.__foowrapper_wrong_content1.relation_toolwrapper_to_option.append(opt1)

        opt1 = Option(name="param2", value="2")

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        f3 = FileInputOutputInformation(file_key="input2", path="file2.txt")
        f3.relation_file_or_tableioinfo_to_typeio = self.input_entry

        t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t1.set_table(FooBase)
        t1.model_declarative_meta = FooBase
        t1.table = t1

        t2 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t2.set_table(FooBase)
        t2.model_declarative_meta = FooBase
        t2.table = t2

        self.__foowrapper_wrong_content2 = FooWrapper3(rule_name="rule3")
        self.__foowrapper_wrong_content2.relation_toolwrapper_to_fileioinfo.extend([f1, f2, f3])
        self.__foowrapper_wrong_content2.relation_toolwrapper_to_tableioinfo.extend([t1, t2])
        self.__foowrapper_wrong_content2.relation_toolwrapper_to_option.append(opt1)

        opt1 = Option(name="param2", value="2")

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t1.set_table(FooBase)
        t1.model_declarative_meta = FooBase
        t1.table = t1

        t2 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t2.set_table(FooBase)
        t2.model_declarative_meta = FooBase
        t2.table = t2

        self.__foowrapper_wrong_content3 = FooWrapper3(rule_name="rule3")
        self.__foowrapper_wrong_content3.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        self.__foowrapper_wrong_content3.relation_toolwrapper_to_tableioinfo.extend([t1, t2])
        self.__foowrapper_wrong_content3.relation_toolwrapper_to_option.append(opt1)

        opt1 = Option(name="param1", value="String")

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t1.set_table(FooBase)
        t1.model_declarative_meta = FooBase
        t1.table = t1

        t2 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t2.set_table(FooBase)
        t2.model_declarative_meta = FooBase
        t2.table = t2

        self.__foowrapper_wrong_content4 = FooWrapper3(rule_name="rule3")
        self.__foowrapper_wrong_content4.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        self.__foowrapper_wrong_content4.relation_toolwrapper_to_tableioinfo.extend([t1, t2])
        self.__foowrapper_wrong_content4.relation_toolwrapper_to_option.append(opt1)

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t1.set_table(FooBase)
        t1.model_declarative_meta = FooBase
        t1.table = t1

        t2 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
        # t2.set_table(FooBase)
        t2.model_declarative_meta = FooBase
        t2.table = t2

        self.__foowrapper_wrong_content5 = FooWrapper3(rule_name="rule3")
        self.__foowrapper_wrong_content5.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
        self.__foowrapper_wrong_content5.relation_toolwrapper_to_tableioinfo.extend([t1, t2])

        # ToolWrappers for follows

        f1 = FileInputOutputInformation(file_key="input1", path="file1.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        self.__toolwrapper_first = FooWrapper2(rule_name="rule1")
        self.__toolwrapper_first.relation_toolwrapper_to_fileioinfo.extend([f1, f2])

        f1 = FileInputOutputInformation(file_key="input1", path="file2.txt")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file3.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        self.__toolwrapper_second = FooWrapper2(rule_name="rule2")
        self.__toolwrapper_second.relation_toolwrapper_to_fileioinfo.extend([f1, f2])

        # ToolWrappers for are_input_ready

        s_path_to_example_file_that_exists = os.path.join(self.test_path, "resource/input_files/input_file1.txt")

        f1 = FileInputOutputInformation(file_key="input1", path=s_path_to_example_file_that_exists)
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        self.__toolwrapper_ready = FooWrapper2(rule_name="rule2")
        self.__toolwrapper_ready.relation_toolwrapper_to_fileioinfo.extend([f1, f2])

        f1 = FileInputOutputInformation(file_key="input1", path="/not/existent/file")
        f1.relation_file_or_tableioinfo_to_typeio = self.input_entry

        f2 = FileInputOutputInformation(file_key="output1", path="file2.txt")
        f2.relation_file_or_tableioinfo_to_typeio = self.output_entry

        self.__toolwrapper_not_ready = FooWrapper2(rule_name="rule2")
        self.__toolwrapper_not_ready.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
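
# --- Illustrative sketch, not part of the test fixture above ---
# The setUp above wires the same FileInputOutputInformation input/output pair
# many times. A hypothetical helper such as the one below (relying on the same
# classes already imported by the test module) could factor that repetition;
# it is a sketch only, not code from the original module.
def make_file_io_pair(input_entry, output_entry,
                      in_path="file1.txt", out_path="file2.txt"):
    """Build one input and one output FileInputOutputInformation object."""
    f_in = FileInputOutputInformation(file_key="input1", path=in_path)
    f_in.relation_file_or_tableioinfo_to_typeio = input_entry
    f_out = FileInputOutputInformation(file_key="output1", path=out_path)
    f_out.relation_file_or_tableioinfo_to_typeio = output_entry
    return f_in, f_out

# Example use, mirroring the construction of self.__toolwrapper1:
#     f1, f2 = make_file_io_pair(self.input_entry, self.output_entry)
#     toolwrapper = ToolWrapper(rule_name="rule1")
#     toolwrapper.relation_toolwrapper_to_fileioinfo.extend([f1, f2])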
Beispiel #28
0
    def setUp(self):

        OptionManager.initial_test_setup()  # Set tests arguments
        SQLManager.instance().create_all()  # Create database with tables
        self.__session = SQLManager.instance().get_session()
Beispiel #29
0
 def tearDown(self):
     self.__session.rollback()
     SQLManager.instance().get_session().close()
     SQLManager.instance().drop_all()
     OptionManager._drop()
     SQLManager._drop()
Beispiel #30
0
    def create_tool_wrapper_inst(self, rule_name, tool_python_path,
                                 dict_dict_dict_elm, input_entry,
                                 output_entry):
        """
        Actual creating of the Toolwrapper object.

        The tool_python_path object is an entry of the table rule in the resulting database.

        If the scoped_session has current modification, they probably will be commited during this method:
        models are created and this can only be done with clean session.

        :param rule_name: Contains the is_input of the rule in which the tool_python_path will be used.
        :type rule_name: str
        :param tool_python_path: Contains the is_input of the tool_python_path. It will be used for importing the correct module and then for creating the class
        :type tool_python_path: str
        :param dict_dict_dict_elm: "input"s "output"s and "params" and will be used to make relations between options / input / output and the tool_python_path.
        :type dict_dict_dict_elm: dict(dict(dict()))
        :param input_entry: input entry
        :type input_entry: :class:`wopmars.framework.bdd.models.TypeInputOrOutput.TypeInputOrOutput`
        :param output_entry: output entry
        :type output_entry: :class:`wopmars.framework.bdd.models.TypeInputOrOutput.TypeInputOrOutput`

        :return: TooLWrapper instance
        """
        session = SQLManager.instance().get_session()
        # Import the module into the mod variable
        try:
            mod = importlib.import_module(tool_python_path)
            # Get the class object from the module; the class name is the last component of the Python path
            ToolWrapper_class = getattr(mod, tool_python_path.split('.')[-1])
        except AttributeError:
            raise WopMarsException(
                "Error while parsing the configuration file: \n\t",
                "The class " + tool_python_path + " doesn't exist.")
        except ImportError as IE:
            if tool_python_path in str(IE):
                raise WopMarsException(
                    "Error while parsing the configuration file:",
                    tool_python_path + " module is not in the pythonpath. ")
            else:
                raise WopMarsException(
                    "Error while parsing the configuration file:",
                    tool_python_path + " module contains an ImportError: " +
                    str(IE))
        # Initialize the instance of the user ToolWrapper
        tool_wrapper_inst = ToolWrapper_class(rule_name=rule_name)

        # associating ToolWrapper instances with their files / models
        for elm in dict_dict_dict_elm["dict_input"]:
            if elm == "file":
                for input_f in dict_dict_dict_elm["dict_input"][elm]:
                    # set the type of FileInputOutputInformation object
                    iofileput_entry = dict_dict_dict_elm["dict_input"][elm][
                        input_f]
                    iofileput_entry.relation_file_or_tableioinfo_to_typeio = input_entry
                    try:
                        # associating file and tool_python_path
                        tool_wrapper_inst.relation_toolwrapper_to_fileioinfo.append(
                            iofileput_entry)
                    except ObjectDeletedError as e:
                        raise WopMarsException(
                            "Error in the tool_python_path class declaration. Please, notice the developer",
                            "The error is probably caused by the lack of the 'polymorphic_identity' attribute"
                            " in the tool_python_path. Error message: \n" +
                            str(e))
            elif elm == "table":
                for input_t in dict_dict_dict_elm["dict_input"][elm]:
                    # input_t is the name of the table (not the model)
                    # This is a pre-emptive commit: the next statement will create a new table and the session has to
                    # be clean. It looks like a bug in SQLAlchemy that does not allow queries followed by insert
                    # statements in the same session.
                    session.commit()
                    iodbput_entry = dict_dict_dict_elm["dict_input"][elm][
                        input_t]
                    # the user-side models are created during the reading of the definition file
                    # table_entry = TableInputOutputInformation(is_input=dict_dict_dict_elm["dict_input"][elm][input_t], tablename=input_t)
                    # insert in the database the mtime_epoch_millis of the last modification of a developer-side table
                    time_unix_ms, time_human = get_current_time()
                    model_py_path_suffix = dict_dict_dict_elm["dict_input"][
                        elm][input_t].model_py_path.split('.')[-1]
                    modification_table_entry, created = session.get_or_create(
                        TableModificationTime,
                        defaults={
                            "mtime_epoch_millis": time_unix_ms,
                            "mtime_human": time_human
                        },
                        table_name=model_py_path_suffix)
                    iodbput_entry.relation_tableioinfo_to_tablemodiftime = modification_table_entry
                    iodbput_entry.relation_file_or_tableioinfo_to_typeio = input_entry
                    try:
                        tool_wrapper_inst.relation_toolwrapper_to_tableioinfo.append(
                            iodbput_entry)
                    except ObjectDeletedError as e:
                        raise WopMarsException(
                            "Error in the tool_python_path class declaration. Please, notice the developer",
                            "The error is probably caused by the lack of the 'polymorphic_identity' attribute"
                            " in the tool_python_path. Error message: \n" +
                            str(e))

        for elm in dict_dict_dict_elm["dict_output"]:
            if elm == "file":
                for output_f in dict_dict_dict_elm["dict_output"][elm]:
                    iofileput_entry = dict_dict_dict_elm["dict_output"][elm][
                        output_f]
                    iofileput_entry.relation_file_or_tableioinfo_to_typeio = output_entry
                    try:
                        tool_wrapper_inst.relation_toolwrapper_to_fileioinfo.append(
                            iofileput_entry)
                    except ObjectDeletedError as e:
                        raise WopMarsException(
                            "Error in the tool_python_path class declaration. Please, notice the developer",
                            "The error is probably caused by the lack of the 'polymorphic_identity' attribute"
                            " in the tool_python_path. Error message: \n" +
                            str(e))
            elif elm == "table":
                for output_t in dict_dict_dict_elm["dict_output"][elm]:
                    # output_t is the table name (not the model)
                    session.commit()
                    iodbput_entry = dict_dict_dict_elm["dict_output"][elm][
                        output_t]
                    time_unix_ms, time_human = get_current_time()
                    # This corresponds to the __tablename__ of the model in the database
                    model_py_path_suffix = dict_dict_dict_elm["dict_output"][
                        elm][output_t].model_py_path.split('.')[-1]
                    modification_table_entry, created = session.get_or_create(
                        TableModificationTime,
                        defaults={
                            "mtime_epoch_millis": time_unix_ms,
                            "mtime_human": time_human
                        },
                        table_name=model_py_path_suffix)
                    iodbput_entry.relation_tableioinfo_to_tablemodiftime = modification_table_entry
                    iodbput_entry.relation_file_or_tableioinfo_to_typeio = output_entry
                    try:
                        tool_wrapper_inst.relation_toolwrapper_to_tableioinfo.append(
                            iodbput_entry)
                    except ObjectDeletedError as e:
                        raise WopMarsException(
                            "Error in the tool_python_path class declaration. Please, notice the developer",
                            "The error is probably caused by the lack of the 'polymorphic_identity' attribute"
                            " in the tool_python_path. Error message: \n" +
                            str(e))

        for opt in dict_dict_dict_elm["dict_params"]:
            # associating option and tool_python_path
            tool_wrapper_inst.relation_toolwrapper_to_option.append(
                dict_dict_dict_elm["dict_params"][opt])

        # toolwrapper_wrapper.is_content_respected()
        return tool_wrapper_inst
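
# --- Illustrative sketch, not part of the WopMars sources above ---
# create_tool_wrapper_inst relies on session.get_or_create(Model, defaults=...,
# **lookup), a wrapper provided by the WopMars session. The function below is a
# generic re-implementation of that pattern on a plain SQLAlchemy session, shown
# only to clarify how the defaults/lookup split works; it is an assumption, not
# the actual WopMars implementation.
def get_or_create(session, model, defaults=None, **lookup):
    """Return (instance, created): fetch the row matching `lookup`, or create it."""
    instance = session.query(model).filter_by(**lookup).first()
    if instance is not None:
        return instance, False                 # row already present
    params = dict(lookup)
    params.update(defaults or {})              # defaults only apply on creation
    instance = model(**params)
    session.add(instance)
    session.commit()                           # make the new row visible to later queries
    return instance, True                      # row newly created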