def test_launch_burst_invalid_portlet_analyzer_data(self):
        """
        Test that burst is marked as error if invalid data is passed to the first step.
        """
        algo_id = self.flow_service.get_algorithm_by_module_and_class('tvb.tests.framework.adapters.testadapter1',
                                                                      'TestAdapter1').id
        # Adapter tries to do an int(test1_val1) and an int(test1_val2), so this input should be valid
        burst_config = self.burst_service.new_burst_configuration(self.test_project.id)
        kwargs_replica = {'test1_val1': '1', 'test1_val2': '0'}
        burst_config.update_simulator_configuration(kwargs_replica)

        test_portlet = dao.get_portlet_by_identifier(self.PORTLET_ID)
        portlet_configuration = self.burst_service.new_portlet_configuration(test_portlet.id)
        # Portlet analyzer tries to do int(input), which should fail
        declared_overwrites = {ADAPTER_PREFIX_ROOT + '0test_non_dt_input': 'asa'}
        self.burst_service.update_portlet_configuration(portlet_configuration, declared_overwrites)
        burst_config.tabs[0].portlets[0] = portlet_configuration

        burst_id, _ = self.burst_service.launch_burst(burst_config, 0, algo_id, self.test_user.id)
        burst_config = dao.get_burst_by_id(burst_id)
        # Wait a bounded number of seconds for the burst to finish
        burst_config = self._wait_for_burst(burst_config, error_expected=True)

        burst_wf = dao.get_workflows_for_burst(burst_config.id)[0]
        wf_steps = dao.get_workflow_steps(burst_wf.id)
        self.assertEqual(2, len(wf_steps),
                         "Should have exactly 2 workflow steps: one for the simulation and one for the portlet analysis.")
        simulator_op = dao.get_operation_by_id(wf_steps[0].fk_operation)
        self.assertEqual(model.STATUS_FINISHED, simulator_op.status,
                         "The first operation should be the simulator, with 'finished' status.")
        portlet_analyze_op = dao.get_operation_by_id(wf_steps[1].fk_operation)
        self.assertEqual(portlet_analyze_op.status, model.STATUS_ERROR,
                         "The second operation should be the portlet analysis step, with 'error' status.")
 def test_stop_operations(self):
     data = {"test1_val1": 5, 'test1_val2': 5}
     operations = self._launch_test_algo_on_cluster(**data)
     operation = dao.get_operation_by_id(operations[0].id)
     self.assertFalse(operation.has_finished)
     self.flow_c.stop_operation(operation.id, 0, False)
     operation = dao.get_operation_by_id(operation.id)
     self.assertEqual(operation.status, model.STATUS_CANCELED)
 def prepare_next_step(self, last_executed_op_id):
     """
     If the operation with id 'last_executed_op_id' resulted after
     the execution of a workflow step then this method will launch
     the operation corresponding to the next step from the workflow.
     """
     try:
         current_step, next_workflow_step = self._get_data(last_executed_op_id)
         if next_workflow_step is not None:
             operation = dao.get_operation_by_id(next_workflow_step.fk_operation)
             dynamic_param_names = next_workflow_step.dynamic_workflow_param_names
             if len(dynamic_param_names) > 0:
                 op_params = json.loads(operation.parameters)
                 for param_name in dynamic_param_names:
                     dynamic_param = op_params[param_name]
                     former_step = dao.get_workflow_step_by_step_index(next_workflow_step.fk_workflow,
                                                                       dynamic_param[wf_cfg.STEP_INDEX_KEY])
                     if type(dynamic_param[wf_cfg.DATATYPE_INDEX_KEY]) is IntType: 
                         datatypes = dao.get_results_for_operation(former_step.fk_operation)
                         op_params[param_name] = datatypes[dynamic_param[wf_cfg.DATATYPE_INDEX_KEY]].gid
                     else:
                         previous_operation = dao.get_operation_by_id(former_step.fk_operation)
                         op_params[param_name] = json.loads(previous_operation.parameters)[
                             dynamic_param[wf_cfg.DATATYPE_INDEX_KEY]]
                 operation.parameters = json.dumps(op_params)
                 operation = dao.store_entity(operation)
             return operation.id
         else:
             if current_step is not None:
                 current_workflow = dao.get_workflow_by_id(current_step.fk_workflow)
                 current_workflow.status = current_workflow.STATUS_FINISHED
                 dao.store_entity(current_workflow)
                 burst_entity = dao.get_burst_by_id(current_workflow.fk_burst)
                 parallel_workflows = dao.get_workflows_for_burst(burst_entity.id)
                 all_finished = True
                 for workflow in parallel_workflows:
                     if workflow.status == workflow.STATUS_STARTED:
                         all_finished = False
                 if all_finished:
                     self.mark_burst_finished(burst_entity, success=True)
                     disk_size = dao.get_burst_disk_size(burst_entity.id)  # Transform from kB to MB
                     if disk_size > 0:
                         user = dao.get_project_by_id(burst_entity.fk_project).administrator
                         user.used_disk_space = user.used_disk_space + disk_size
                         dao.store_entity(user)
             else:
                 operation = dao.get_operation_by_id(last_executed_op_id)
                 disk_size = dao.get_disk_size_for_operation(operation.id)  # Transform from kB to MB
                 if disk_size > 0:
                     user = dao.get_user_by_id(operation.fk_launched_by)
                     user.used_disk_space = user.used_disk_space + disk_size
                     dao.store_entity(user)
         return None
      except Exception as excep:
         self.logger.error(excep)
         self.logger.exception(excep)
         raise WorkflowInterStepsException(excep)
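For orientation: the dynamic-parameter resolution above dispatches on the type of the DATATYPE_INDEX entry. An integer selects one of the former step's result datatypes by position; anything else names an input parameter of the former step. A minimal, TVB-free sketch of that rule (all names here are illustrative, not TVB API):

def resolve_dynamic_param(datatype_index, former_step_outputs, former_step_inputs):
    """Mirror the dispatch in prepare_next_step: int -> output GID, key -> former input."""
    if isinstance(datatype_index, int):
        # Entry is the output of a previous step: pick the datatype by position.
        return former_step_outputs[datatype_index]
    # Entry was an input of the previous step: reuse its parameter value.
    return former_step_inputs[datatype_index]

# e.g. resolve_dynamic_param(0, ['gid-123'], {}) == 'gid-123'
# e.g. resolve_dynamic_param('surface', [], {'surface': 'gid-456'}) == 'gid-456'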
 def test_stop_operations_group(self):
     data = {model.RANGE_PARAMETER_1: "test1_val1", "test1_val1": '5,6,7', 'test1_val2': 5}
     operations = self._launch_test_algo_on_cluster(**data)
     operation_group_id = 0
     for operation in operations:
         operation = dao.get_operation_by_id(operation.id)
         self.assertFalse(operation.has_finished)
         operation_group_id = operation.fk_operation_group
     self.flow_c.stop_operation(operation_group_id, 1, False)
     for operation in operations:
         operation = dao.get_operation_by_id(operation.id)
         self.assertEqual(operation.status, model.STATUS_CANCELED)
    def _edit_data(self, datatype, new_data, from_group=False):
        """
        Private method, used for editing a meta-data XML file and a DataType row
        for a given custom DataType entity with new dictionary of data from UI.
        """
        if isinstance(datatype, MappedType) and not os.path.exists(datatype.get_storage_file_path()):
            if not datatype.invalid:
                datatype.invalid = True
                dao.store_entity(datatype)
            return
        # 1. First update Operation fields:
        #    Update group field if possible
        new_group_name = new_data[CommonDetails.CODE_OPERATION_TAG]
        empty_group_value = (new_group_name is None or new_group_name == "")
        if from_group:
            if empty_group_value:
                raise StructureException("Empty group is not allowed!")

            group = dao.get_generic_entity(model.OperationGroup, new_data[CommonDetails.CODE_OPERATION_GROUP_ID])
            if group and len(group) > 0 and new_group_name != group[0].name:
                group = group[0]
                exists_group = dao.get_generic_entity(model.OperationGroup, new_group_name, 'name')
                if exists_group:
                    raise StructureException("Group '" + new_group_name + "' already exists.")
                group.name = new_group_name
                dao.store_entity(group)
        else:
            operation = dao.get_operation_by_id(datatype.fk_from_operation)
            operation.user_group = new_group_name
            dao.store_entity(operation)

        # 2. Update dateType fields:
        datatype.subject = new_data[DataTypeOverlayDetails.DATA_SUBJECT]
        datatype.state = new_data[DataTypeOverlayDetails.DATA_STATE]
        if DataTypeOverlayDetails.DATA_TAG_1 in new_data:
            datatype.user_tag_1 = new_data[DataTypeOverlayDetails.DATA_TAG_1]
        if DataTypeOverlayDetails.DATA_TAG_2 in new_data:
            datatype.user_tag_2 = new_data[DataTypeOverlayDetails.DATA_TAG_2]
        if DataTypeOverlayDetails.DATA_TAG_3 in new_data:
            datatype.user_tag_3 = new_data[DataTypeOverlayDetails.DATA_TAG_3]
        if DataTypeOverlayDetails.DATA_TAG_4 in new_data:
            datatype.user_tag_4 = new_data[DataTypeOverlayDetails.DATA_TAG_4]
        if DataTypeOverlayDetails.DATA_TAG_5 in new_data:
            datatype.user_tag_5 = new_data[DataTypeOverlayDetails.DATA_TAG_5]

        datatype = dao.store_entity(datatype)
        # 3. Update MetaData in H5 as well.
        datatype.persist_full_metadata()
        # 4. Update the group_name/user_group into the operation meta-data file
        operation = dao.get_operation_by_id(datatype.fk_from_operation)
        self.structure_helper.update_operation_metadata(operation.project.name, new_group_name,
                                                        str(datatype.fk_from_operation), from_group)
    def initiate_prelaunch(self, operation, adapter_instance, temp_files, **kwargs):
        """
        Public method.
        This should be the common entry point for calling an adapter method.
        """
        result_msg = ""
        try:
            unique_id = None
            if self.ATT_UID in kwargs:
                unique_id = kwargs[self.ATT_UID]
            filtered_kwargs = adapter_instance.prepare_ui_inputs(kwargs)
            self.logger.debug("Launching operation " + str(operation.id) + " with " + str(filtered_kwargs))
            operation = dao.get_operation_by_id(operation.id)   # Load Lazy fields

            params = dict()
            for k, value_ in filtered_kwargs.items():
                params[str(k)] = value_

            disk_space_per_user = TvbProfile.current.MAX_DISK_SPACE
            pending_op_disk_space = dao.compute_disk_size_for_started_ops(operation.fk_launched_by)
            user_disk_space = dao.compute_user_generated_disk_size(operation.fk_launched_by)    # From kB to Bytes
            available_space = disk_space_per_user - pending_op_disk_space - user_disk_space

            result_msg, nr_datatypes = adapter_instance._prelaunch(operation, unique_id, available_space, **params)
            operation = dao.get_operation_by_id(operation.id)
            ## Update DB stored kwargs for search purposes, to contain only valuable params (no unselected options)
            operation.parameters = json.dumps(kwargs)
            operation.mark_complete(model.STATUS_FINISHED)
            if nr_datatypes > 0:
                #### Write operation meta-XML only if some results are returned
                self.file_helper.write_operation_metadata(operation)
            dao.store_entity(operation)
            self._remove_files(temp_files)

        except zipfile.BadZipfile as excep:
            msg = "The uploaded file is not a valid ZIP!"
            self._handle_exception(excep, temp_files, msg, operation)
        except TVBException as excep:
            self._handle_exception(excep, temp_files, excep.message, operation)
        except MemoryError:
            msg = ("Could not execute operation because there is not enough free memory." +
                   " Please adjust operation parameters and re-launch it.")
            self._handle_exception(Exception(msg), temp_files, msg, operation)
        except Exception as excep1:
            msg = "Could not launch Operation with the given input data!"
            self._handle_exception(excep1, temp_files, msg, operation)

        ### Try to find next workflow Step. It might throw WorkflowException
        next_op_id = self.workflow_service.prepare_next_step(operation.id)
        self.launch_operation(next_op_id)
        return result_msg
 def _check_if_datatype_was_removed(self, datatype):
     """
     Check if a certain datatype was removed.
     """
     try:
         dao.get_datatype_by_id(datatype.id)
         self.fail("The datatype was not deleted.")
     except Exception:
         pass
     try:
         dao.get_operation_by_id(datatype.fk_from_operation)
         self.fail("The operation was not deleted.")
     except Exception:
         pass
    def launch(self, data_file):
        """
        Execute import operations: unpack ZIP, build and store generic DataType objects.

        :param data_file: an archive (ZIP / HDF5) containing the `DataType`

        :raises LaunchException: when data_file is None, nonexistent, or invalid \
                    (e.g. incomplete meta-data, not in ZIP / HDF5 format etc. )

        """
        if data_file is None:
            raise LaunchException("Please select file which contains data to import")

        if os.path.exists(data_file):
            if zipfile.is_zipfile(data_file):
                current_op = dao.get_operation_by_id(self.operation_id)
                
                # Create a new temporary folder in which to extract the data
                tmp_folder = os.path.join(self.storage_path, "tmp_import") 
                FilesHelper().unpack_zip(data_file, tmp_folder)
                operations = ImportService().import_project_operations(current_op.project, self.storage_path)
                shutil.rmtree(tmp_folder)
                self.nr_of_datatypes += len(operations)
                
            else:
                #upgrade file if necessary
                file_update_manager = FilesUpdateManager()
                file_update_manager.upgrade_file(data_file)

                folder, h5file = os.path.split(data_file)
                manager = HDF5StorageManager(folder, h5file)
                if manager.is_valid_hdf5_file():
                    datatype = None
                    try:
                        current_op = dao.get_operation_by_id(self.operation_id)
                        service = ImportService()
                        datatype = service.load_datatype_from_file(folder, h5file, current_op.id)
                        service.store_datatype(datatype)
                        self.nr_of_datatypes += 1
                    except Exception as excep:
                        # If the import operation failed, delete the file from disk.
                        if datatype is not None and os.path.exists(datatype.get_storage_file_path()):
                            os.remove(datatype.get_storage_file_path())
                        self.log.exception(excep)
                        raise LaunchException("Invalid file received as input. Most probably incomplete "
                                              "meta-data ...  " + str(excep))
                else:
                    raise LaunchException("Uploaded file: %s is neither in ZIP or HDF5 format" % data_file)
    def test_bct_all(self):
        """
        Iterate all BCT algorithms and execute them.
        """
        for i in xrange(len(self.bct_adapters)):
            for bct_identifier in self.bct_adapters[i].get_algorithms_dictionary():
                ### Prepare Operation and parameters
                algorithm = dao.get_algorithm_by_group(self.algo_groups[i].id, bct_identifier)
                operation = TestFactory.create_operation(algorithm=algorithm, test_user=self.test_user,
                                                         test_project=self.test_project,
                                                         operation_status=model.STATUS_STARTED)
                self.assertEqual(model.STATUS_STARTED, operation.status)
                ### Launch BCT algorithm
                submit_data = {self.algo_groups[i].algorithm_param_name: bct_identifier,
                               algorithm.parameter_name: self.connectivity.gid}
                try:
                    OperationService().initiate_prelaunch(operation, self.bct_adapters[i], {}, **submit_data)
                    if bct_identifier in BCTTest.EXPECTED_TO_FAIL_VALIDATION:
                        raise Exception("Algorithm %s was expected to throw input validation "
                                        "exception, but did not!" % (bct_identifier,))

                    operation = dao.get_operation_by_id(operation.id)
                    ### Check that operation status after execution is success.
                    self.assertEqual(STATUS_FINISHED, operation.status)
                    ### Make sure at least one result exists for each BCT algorithm
                    results = dao.get_generic_entity(model.DataType, operation.id, 'fk_from_operation')
                    self.assertTrue(len(results) > 0)

                except InvalidParameterException as excep:
                    ## Some algorithms are expected to throw validation exception.
                    if bct_identifier not in BCTTest.EXPECTED_TO_FAIL_VALIDATION:
                        raise excep
    def load_burst(self, burst_id):
        """
        :param burst_id: the id of the burst that should be loaded
        
        Having this input the method should:
        
            - load the entity from the DB
            - get all the workflow steps for the saved burst id
            - go through the visualization workflow steps to create the tab 
                configuration of the burst, using the tab_index and index_in_tab 
                fields saved on each workflow_step
                
        """
        burst = dao.get_burst_by_id(burst_id)
        burst.prepare_after_load()
        burst.reset_tabs()
        burst_workflows = dao.get_workflows_for_burst(burst.id)

        group_gid = None
        if len(burst_workflows) == 1:
            # A simple burst with no range parameters
            burst = self.__populate_tabs_from_workflow(burst, burst_workflows[0])
        elif len(burst_workflows) > 1:
            # A burst with ranged parameters created multiple workflows; we need to
            # launch the parameter space exploration with the resulting group.
            self.__populate_tabs_from_workflow(burst, burst_workflows[0])
            executed_steps = dao.get_workflow_steps(burst_workflows[0].id)

            operation = dao.get_operation_by_id(executed_steps[0].fk_operation)
            if operation.operation_group:
                workflow_group = dao.get_datatypegroup_by_op_group_id(operation.operation_group.id)
                group_gid = workflow_group.gid
        return burst, group_gid
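A hedged usage sketch (burst_service stands for an instance of the service above, burst_id for a previously stored burst):

burst, group_gid = burst_service.load_burst(burst_id)
# Tabs and portlets were re-populated from the visualization workflow steps:
first_tab_portlets = burst.tabs[0].portlets
if group_gid is not None:
    # Ranged burst: group_gid identifies the datatype group for the
    # parameter space exploration view.
    print("explore group %s" % group_gid)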
def do_operation_launch(operation_id):
    """
    Event attached to the local queue for executing an operation, when we will have resources available.
    """
    LOGGER = get_logger('tvb.core.operation_async_launcher')

    try:
        LOGGER.debug("Loading operation with id=%s" % operation_id)
        current_operation = dao.get_operation_by_id(operation_id)
        stored_adapter = current_operation.algorithm
        LOGGER.debug("Importing Algorithm: " + str(stored_adapter.classname) +
                     " for Operation:" + str(current_operation.id))
        PARAMS = parse_json_parameters(current_operation.parameters)
        adapter_instance = ABCAdapter.build_adapter(stored_adapter)

        ## Un-comment below for profiling an operation:
        ## import cherrypy.lib.profiler as profiler
        ## p = profiler.Profiler("/Users/lia.domide/TVB/profiler/")
        ## p.run(OperationService().initiate_prelaunch, current_operation, adapter_instance, {}, **PARAMS)

        OperationService().initiate_prelaunch(current_operation, adapter_instance, {}, **PARAMS)
        LOGGER.debug("Successfully finished operation " + str(operation_id))

    except Exception as excep:
        LOGGER.error("Could not execute operation " + str(sys.argv[1]))
        LOGGER.exception(excep)
        parent_burst = dao.get_burst_for_operation_id(operation_id)
        if parent_burst is not None:
            WorkflowService().mark_burst_finished(parent_burst, error_message=str(excep))
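do_operation_launch is typically reached through a subprocess entry point (see the run_params list in the OperationExecutor.run snippet below). A hedged sketch of such a __main__ guard; the exact argument handling and profile setup inside tvb.core.operation_async_launcher may differ:

if __name__ == '__main__':
    # sys.argv layout assumed from run_params: [script, operation_id, profile_name]
    import sys
    OPERATION_ID = sys.argv[1]
    do_operation_launch(OPERATION_ID)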
    def store_result_figure(self, project, user, img_type, export_data, image_name=None, operation_id=None):
        """
        Store the result image into a file and keep a reference to it in the DB.
        """
        store_path, file_name = self._image_path(project.name, img_type)

        if img_type == FigureService._TYPE_PNG:            # PNG file from canvas
            self._write_png(store_path, export_data)
        elif img_type == FigureService._TYPE_SVG:          # SVG file from svg viewer
            self._write_svg(store_path, export_data)

        if operation_id:
            operation = dao.get_operation_by_id(operation_id)
        else:
            operation = None
            operation_id = None

        image_name = self._generate_image_name(project, user, operation, image_name)

        # Store entity into DB
        entity = model.ResultFigure(operation_id, user.id, project.id, FigureService._DEFAULT_SESSION_NAME,
                                    image_name, file_name, img_type)
        entity = dao.store_entity(entity)

        # Load instance from DB to have lazy fields loaded
        figure = dao.load_figure(entity.id)
        # Write image meta data to disk  
        self.file_helper.write_image_metadata(figure)

        if operation:
            # Force writing operation meta data on disk.
            # This is important later for operation import
            self.file_helper.write_operation_metadata(operation)
    def test_bct_all(self):
        """
        Iterate all BCT algorithms and execute them.
        """
        for adapter_instance in self.bct_adapters:
            algorithm = adapter_instance.stored_adapter
            operation = TestFactory.create_operation(algorithm=algorithm, test_user=self.test_user,
                                                     test_project=self.test_project,
                                                     operation_status=model.STATUS_STARTED)
            assert model.STATUS_STARTED == operation.status
            ### Launch BCT algorithm
            submit_data = {algorithm.parameter_name: self.connectivity.gid}
            try:
                OperationService().initiate_prelaunch(operation, adapter_instance, {}, **submit_data)
                if algorithm.classname in TestBCT.EXPECTED_TO_FAIL_VALIDATION:
                    raise Exception("Algorithm %s was expected to throw input validation "
                                    "exception, but did not!" % (algorithm.classname,))

                operation = dao.get_operation_by_id(operation.id)
                ### Check that operation status after execution is success.
                assert STATUS_FINISHED == operation.status
                ### Make sure at least one result exists for each BCT algorithm
                results = dao.get_generic_entity(model.DataType, operation.id, 'fk_from_operation')
                assert len(results) > 0

            except InvalidParameterException as excep:
                ## Some algorithms are expected to throw validation exception.
                if algorithm.classname not in TestBCT.EXPECTED_TO_FAIL_VALIDATION:
                    raise excep
    def create_operation(algorithm=None, test_user=None, test_project=None, 
                         operation_status=model.STATUS_FINISHED, parameters="test params"):
        """
        Create persisted operation.
        
        :param algorithm: algorithm entity to use; when None, a default test adapter is looked up.
        :return: Operation entity after persistence. 
        """
        if algorithm is None:
            algorithm = dao.get_algorithm_by_module('tvb.tests.framework.adapters.ndimensionarrayadapter',
                                                    'NDimensionArrayAdapter')

        if test_user is None:
            test_user = TestFactory.create_user()
            
        if test_project is None:
            test_project = TestFactory.create_project(test_user)
            
        meta = {DataTypeMetaData.KEY_SUBJECT: "John Doe",
                DataTypeMetaData.KEY_STATE: "RAW_DATA"}
        operation = model.Operation(test_user.id, test_project.id, algorithm.id, parameters, meta=json.dumps(meta),
                                    status=operation_status)
        dao.store_entity(operation)
        ### Make sure lazy attributes are correctly loaded.
        return dao.get_operation_by_id(operation.id)
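A hedged usage sketch in a test, relying only on the defaults shown above:

operation = TestFactory.create_operation()    # default adapter, fresh user and project
assert operation.status == model.STATUS_FINISHED
started = TestFactory.create_operation(operation_status=model.STATUS_STARTED)
assert started.status == model.STATUS_STARTED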
 def test_stop_burst_operation(self):
     burst_config = self._long_burst_launch()
     operation = self._wait_for_burst_ops(burst_config)[0]
     self.assertFalse(operation.has_finished)
     self.flow_c.stop_burst_operation(operation.id, 0, False)
     operation = dao.get_operation_by_id(operation.id)
     self.assertEqual(operation.status, model.STATUS_CANCELED)
 def add_operation_additional_info(self, message):
     """
     Adds additional info on the operation to be displayed in the UI. Usually a warning message.
     """
     current_op = dao.get_operation_by_id(self.operation_id)
     current_op.additional_info = message
     dao.store_entity(current_op)
    def _run_cluster_job(operation_identifier, user_name_label, adapter_instance):
        """
        Threaded Popen
        It is the function called by the ClusterSchedulerClient in a Thread.
        This function starts a new process.
        """
        # Load operation so we can estimate the execution time
        operation = dao.get_operation_by_id(operation_identifier)
        kwargs = parse_json_parameters(operation.parameters)
        kwargs = adapter_instance.prepare_ui_inputs(kwargs)
        time_estimate = int(adapter_instance.get_execution_time_approximation(**kwargs))
        hours = time_estimate // 3600
        minutes = (time_estimate % 3600) // 60
        seconds = time_estimate % 60
        # Anything lower than 5 hours just uses the default walltime
        if hours < 5:
            walltime = "05:00:00"
        else:
            walltime = "%02d:%02d:%02d" % (hours, minutes, seconds)

        call_arg = TvbProfile.current.cluster.SCHEDULE_COMMAND % (operation_identifier, user_name_label, walltime)
        LOGGER.info(call_arg)
        process_ = Popen([call_arg], stdout=PIPE, shell=True)
        job_id = process_.stdout.read().replace('\n', '').split(TvbProfile.current.cluster.JOB_ID_STRING)[-1]
        LOGGER.debug("Got jobIdentifier = %s for CLUSTER operationID = %s" % (operation_identifier, job_id))
        operation_identifier = model.OperationProcessIdentifier(operation_identifier, job_id=job_id)
        dao.store_entity(operation_identifier)
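The hour/minute/second arithmetic above can be checked in isolation. A minimal, TVB-free sketch of the same walltime formatting, using the 5-hour floor from this variant:

def format_walltime(time_estimate, minimum="05:00:00"):
    """Format a runtime estimate (seconds) as HH:MM:SS, never below the default floor."""
    hours, remainder = divmod(int(time_estimate), 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours < 5:
        return minimum
    return "%02d:%02d:%02d" % (hours, minutes, seconds)

# format_walltime(7 * 3600 + 125) == "07:02:05"
# format_walltime(600) == "05:00:00"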
 def launch_visualization(visualization, frame_width=None, frame_height=None, 
                          method_name=ABCAdapter.LAUNCH_METHOD, is_preview=True):
     """
     :param visualization: a visualization workflow step
     """
     dynamic_params = visualization.dynamic_param
     static_params = visualization.static_param
     parameters_dict = static_params
     current_project_id = 0
      ## Current operation id is needed for the export mechanism. So far we just use
      ## the operation of the workflow_step from which the inputs are taken.
     for param in dynamic_params:
         step_index = dynamic_params[param][WorkflowStepConfiguration.STEP_INDEX_KEY]
         datatype_index = dynamic_params[param][WorkflowStepConfiguration.DATATYPE_INDEX_KEY]
         referred_workflow_step = dao.get_workflow_step_by_step_index(visualization.fk_workflow, step_index)
         referred_operation_id = referred_workflow_step.fk_operation
         referred_operation = dao.get_operation_by_id(referred_operation_id)
         current_project_id = referred_operation.fk_launched_in
         if type(datatype_index) is IntType:
             ## Entry is the output of a previous step ##
             datatypes = dao.get_results_for_operation(referred_operation_id)
             parameters_dict[param] = datatypes[datatype_index].gid
         else:
             ## Entry is the input of a previous step ###
             parameters_dict[param] = json.loads(referred_operation.parameters)[datatype_index]
     algorithm = dao.get_algorithm_by_id(visualization.fk_algorithm)
     adapter_instance = ABCAdapter.build_adapter(algorithm.algo_group)
     adapter_instance.current_project_id = current_project_id
     prepared_inputs = adapter_instance.prepare_ui_inputs(parameters_dict)
     if frame_width is not None:
         prepared_inputs[ABCDisplayer.PARAM_FIGURE_SIZE] = (frame_width, frame_height)
     if isinstance(adapter_instance, ABCMPLH5Displayer) and is_preview is True:
         prepared_inputs[ABCMPLH5Displayer.SHOW_FULL_TOOLBAR] = False
      result = getattr(adapter_instance, method_name)(**prepared_inputs)
     return result, parameters_dict
    def _run_cluster_job(operation_identifier, user_name_label, adapter_instance):
        """
        Threaded Popen
        It is the function called by the ClusterSchedulerClient in a Thread.
        This function starts a new process.
        """
        # Load operation so we can estimate the execution time
        operation = dao.get_operation_by_id(operation_identifier)
        kwargs = parse_json_parameters(operation.parameters)
        time_estimate = int(adapter_instance.get_execution_time_approximation(**kwargs))
        hours = time_estimate // 3600
        minutes = (time_estimate % 3600) // 60
        seconds = time_estimate % 60
        # Anything lower than 2 hours just use default walltime
        if hours < 2:
            walltime = "02:00:00"
        elif hours > 23:
            walltime = "23:59:59"
        else:
            walltime = datetime.time(hours, minutes, seconds)
            walltime = walltime.strftime("%H:%M:%S")

        call_arg = config.CLUSTER_SCHEDULE_COMMAND % (walltime, operation_identifier, user_name_label)
        LOGGER.info(call_arg)
        process_ = Popen([call_arg], stdout=PIPE, shell=True)
        job_id = process_.stdout.read().replace('\n', '').split('OAR_JOB_ID=')[-1]
        LOGGER.debug("Got jobIdentifier = %s for CLUSTER operationID = %s" % (operation_identifier, job_id))
        operation_identifier = model.OperationProcessIdentifier(operation_identifier, job_id=job_id)
        dao.store_entity(operation_identifier)
    def stop_operation(operation_id):
        """
        Stop a thread for a given operation id
        """
        operation = dao.get_operation_by_id(operation_id)
        if not operation or operation.status != model.STATUS_STARTED:
            LOGGER.warning("Operation already stopped or not found is given to stop job: %s" % operation_id)
            return True

        LOGGER.debug("Stopping operation: %s" % str(operation_id))

        ## Set the thread stop flag to true
        for thread in CURRENT_ACTIVE_THREADS:
            if int(thread.operation_id) == operation_id:
                thread.stop()
                LOGGER.debug("Found running thread for operation: %d" % operation_id)

        ## Kill Thread
        stopped = True
        operation_process = dao.get_operation_process_for_operation(operation_id)
        if operation_process is not None:
            ## Now try to kill the operation if it exists
            stopped = OperationExecutor.stop_pid(operation_process.pid)
            if not stopped:
                LOGGER.debug("Operation %d was probably killed from it's specific thread." % operation_id)
            else:
                LOGGER.debug("Stopped OperationExecutor process for %d" % operation_id)

        ## Mark operation as canceled in DB.
        operation.mark_cancelled()
        dao.store_entity(operation)
        return stopped
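stop_operation combines a cooperative flag (thread.stop()) with a hard process kill (stop_pid). A self-contained sketch of the cooperative half of that pattern, built on threading.Event; OperationExecutor's actual implementation is assumed, not shown here:

import threading

class StoppableWorker(threading.Thread):
    """Thread that checks a stop flag between units of work."""

    def __init__(self, operation_id):
        threading.Thread.__init__(self)
        self.operation_id = operation_id
        self._stop_event = threading.Event()

    def stop(self):
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

    def run(self):
        while not self.stopped():
            self._stop_event.wait(0.1)  # placeholder for one unit of work between flag checks

# worker = StoppableWorker(42); worker.start(); worker.stop(); worker.join()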
 def get_portlet_status(portlet_cfg):
     """ 
     Get the status of a portlet configuration. 
     """
     status = model.BurstConfiguration.BURST_FINISHED
     error_msg = ''
     if len(portlet_cfg.analyzers):
         for analyze_step in portlet_cfg.analyzers:
             operation = dao.get_operation_by_id(analyze_step.fk_operation)
             if operation is None:
                 status = model.BurstConfiguration.BURST_ERROR
                 error_msg = "Operation has been removed"
                 break
             if operation.status == model.STATUS_STARTED:
                 status = model.BurstConfiguration.BURST_RUNNING
                 break
             if operation.status == model.STATUS_ERROR:
                 status = model.BurstConfiguration.BURST_ERROR
                 error_msg = operation.additional_info
                 break
             if operation.status == model.STATUS_CANCELED:
                 status = model.BurstConfiguration.BURST_CANCELED
                 break
     else:
         ## Simulator is first step so now decide if we are waiting for input or output ##
         visualizer = portlet_cfg.visualizer
         wait_on_outputs = False
         for entry in visualizer.dynamic_param:
             if type(visualizer.dynamic_param[entry][WorkflowStepConfiguration.DATATYPE_INDEX_KEY]) == IntType:
                 wait_on_outputs = True
                 break
         if wait_on_outputs:
             simulator_step = dao.get_workflow_step_by_step_index(visualizer.fk_workflow, 0)
             operation = dao.get_operation_by_id(simulator_step.fk_operation)
             if operation is None:
                 status = model.BurstConfiguration.BURST_ERROR
                 error_msg = ("At least one simulation result was not found, it might have been removed. <br\>"
                              "You can copy and relaunch current simulation, if you are interested in having "
                              "your results re-computed.")
             elif operation.status == model.STATUS_STARTED:
                 status = model.BurstConfiguration.BURST_RUNNING
             elif operation.status == model.STATUS_ERROR:
                 status = model.BurstConfiguration.BURST_ERROR
                 error_msg = operation.additional_info
             elif operation.status == model.STATUS_CANCELED:
                 status = model.BurstConfiguration.BURST_CANCELED
     return status, error_msg
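The per-operation checks above reduce to a small precedence table. A compact equivalent using the same model constants (a sketch only; the early-break ordering above is what gives STARTED/ERROR/CANCELED their precedence):

OPERATION_TO_BURST_STATUS = {
    model.STATUS_STARTED: model.BurstConfiguration.BURST_RUNNING,
    model.STATUS_ERROR: model.BurstConfiguration.BURST_ERROR,
    model.STATUS_CANCELED: model.BurstConfiguration.BURST_CANCELED,
}

def burst_status_for(operation):
    """Map one operation's status onto the burst-level status; the default is finished."""
    if operation is None:
        return model.BurstConfiguration.BURST_ERROR
    return OPERATION_TO_BURST_STATUS.get(operation.status,
                                         model.BurstConfiguration.BURST_FINISHED)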
 def test_stop_operation_finished(self):
     """
     Test that an operation that is already finished is not changed by the stop operation.
     """
     adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter1", "TestAdapter1")
     data = {"test1_val1": 5, 'test1_val2': 5}
     algo = adapter.stored_adapter
     algo_category = dao.get_category_by_id(algo.fk_category)
     operations, _ = self.operation_service.prepare_operations(self.test_user.id, self.test_project.id, algo,
                                                               algo_category, {}, **data)
     self.operation_service._send_to_cluster(operations, adapter)
     operation = dao.get_operation_by_id(operations[0].id)
     operation.status = model.STATUS_FINISHED
     dao.store_entity(operation)
     self.operation_service.stop_operation(operations[0].id)
     operation = dao.get_operation_by_id(operations[0].id)
      assert operation.status == model.STATUS_FINISHED, "Operation shouldn't have been canceled!"
 def set_visibility(op):
     # workaround:
     # 'reload' the operation so that it has the project property set.
     # get_operations_in_group does not eager load it and now we're out of a sqlalchemy session
     # write_operation_metadata requires that property
     op = dao.get_operation_by_id(op.id)
     # end hack
     op.visible = is_visible
     self.structure_helper.write_operation_metadata(op)
     dao.store_entity(op)
    def _capture_operation_results(self, result, user_tag=None):
        """
        After an operation has finished, make sure the results are stored
        in DB storage and that the correct meta-data and IDs are set.
        """
        results_to_store = []
        data_type_group_id = None
        operation = dao.get_operation_by_id(self.operation_id)
        if operation.user_group is None or len(operation.user_group) == 0:
            operation.user_group = date2string(datetime.now(), date_format=LESS_COMPLEX_TIME_FORMAT)
            operation = dao.store_entity(operation)
        if self._is_group_launch():
            data_type_group_id = dao.get_datatypegroup_by_op_group_id(operation.fk_operation_group).id
        # All entities will have the same subject and state
        subject = self.meta_data[DataTypeMetaData.KEY_SUBJECT]
        state = self.meta_data[DataTypeMetaData.KEY_STATE]
        burst_reference = None
        if DataTypeMetaData.KEY_BURST in self.meta_data:
            burst_reference = self.meta_data[DataTypeMetaData.KEY_BURST]
        perpetuated_identifier = None
        if DataTypeMetaData.KEY_TAG_1 in self.meta_data:
            perpetuated_identifier = self.meta_data[DataTypeMetaData.KEY_TAG_1]

        for res in result:
            if res is None:
                continue
            res.subject = str(subject)
            res.state = state
            res.fk_parent_burst = burst_reference
            res.fk_from_operation = self.operation_id
            res.framework_metadata = self.meta_data
            if not res.user_tag_1:
                res.user_tag_1 = user_tag if user_tag is not None else perpetuated_identifier
            else:
                res.user_tag_2 = user_tag if user_tag is not None else perpetuated_identifier
            res.fk_datatype_group = data_type_group_id
            ## Compute size on disk, in case file storage is used
            if hasattr(res, 'storage_path') and hasattr(res, 'get_storage_file_name'):
                associated_file = os.path.join(res.storage_path, res.get_storage_file_name())
                res.close_file()
                res.disk_size = self.file_handler.compute_size_on_disk(associated_file)
            res = dao.store_entity(res)
            # Write metaData
            res.persist_full_metadata()
            results_to_store.append(res)
        del result[0:len(result)]
        result.extend(results_to_store)

        if len(result) and self._is_group_launch():
            ## Update the operation group name
            operation_group = dao.get_operationgroup_by_id(operation.fk_operation_group)
            operation_group.fill_operationgroup_name(result[0].type)
            dao.store_entity(operation_group)

        return 'Operation ' + str(self.operation_id) + ' has finished.', len(results_to_store)
    def initiate_prelaunch(self, operation, adapter_instance, temp_files, **kwargs):
        """
        Public method.
        This should be the common entry point for calling an adapter method.
        """
        result_msg = ""
        try:
            unique_id = None
            if self.ATT_UID in kwargs:
                unique_id = kwargs[self.ATT_UID]
            if operation.method_name == ABCAdapter.LAUNCH_METHOD:
                filtered_kwargs = adapter_instance.prepare_ui_inputs(kwargs)
            else:
                filtered_kwargs = kwargs

            self.logger.debug("Launching operation " + str(operation.id) + "." +
                              operation.method_name + " with " + str(filtered_kwargs))
            operation = dao.get_operation_by_id(operation.id)   # Load Lazy fields

            params = dict()
            for k, value_ in filtered_kwargs.items():
                params[str(k)] = value_

            disk_space_per_user = TvbProfile.current.MAX_DISK_SPACE
            pending_op_disk_space = dao.compute_disk_size_for_started_ops(operation.fk_launched_by)
            user_disk_space = dao.get_user_by_id(operation.fk_launched_by).used_disk_space  # Transform from kB to Bytes
            available_space = disk_space_per_user - pending_op_disk_space - user_disk_space

            result_msg, nr_datatypes = adapter_instance._prelaunch(operation, unique_id, available_space, **params)
            operation = dao.get_operation_by_id(operation.id)
            ## Update DB stored kwargs for search purposes, to contain only valuable params (no unselected options)
            operation.parameters = json.dumps(kwargs)
            operation.mark_complete(model.STATUS_FINISHED)
            if nr_datatypes > 0:
                #### Write operation meta-XML only if some results are returned
                self.file_helper.write_operation_metadata(operation)
            dao.store_entity(operation)
            self._remove_files(temp_files)

        except zipfile.BadZipfile as excep:
            msg = "The uploaded file is not a valid ZIP!"
            self._handle_exception(excep, temp_files, msg, operation)
    def run(self):
        """
        Get the required data from the operation queue and launch the operation.
        """
        # Try to get a spot to launch own operation.
        LOCKS_QUEUE.get(True)
        operation_id = self.operation_id
        run_params = [TvbProfile.current.PYTHON_INTERPRETER_PATH, '-m', 'tvb.core.operation_async_launcher',
                      str(operation_id), TvbProfile.CURRENT_PROFILE_NAME]

        # In the exceptional case where the user pressed stop while the Thread startup was in progress,
        # we should no longer launch the operation.
        if self.stopped() is False:

            env = os.environ.copy()
            env['PYTHONPATH'] = os.pathsep.join(sys.path)
            # anything that was already in $PYTHONPATH should have been reproduced in sys.path

            launched_process = Popen(run_params, stdout=PIPE, stderr=PIPE, env=env)

            LOGGER.debug("Storing pid=%s for operation id=%s launched on local machine." % (operation_id,
                                                                                            launched_process.pid))
            op_ident = model.OperationProcessIdentifier(operation_id, pid=launched_process.pid)
            dao.store_entity(op_ident)

            if self.stopped():
                # In the exceptional case where the user pressed stop while the Thread startup was in progress,
                # and stop_operation is concurrently asking about the OperationProcessIdentifier.
                self.stop_pid(launched_process.pid)

            subprocess_result = launched_process.communicate()
            LOGGER.info("Finished with launch of operation %s" % operation_id)
            returned = launched_process.wait()

            if returned != 0 and not self.stopped():
                # Process did not end as expected. (e.g. Segmentation fault)
                workflow_service = WorkflowService()
                operation = dao.get_operation_by_id(self.operation_id)
                LOGGER.error("Operation suffered fatal failure! Exit code: %s Exit message: %s" % (returned,
                                                                                                   subprocess_result))

                workflow_service.persist_operation_state(operation, model.STATUS_ERROR,
                                                         "Operation failed unexpectedly! Please check the log files.")

                burst_entity = dao.get_burst_for_operation_id(self.operation_id)
                if burst_entity:
                    message = "Error in operation process! Possibly segmentation fault."
                    workflow_service.mark_burst_finished(burst_entity, error_message=message)

            del launched_process

        # Give back the empty spot now that this operation is finished
        CURRENT_ACTIVE_THREADS.remove(self)
        LOCKS_QUEUE.put(1)
 def test_set_operation_visibility(self):
     """
      Check if the visibility for an operation is set correctly.
     """
     self.__init_algorithmn()
     op1 = model.Operation(self.test_user.id, self.test_project.id, self.algo_inst.id, "")
     op1 = dao.store_entity(op1)
     self.assertTrue(op1.visible, "The operation should be visible.")
     self.project_service.set_operation_and_group_visibility(op1.gid, False)
     updated_op = dao.get_operation_by_id(op1.id)
     self.assertFalse(updated_op.visible, "The operation should not be visible.")
 def test_stop_burst_operation_group(self):
     burst_config = self._long_burst_launch(True)
     operations = self._wait_for_burst_ops(burst_config)
     operations_group_id = 0
     for operation in operations:
         self.assertFalse(operation.has_finished)
         operations_group_id = operation.fk_operation_group
     self.flow_c.stop_burst_operation(operations_group_id, 1, False)
     for operation in operations:
         operation = dao.get_operation_by_id(operation.id)
         self.assertEqual(operation.status, model.STATUS_CANCELED)
 def remove_operation(self, operation_id):
     """
     Remove a given operation 
     """
     operation = dao.get_operation_by_id(operation_id)
     if operation is not None:
         datatypes_for_op = dao.get_results_for_operation(operation_id)
         for dt in datatypes_for_op:
             self.remove_datatype(operation.project.id, dt.gid, True)
         dao.remove_entity(model.Operation, operation.id)
     else:
         self.logger.warning("Attempt to delete operation with id=%s which no longer exists." % operation_id)
def _fire_simulation(project_id, **kwargs):
    launched_operation = lab.fire_simulation(project_id, **kwargs)

    # wait for the operation to finish
    while not launched_operation.has_finished:
        sleep(5)
        launched_operation = dao.get_operation_by_id(launched_operation.id)

    if launched_operation.status != model.STATUS_FINISHED:
        raise Exception('simulation failed: ' + launched_operation.additional_info)

    return launched_operation
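The polling loop above has no upper bound. A hedged variant with a timeout, reusing dao and sleep exactly as they appear in the snippet above:

def wait_for_operation(operation, poll_seconds=5, timeout_seconds=600):
    """Re-load the operation from the DB until it finishes or the timeout expires."""
    waited = 0
    while not operation.has_finished and waited < timeout_seconds:
        sleep(poll_seconds)
        waited += poll_seconds
        operation = dao.get_operation_by_id(operation.id)
    return operation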
 def load_operation(operation_id):
     """ Retrieve previously stored Operation from DB, and load operation.burst attribute"""
     operation = dao.get_operation_by_id(operation_id)
     operation.burst = dao.get_burst_for_operation_id(operation_id)
     return operation
Beispiel #33
0
    for f in adapter_instance.flaten_input_interface():
        launch_args[f["name"]] = str(f["default"]) if 'default' in f else None
    launch_args["connectivity"] = connectivity.gid
    launch_args["model_parameters_option_Generic2dOscillator_variables_of_interest"] = 'V'

    if len(sys.argv) > 1:
        launch_args["model_parameters_option_Generic2dOscillator_tau"] = sys.argv[1]

    ## launch an operation and have the results stored both in DB and on disk
    launched_operation = flow_service.fire_operation(adapter_instance,
                                                     project.administrator,
                                                     project.id,
                                                     **launch_args)[0]

    ## wait for the operation to finish
    while not launched_operation.has_finished:
        sleep(5)
        launched_operation = dao.get_operation_by_id(launched_operation.id)

    if launched_operation.status == model.STATUS_FINISHED:
        ts = dao.get_generic_entity(TimeSeriesRegion, launched_operation.id,
                                    "fk_from_operation")[0]
        LOG.info("TimeSeries result is: %s " % ts.summary_info)
        print(ts.summary_info)
    else:
        LOG.warning(
            "Operation ended with problems [%s]: [%s]" %
            (launched_operation.status, launched_operation.additional_info))
    def _remove_project_node_files(self, project_id, gid, skip_validation=False):
        """
        Delegate removal of a node in the structure of the project.
        In case of a problem, a StructureException is raised.
        """
        try:
            project = self.find_project(project_id)
            datatype = dao.get_datatype_by_gid(gid)
            links = dao.get_links_for_datatype(datatype.id)

            op = dao.get_operation_by_id(datatype.fk_from_operation)
            adapter = ABCAdapter.build_adapter(op.algorithm)
            if links:
                was_link = False
                for link in links:
                    # This means it's only a link and we need to remove it
                    if link.fk_from_datatype == datatype.id and link.fk_to_project == project.id:
                        dao.remove_entity(Links, link.id)
                        was_link = True
                if not was_link:
                    # Create a clone of the operation.
                    # When there is no view_model, op.view_model_gid is simply None.

                    new_op = Operation(op.view_model_gid,
                                       dao.get_system_user().id,
                                       links[0].fk_to_project,
                                       datatype.parent_operation.fk_from_algo,
                                       datatype.parent_operation.status,
                                       datatype.parent_operation.start_date,
                                       datatype.parent_operation.completion_date,
                                       datatype.parent_operation.fk_operation_group,
                                       datatype.parent_operation.additional_info,
                                       datatype.parent_operation.user_group,
                                       datatype.parent_operation.range_values)
                    new_op = dao.store_entity(new_op)
                    to_project = self.find_project(links[0].fk_to_project)
                    to_project_path = self.structure_helper.get_project_folder(to_project)

                    encryption_handler.set_project_active(to_project)
                    encryption_handler.sync_folders(to_project_path)
                    to_project_name = to_project.name

                    full_path = h5.path_for_stored_index(datatype)
                    self.structure_helper.move_datatype(datatype, to_project_name, str(new_op.id), full_path)
                    # Move also the ViewModel H5
                    old_folder = self.structure_helper.get_project_folder(project, str(op.id))
                    view_model = adapter.load_view_model(op)
                    vm_full_path = h5.determine_filepath(op.view_model_gid, old_folder)
                    self.structure_helper.move_datatype(view_model, to_project_name, str(new_op.id), vm_full_path)

                    encryption_handler.sync_folders(to_project_path)
                    encryption_handler.set_project_inactive(to_project)

                    datatype.fk_from_operation = new_op.id
                    datatype.parent_operation = new_op
                    dao.store_entity(datatype)
                    dao.remove_entity(Links, links[0].id)
            else:
                specific_remover = get_remover(datatype.type)(datatype)
                specific_remover.remove_datatype(skip_validation)
                h5_path = h5.path_for_stored_index(datatype)
                self.structure_helper.remove_datatype_file(h5_path)
                encryption_handler.push_folder_to_sync(self.structure_helper.get_project_folder_from_h5(h5_path))

        except RemoveDataTypeException:
            self.logger.exception("Could not execute operation Node Remove!")
            raise
        except FileStructureException:
            self.logger.exception("Remove operation failed")
            raise StructureException("Remove operation failed for unknown reasons.Please contact system administrator.")
 def _is_group_launch(self):
     """
     Return true if this adapter is launched from a group of operations
     """
     operation = dao.get_operation_by_id(self.operation_id)
     return operation.fk_operation_group is not None
    def build(existing_op_id):
        op = dao.get_operation_by_id(existing_op_id)
        project = dao.get_project_by_id(op.fk_launched_in)
        user = dao.get_user_by_id(op.fk_launched_by)

        return operation_factory(test_user=user, test_project=project), project.id
    def _edit_data(self, datatype, new_data, from_group=False):
        # type: (DataType, dict, bool) -> None
        """
        Private method, used for editing a meta-data XML file and a DataType row
        for a given custom DataType entity with new dictionary of data from UI.
        """
        # 1. First update Operation fields:
        #    Update group field if possible
        new_group_name = new_data[CommonDetails.CODE_OPERATION_TAG]
        empty_group_value = (new_group_name is None or new_group_name == "")
        if from_group:
            if empty_group_value:
                raise StructureException("Empty group is not allowed!")

            group = dao.get_generic_entity(OperationGroup, new_data[CommonDetails.CODE_OPERATION_GROUP_ID])
            if group and len(group) > 0 and new_group_name != group[0].name:
                group = group[0]
                exists_group = dao.get_generic_entity(OperationGroup, new_group_name, 'name')
                if exists_group:
                    raise StructureException("Group '" + new_group_name + "' already exists.")
                group.name = new_group_name
                dao.store_entity(group)
        else:
            operation = dao.get_operation_by_id(datatype.fk_from_operation)
            operation.user_group = new_group_name
            dao.store_entity(operation)
            op_folder = self.structure_helper.get_project_folder(operation.project, str(operation.id))
            vm_gid = operation.view_model_gid
            view_model_file = h5.determine_filepath(vm_gid, op_folder)
            if view_model_file:
                view_model_class = H5File.determine_type(view_model_file)
                view_model = view_model_class()
                with ViewModelH5(view_model_file, view_model) as f:
                    ga = f.load_generic_attributes()
                    ga.operation_tag = new_group_name
                    f.store_generic_attributes(ga, False)
            else:
                self.logger.warning("Could not find ViewModel H5 file for op: {}".format(operation))

        # 2. Update GenericAttributes in the associated H5 files:
        h5_path = h5.path_for_stored_index(datatype)
        with H5File.from_file(h5_path) as f:
            ga = f.load_generic_attributes()

            ga.subject = new_data[DataTypeOverlayDetails.DATA_SUBJECT]
            ga.state = new_data[DataTypeOverlayDetails.DATA_STATE]
            ga.operation_tag = new_group_name
            if DataTypeOverlayDetails.DATA_TAG_1 in new_data:
                ga.user_tag_1 = new_data[DataTypeOverlayDetails.DATA_TAG_1]
            if DataTypeOverlayDetails.DATA_TAG_2 in new_data:
                ga.user_tag_2 = new_data[DataTypeOverlayDetails.DATA_TAG_2]
            if DataTypeOverlayDetails.DATA_TAG_3 in new_data:
                ga.user_tag_3 = new_data[DataTypeOverlayDetails.DATA_TAG_3]
            if DataTypeOverlayDetails.DATA_TAG_4 in new_data:
                ga.user_tag_4 = new_data[DataTypeOverlayDetails.DATA_TAG_4]
            if DataTypeOverlayDetails.DATA_TAG_5 in new_data:
                ga.user_tag_5 = new_data[DataTypeOverlayDetails.DATA_TAG_5]

            f.store_generic_attributes(ga, False)

        # 3. Update MetaData in DT Index DB as well.
        datatype.fill_from_generic_attributes(ga)
        dao.store_entity(datatype)
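The five tag assignments in step 2 follow one pattern; a compact equivalent using setattr (a sketch over the same DataTypeOverlayDetails keys, not the method as shipped):

TAG_KEYS = [(DataTypeOverlayDetails.DATA_TAG_1, 'user_tag_1'),
            (DataTypeOverlayDetails.DATA_TAG_2, 'user_tag_2'),
            (DataTypeOverlayDetails.DATA_TAG_3, 'user_tag_3'),
            (DataTypeOverlayDetails.DATA_TAG_4, 'user_tag_4'),
            (DataTypeOverlayDetails.DATA_TAG_5, 'user_tag_5')]

for key, attr in TAG_KEYS:
    if key in new_data:
        # Copy each UI-supplied tag onto the matching GenericAttributes field.
        setattr(ga, attr, new_data[key])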