Example #1
    def create_group(test_user=None, test_project=None, subject="John Doe"):
        """
        Create a group of 2 operations, each with at least one resultant DataType.
        """
        if test_user is None:
            test_user = TestFactory.create_user()
        if test_project is None:
            test_project = TestFactory.create_project(test_user)

        adapter_inst = TestFactory.create_adapter('tvb.tests.framework.adapters.testadapter3', 'TestAdapter3')
        adapter_inst.generic_attributes.subject = subject

        view_model = adapter_inst.get_view_model()()
        args = {RANGE_PARAMETER_1: 'param_5', 'param_5': json.dumps({constants.ATT_MINVALUE: 1,
                                                                     constants.ATT_MAXVALUE: 2.1,
                                                                     constants.ATT_STEP: 1})}
        algo = adapter_inst.stored_adapter
        algo_category = dao.get_category_by_id(algo.fk_category)

        # Prepare Operations group. Execute them synchronously
        service = OperationService()
        operations = service.prepare_operations(test_user.id, test_project, algo, algo_category,
                                                view_model=view_model, **args)[0]
        service.launch_operation(operations[0].id, False, adapter_inst)
        service.launch_operation(operations[1].id, False, adapter_inst)

        resulted_dts = dao.get_datatype_in_group(operation_group_id=operations[0].fk_operation_group)
        return resulted_dts, operations[0].fk_operation_group
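
A minimal usage sketch (not part of the original example, and hedged): how a pytest-style test might call the helper above. The assertion counts simply follow the docstring's promise of two operations, each producing at least one DataType.

    def test_create_group_results():
        # create_group launches two operations synchronously and returns
        # the resulting DataTypes plus the id of their OperationGroup
        datatypes, operation_group_id = create_group(subject="Jane Doe")
        assert operation_group_id is not None
        assert len(datatypes) >= 2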
Example #2
 def create_group(test_user=None, test_project=None, subject="John Doe"):
     """
     Create a group of 2 operations, each with at least one resultant DataType.
     """
     if test_user is None:
         test_user = TestFactory.create_user()  
     if test_project is None:
         test_project = TestFactory.create_project(test_user)
        
     ### Retrieve Adapter instance 
     algo_group = dao.find_group('tvb.tests.framework.adapters.testadapter3', 'TestAdapter3')
     algo_category = dao.get_category_by_id(algo_group.fk_category)
     algo = dao.get_algorithm_by_group(algo_group.id) 
     
     adapter_inst = TestFactory.create_adapter(algo_group=algo_group, test_project=test_project)
     adapter_inst.meta_data = {DataTypeMetaData.KEY_SUBJECT: subject}
     args = {model.RANGE_PARAMETER_1: 'param_5', 'param_5': [1, 2]}
     
     ### Prepare Operations group. Execute them synchronously
     service = OperationService()
     operations = service.prepare_operations(test_user.id, test_project.id, algo, algo_category, {}, **args)[0]
     service.launch_operation(operations[0].id, False, adapter_inst)
     service.launch_operation(operations[1].id, False, adapter_inst)
     
     resulted_dts = dao.get_datatype_in_group(operation_group_id=operations[0].fk_operation_group)
     return resulted_dts, operations[0].fk_operation_group
Example #4
    def launch_synchronously(test_user, test_project, adapter_instance, view_model, algo_category=None):
        # Avoid the scheduled execution, as this is asynchronous; launch it immediately instead
        service = OperationService()
        algorithm = adapter_instance.stored_adapter
        if algo_category is None:
            algo_category = dao.get_category_by_id(algorithm.fk_category)
        operation = service.prepare_operations(test_user.id, test_project, algorithm, algo_category,
                                               True, view_model=view_model)[0][0]
        service.initiate_prelaunch(operation, adapter_instance)

        operation = dao.get_operation_by_id(operation.id)
        # Check that operation status after execution is success.
        assert STATUS_FINISHED == operation.status
        # Make sure at least one result exists for each BCT algorithm
        return dao.get_generic_entity(DataType, operation.id, 'fk_from_operation')
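
A hedged sketch of how launch_synchronously above could be driven from a test; the adapter module/class and view-model field names are illustrative assumptions (they mirror the TestAdapter1 usage elsewhere in these examples), not part of the original snippet.

    adapter = TestFactory.create_adapter('tvb.tests.framework.adapters.testadapter1', 'TestAdapter1')
    view_model = adapter.get_view_model()()
    view_model.test1_val1 = 1   # assumed view-model fields for the test adapter
    view_model.test1_val2 = 1
    results = launch_synchronously(test_user, test_project, adapter, view_model)
    # launch_synchronously asserts STATUS_FINISHED internally and returns the produced DataTypes
    assert len(results) > 0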
Example #5
    def test_bct_all(self):
        """
        Iterate all BCT algorithms and execute them.
        """
        service = OperationService()
        algo_category = dao.get_category_by_id(self.bct_adapters[0].stored_adapter.fk_category)
        for adapter_instance in self.bct_adapters:
            algorithm = adapter_instance.stored_adapter
            view_model = BaseBCTModel()
            view_model.connectivity = self.connectivity.gid

            # Avoid the scheduled execution, as this is asynchronous; launch it immediately instead
            operation = service.prepare_operations(self.test_user.id, self.test_project, algorithm, algo_category,
                                                   {}, True, view_model=view_model)[0][0]
            service.initiate_prelaunch(operation, adapter_instance)

            operation = dao.get_operation_by_id(operation.id)
            # Check that operation status after execution is success.
            assert STATUS_FINISHED == operation.status
            # Make sure at least one result exists for each BCT algorithm
            results = dao.get_generic_entity(DataType, operation.id, 'fk_from_operation')
            assert len(results) > 0
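
For a single BCT adapter the same flow can be collapsed onto the launch_synchronously helper from Example #4. This is a hedged sketch of a hypothetical test method, assuming self.bct_adapters and self.connectivity are prepared exactly as in the test above and that the helper is available in this scope.

    def test_bct_single(self):
        adapter_instance = self.bct_adapters[0]
        view_model = BaseBCTModel()
        view_model.connectivity = self.connectivity.gid
        # delegate the prepare / prelaunch / status-check boilerplate to the helper
        results = launch_synchronously(self.test_user, self.test_project, adapter_instance, view_model)
        assert len(results) > 0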
Example #6
class BurstService():
    """
    Service layer for Burst related entities.
    """
    def __init__(self):
        self.operation_service = OperationService()
        self.workflow_service = WorkflowService()
        self.logger = get_logger(self.__class__.__module__)

    def build_portlet_interface(self, portlet_configuration, project_id):
        """
        From a portlet_configuration and a project_id, first build the portlet
        entity, then get its configurable interface.

        :param portlet_configuration: a portlet configuration entity. It holds at
            least the portlet_id, and in case any default parameters were saved
            they can be rebuilt from the analyzer / visualizer parameters
        :param project_id: the id of the current project   
            
        :returns: the portlet interface will be of the following form::
            [{'interface': adapter_interface, 
            'prefix': prefix_for_parameter_names, 
            'subalg': {algorithm_field_name: default_algorithm_value},
            'algo_group': algorithm_group,
            'alg_ui_name': displayname},
            ......]
            A list of dictionaries for each adapter that makes up the portlet.
            
        """
        portlet_entity = dao.get_portlet_by_id(
            portlet_configuration.portlet_id)
        if portlet_entity is None:
            raise InvalidPortletConfiguration(
                "No portlet entity located in database with id=%s. "
                "Portlet configuration %s is not valid." %
                (portlet_configuration.portlet_id, portlet_configuration))
        portlet_configurer = PortletConfigurer(portlet_entity)
        portlet_interface = portlet_configurer.get_configurable_interface()
        self.logger.debug("Created interface for portlet " +
                          str([portlet_entity]))

        for adapter_conf in portlet_interface:
            interface = adapter_conf.interface
            interface = FlowService().prepare_parameters(
                interface, project_id, adapter_conf.group.fk_category)
            interface = ABCAdapter.prepare_param_names(interface,
                                                       adapter_conf.prefix)
            adapter_conf.interface = interface

        portlet_configurer.update_default_values(portlet_interface,
                                                 portlet_configuration)
        portlet_configurer.prefix_adapters_parameters(portlet_interface)

        return portlet_interface

    @staticmethod
    def update_portlet_configuration(portlet_configuration,
                                     submited_parameters):
        """
        :param portlet_configuration: the portlet configuration that needs to be updated
        :param submited_parameters: the parameters as submitted from the UI; a
            dictionary of the form:
            {'dynamic' : {name:value pairs}, 'static' : {name:value pairs}}
            
        All names are prefixed with adapter specific generated prefix.
        """
        portlet_entity = dao.get_portlet_by_id(
            portlet_configuration.portlet_id)
        portlet_configurer = PortletConfigurer(portlet_entity)
        return portlet_configurer.update_portlet_configuration(
            portlet_configuration, submited_parameters)

    @staticmethod
    def new_burst_configuration(project_id):
        """
        Return a new burst configuration entity with all the default values.
        """
        burst_configuration = model.BurstConfiguration(project_id)
        burst_configuration.selected_tab = 0
        BurstService.set_default_portlets(burst_configuration)
        return burst_configuration

    @staticmethod
    def set_default_portlets(burst_configuration):
        """
        Sets the default portlets for the specified burst configuration.
        The default portlets are specified in the __init__.py script from tvb root.
        """
        for tab_idx, value in DEFAULT_PORTLETS.items():
            for sel_idx, portlet_identifier in value.items():
                portlet = BurstService.get_portlet_by_identifier(
                    portlet_identifier)
                if portlet is not None:
                    portlet_configuration = BurstService.new_portlet_configuration(
                        portlet.id, tab_idx, sel_idx,
                        portlet.algorithm_identifier)
                    burst_configuration.set_portlet(tab_idx, sel_idx,
                                                    portlet_configuration)

    @staticmethod
    def _store_burst_config(burst_config):
        """
        Store a burst configuration entity.
        """
        burst_config.prepare_before_save()
        saved_entity = dao.store_entity(burst_config)
        return saved_entity.id

    @staticmethod
    def get_available_bursts(project_id):
        """
        Return all the bursts for the given project.
        """
        bursts = dao.get_bursts_for_project(
            project_id, page_size=MAX_BURSTS_DISPLAYED) or []
        for burst in bursts:
            burst.prepare_after_load()
        return bursts

    @staticmethod
    def populate_burst_disk_usage(bursts):
        """
        Adds a disk_usage field to each burst object.
        The disk usage is computed as the sum of the sizes of the datatypes generated by that burst.
        """
        sizes = dao.compute_bursts_disk_size([b.id for b in bursts])
        for b in bursts:
            b.disk_size = format_bytes_human(sizes[b.id])

    @staticmethod
    def rename_burst(burst_id, new_name):
        """
        Rename the burst given by burst_id, setting its name to
        new_name.
        """
        burst = dao.get_burst_by_id(burst_id)
        burst.name = new_name
        dao.store_entity(burst)

    def load_burst(self, burst_id):
        """
        :param burst_id: the id of the burst that should be loaded
        
        Having this input the method should:
        
            - load the entity from the DB
            - get all the workflow steps for the saved burst id
            - go through the visualization workflow steps to create the tab
                configuration of the burst using the tab_index and index_in_tab 
                fields saved on each workflow_step
                
        """
        burst = dao.get_burst_by_id(burst_id)
        burst.prepare_after_load()
        burst.reset_tabs()
        burst_workflows = dao.get_workflows_for_burst(burst.id)

        group_gid = None
        if len(burst_workflows) == 1:
            # A simple burst with no range parameters
            burst = self.__populate_tabs_from_workflow(burst,
                                                       burst_workflows[0])
        elif len(burst_workflows) > 1:
            # A burst with a range of values created multiple workflows, so we need
            # to launch a parameter space exploration with the resulting group
            self.__populate_tabs_from_workflow(burst, burst_workflows[0])
            executed_steps = dao.get_workflow_steps(burst_workflows[0].id)

            operation = dao.get_operation_by_id(executed_steps[0].fk_operation)
            if operation.operation_group:
                workflow_group = dao.get_datatypegroup_by_op_group_id(
                    operation.operation_group.id)
                group_gid = workflow_group.gid
        return burst, group_gid

    @staticmethod
    def __populate_tabs_from_workflow(burst_entity, workflow):
        """
        Given a burst and a workflow populate the tabs of the burst with the PortletConfigurations
        generated from the steps of the workflow.
        """
        visualizers = dao.get_visualization_steps(workflow.id)
        for entry in visualizers:
            ## For each visualize step, also load all of the analyze steps.
            portlet_cfg = PortletConfiguration(entry.fk_portlet)
            portlet_cfg.set_visualizer(entry)
            analyzers = dao.get_workflow_steps_for_position(
                entry.fk_workflow, entry.tab_index, entry.index_in_tab)
            portlet_cfg.set_analyzers(analyzers)
            burst_entity.tabs[entry.tab_index].portlets[
                entry.index_in_tab] = portlet_cfg
        return burst_entity

    def load_tab_configuration(self, burst_entity, op_id):
        """
        Given a burst entity and an operation id, find the workflow to which the op_id
        belongs and the load the burst_entity's tab configuration with those workflow steps.
        """
        originating_workflow = dao.get_workflow_for_operation_id(op_id)
        burst_entity = self.__populate_tabs_from_workflow(
            burst_entity, originating_workflow)
        return burst_entity

    @staticmethod
    def new_portlet_configuration(portlet_id,
                                  tab_nr=-1,
                                  index_in_tab=-1,
                                  portlet_name='Default'):
        """
        Return a new portlet configuration entity with default parameters.
        
        :param portlet_id: the id of the portlet for which a configuration will
            be stored
        :param tab_nr: the index of the currently selected tab
        :param index_in_tab: the index from the currently selected tab
        
        """
        portlet_entity = dao.get_portlet_by_id(portlet_id)
        if portlet_entity is None:
            raise InvalidPortletConfiguration(
                "No portlet entity located in database with id=%s." %
                portlet_id)
        portlet_configurer = PortletConfigurer(portlet_entity)
        configuration = portlet_configurer.create_new_portlet_configuration(
            portlet_name)
        for wf_step in configuration.analyzers:
            wf_step.tab_index = tab_nr
            wf_step.index_in_tab = index_in_tab
        configuration.visualizer.tab_index = tab_nr
        configuration.visualizer.index_in_tab = index_in_tab
        return configuration

    @staticmethod
    def get_available_portlets():
        """
        :returns: a list of all the available portlet entites
        """
        return dao.get_available_portlets()

    @staticmethod
    def get_portlet_by_id(portlet_id):
        """
        :returns: the portlet entity with the id =@portlet_id
        """
        return dao.get_portlet_by_id(portlet_id)

    @staticmethod
    def get_portlet_by_identifier(portlet_identifier):
        """
        :returns: the portlet entity with the algorithm identifier =@portlet_identifier
        """
        return dao.get_portlet_by_identifier(portlet_identifier)

    def launch_burst(self,
                     burst_configuration,
                     simulator_index,
                     simulator_id,
                     user_id,
                     launch_mode=LAUNCH_NEW):
        """
        Given a burst configuration and all the necessary data do the actual launch.
        
        :param burst_configuration: BurstConfiguration   
        :param simulator_index: the position the simulator will take within the workflow step list. This is needed
            so that the rest of the portlet workflow steps know which steps their dynamic parameters come from.
        :param simulator_id: the id of the simulator adapter as stored in the DB. It's needed to load the simulator algo
            group and category that are then passed to the launcher's prepare_operation method.
        :param user_id: the id of the user that launched this burst
        :param launch_mode: new/branch/continue
        """
        ## 1. Prepare BurstConfiguration entity
        if launch_mode == LAUNCH_NEW:
            ## Fully new entity for new simulation
            burst_config = burst_configuration.clone()
            if burst_config.name is None:
                new_id = dao.get_max_burst_id() + 1
                burst_config.name = 'simulation_' + str(new_id)
        else:
            ## Branch or Continue simulation
            burst_config = burst_configuration
            simulation_state = dao.get_generic_entity(
                SIMULATION_DATATYPE_MODULE + "." + SIMULATION_DATATYPE_CLASS,
                burst_config.id, "fk_parent_burst")
            if simulation_state is None or len(simulation_state) < 1:
                exc = BurstServiceException(
                    "Simulation State not found for %s, "
                    "thus we are unable to branch from it!" %
                    burst_config.name)
                self.logger.error(exc)
                raise exc

            simulation_state = simulation_state[0]
            burst_config.update_simulation_parameter("simulation_state",
                                                     simulation_state.gid)
            burst_config = burst_configuration.clone()

            count = dao.count_bursts_with_name(burst_config.name,
                                               burst_config.fk_project)
            burst_config.name = burst_config.name + "_" + launch_mode + str(
                count)

        ## 2. Create Operations and do the actual launch
        if launch_mode in [LAUNCH_NEW, LAUNCH_BRANCH]:
            ## New Burst entry in the history
            burst_id = self._store_burst_config(burst_config)
            thread = threading.Thread(target=self._async_launch_and_prepare,
                                      kwargs={
                                          'burst_config': burst_config,
                                          'simulator_index': simulator_index,
                                          'simulator_id': simulator_id,
                                          'user_id': user_id
                                      })
            thread.start()
            return burst_id, burst_config.name
        else:
            ## Continue simulation
            ## TODO
            return burst_config.id, burst_config.name

    @transactional
    def _prepare_operations(self, burst_config, simulator_index, simulator_id,
                            user_id):
        """
        Prepare all required operations for burst launch.
        """
        project_id = burst_config.fk_project
        burst_id = burst_config.id
        workflow_step_list = []
        starting_index = simulator_index + 1

        sim_algo = FlowService().get_algorithm_by_identifier(simulator_id)
        metadata = {DataTypeMetaData.KEY_BURST: burst_id}
        launch_data = burst_config.get_all_simulator_values()[0]
        operations, group = self.operation_service.prepare_operations(
            user_id, project_id, sim_algo, sim_algo.algo_group.group_category,
            metadata, **launch_data)
        group_launched = group is not None
        if group_launched:
            starting_index += 1

        for tab in burst_config.tabs:
            for portlet_cfg in tab.portlets:
                ### For each portlet configuration stored, update the step index ###
                ### and also change the dynamic parameters step indexes to point ###
                ### to the simulator outputs.                                     ##
                if portlet_cfg is not None:
                    analyzers = portlet_cfg.analyzers
                    visualizer = portlet_cfg.visualizer
                    for entry in analyzers:
                        entry.step_index = starting_index
                        self.workflow_service.set_dynamic_step_references(
                            entry, simulator_index)
                        workflow_step_list.append(entry)
                        starting_index += 1
                    ### Change the dynamic parameters to point to the last adapter from this portlet execution.
                    visualizer.step_visible = False
                    if len(workflow_step_list) > 0 and isinstance(
                            workflow_step_list[-1], model.WorkflowStep):
                        self.workflow_service.set_dynamic_step_references(
                            visualizer, workflow_step_list[-1].step_index)
                    else:
                        self.workflow_service.set_dynamic_step_references(
                            visualizer, simulator_index)
                    ### Only a single (non-group) operation gets a visualization step; otherwise it is useless.
                    if not group_launched:
                        workflow_step_list.append(visualizer)

        if group_launched:
            ###  For a group of operations, make sure the metric for PSE view
            ### is also computed, immediately after the simulation.
            metric_algo, metric_group = FlowService(
            ).get_algorithm_by_module_and_class(MEASURE_METRICS_MODULE,
                                                MEASURE_METRICS_CLASS)
            _, metric_interface = FlowService().prepare_adapter(
                project_id, metric_group)
            dynamics = {}
            for entry in metric_interface:
                # We have a select that should be the dataType and a select multiple with the
                # required metric algorithms to be evaluated. Only dynamic parameter should be
                # the select type.
                if entry[ABCAdapter.KEY_TYPE] == 'select':
                    dynamics[entry[ABCAdapter.KEY_NAME]] = {
                        WorkflowStepConfiguration.DATATYPE_INDEX_KEY: 0,
                        WorkflowStepConfiguration.STEP_INDEX_KEY:
                        simulator_index
                    }
            metric_step = model.WorkflowStep(algorithm_id=metric_algo.id,
                                             step_index=simulator_index + 1,
                                             static_param={},
                                             dynamic_param=dynamics)
            metric_step.step_visible = False
            workflow_step_list.insert(0, metric_step)

        workflows = self.workflow_service.create_and_store_workflow(
            project_id, burst_id, simulator_index, simulator_id, operations)
        self.operation_service.prepare_operations_for_workflowsteps(
            workflow_step_list, workflows, user_id, burst_id, project_id,
            group, operations)
        operation_ids = [operation.id for operation in operations]
        return operation_ids

    def _async_launch_and_prepare(self, burst_config, simulator_index,
                                  simulator_id, user_id):
        """
        Prepare operations asynchronously.
        """
        try:
            operation_ids = self._prepare_operations(burst_config,
                                                     simulator_index,
                                                     simulator_id, user_id)
            self.logger.debug("Starting a total of %s workflows" %
                              (len(operation_ids, )))
            wf_errs = 0
            for operation_id in operation_ids:
                try:
                    OperationService().launch_operation(operation_id, True)
                except Exception as excep:
                    self.logger.error(excep)
                    wf_errs += 1
                    self.workflow_service.mark_burst_finished(
                        burst_config, error_message=str(excep))

            self.logger.debug("Finished launching workflows. " +
                              str(len(operation_ids) - wf_errs) +
                              " were launched successfully, " + str(wf_errs) +
                              " had error on pre-launch steps")
        except Exception as excep:
            self.logger.error(excep)
            self.workflow_service.mark_burst_finished(burst_config,
                                                      error_message=str(excep))
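
A minimal sketch of how the service above might be driven end to end: build a default configuration and launch it as a new burst. project_id, simulator_algorithm_id and user_id are placeholders for values that would normally come from the current session; this is an illustration under those assumptions, not the library's documented entry point.

    burst_service = BurstService()
    # new_burst_configuration fills in the default portlets for every tab
    burst_config = BurstService.new_burst_configuration(project_id)
    # launch_burst stores the configuration and fires the operations on a background thread,
    # returning the stored burst id and the generated burst name
    burst_id, burst_name = burst_service.launch_burst(burst_config, simulator_index=0,
                                                      simulator_id=simulator_algorithm_id,
                                                      user_id=user_id)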
Example #7
class TestFlowContoller(BaseControllersTest):
    """ Unit tests for FlowController """
    
    def setup_method(self):
        """
        Sets up the environment for testing;
        creates a `FlowController`
        """
        self.init()
        self.flow_c = FlowController()
        self.burst_c = BurstController()
        self.operation_service = OperationService()
    
    
    def teardown_method(self):
        """ Cleans up the testing environment """
        self.cleanup()
        self.clean_database()

    @pytest.fixture()
    def long_burst_launch(self, connectivity_factory):

        def build(is_range=False):
            self.burst_c.index()
            connectivity = connectivity_factory[1]
            launch_params = copy.deepcopy(SIMULATOR_PARAMETERS)
            launch_params['connectivity'] = dao.get_datatype_by_id(connectivity.id).gid
            launch_params['simulation_length'] = '10000'
            if is_range:
                launch_params['conduction_speed'] = '[10,15,20]'
                launch_params[RANGE_PARAMETER_1] = 'conduction_speed'
            launch_params = {"simulator_parameters": json.dumps(launch_params)}
            burst_id = json.loads(self.burst_c.launch_burst("new", "test_burst", **launch_params))['id']
            return dao.get_burst_by_id(burst_id)

        return build
            
            
    def test_context_selected(self):
        """
        Remove the project from CherryPy session and check that you are redirected to projects page.
        """
        del cherrypy.session[common.KEY_PROJECT]
        self._expect_redirect('/project/viewall', self.flow_c.step_analyzers)
        
        
    def test_valid_step(self):
        """
        For all algorithm categories check that a submenu is generated and the result
        page has its title given by the category name.
        """
        result_dict = self.flow_c.step_analyzers()
        assert common.KEY_SUBMENU_LIST in result_dict,\
                        "Expect to have a submenu with available algorithms for category."
        assert result_dict["section_name"] == 'analyze'


    def test_step_connectivity(self):
        """
        Check that the correct section name and connectivity sub-menu are returned for the connectivity step.
        """
        result_dict = self.flow_c.step_connectivity()
        assert result_dict['section_name'] == 'connectivity'
        assert result_dict['submenu_list'] == self.flow_c.connectivity_submenu


    def test_default(self):
        """
        Test default method from step controllers. Check that the submit link is ok, that a mainContent
        is present in result dict and that the isAdapter flag is set to true.
        """
        cherrypy.request.method = "GET"
        categories = dao.get_algorithm_categories()
        for categ in categories:
            algo_groups = dao.get_adapters_from_categories([categ.id])
            for algo in algo_groups:
                result_dict = self.flow_c.default(categ.id, algo.id)
                assert result_dict[common.KEY_SUBMIT_LINK] == '/flow/%i/%i' % (categ.id, algo.id)
                assert 'mainContent' in result_dict
                assert result_dict['isAdapter']
                
                
    def test_default_cancel(self):
        """
        On cancel we should get a redirect to the back page link.
        """
        cherrypy.request.method = "POST"
        categories = dao.get_algorithm_categories()
        algo_groups = dao.get_adapters_from_categories([categories[0].id])
        self._expect_redirect('/project/viewoperations/%i' % self.test_project.id, self.flow_c.default,
                              categories[0].id, algo_groups[0].id, cancel=True, back_page='operations')
        
        
    def test_default_invalid_key(self):
        """
        Pass invalid keys for adapter and step and check you get redirect to tvb entry
        page with error set.
        """
        self._expect_redirect('/tvb?error=True', self.flow_c.default, 'invalid', 'invalid')
        
        
    def test_read_datatype_attribute(self, datatype_with_storage_factory):
        """
        Read an attribute from a datatype.
        """
        dt = datatype_with_storage_factory("test_subject", "RAW_STATE",
                                                             'this is the stored data'.split())
        returned_data = self.flow_c.read_datatype_attribute(dt.gid, "string_data")
        assert returned_data == '["this", "is", "the", "stored", "data"]'
        
        
    def test_read_datatype_attribute_method_call(self, datatype_with_storage_factory):
        """
        Call method on given datatype.
        """
        dt = datatype_with_storage_factory("test_subject", "RAW_STATE",
                                                             'this is the stored data'.split())
        args = {'length': 101}
        returned_data = self.flow_c.read_datatype_attribute(dt.gid, 'return_test_data', **args)
        assert returned_data == str(list(range(101)))
        
        
    def test_get_simple_adapter_interface(self):
        adapter = dao.get_algorithm_by_module('tvb.tests.framework.adapters.testadapter1', 'TestAdapter1')
        result = self.flow_c.get_simple_adapter_interface(adapter.id)
        expected_interface = TestAdapter1().get_input_tree()
        assert result['inputList'] == expected_interface

    def _wait_for_burst_ops(self, burst_config):
        """ sleeps until some operation of the burst is created"""
        waited = 1
        timeout = 50
        operations = dao.get_operations_in_burst(burst_config.id)
        while not len(operations) and waited <= timeout:
            sleep(1)
            waited += 1
            operations = dao.get_operations_in_burst(burst_config.id)
        operations = dao.get_operations_in_burst(burst_config.id)
        return operations


    def test_stop_burst_operation(self, long_burst_launch):
        burst_config = long_burst_launch()
        operation = self._wait_for_burst_ops(burst_config)[0]
        assert not operation.has_finished
        self.flow_c.stop_burst_operation(operation.id, 0, False)
        operation = dao.get_operation_by_id(operation.id)
        assert operation.status == STATUS_CANCELED
        
        
    def test_stop_burst_operation_group(self, long_burst_launch):
        burst_config = long_burst_launch(True)
        operations = self._wait_for_burst_ops(burst_config)
        operations_group_id = 0
        for operation in operations:
            assert not operation.has_finished
            operations_group_id = operation.fk_operation_group
        self.flow_c.stop_burst_operation(operations_group_id, 1, False)
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            assert operation.status == STATUS_CANCELED
        
        
    def test_remove_burst_operation(self, long_burst_launch):
        burst_config = long_burst_launch()
        operation = self._wait_for_burst_ops(burst_config)[0]
        assert not operation.has_finished
        self.flow_c.stop_burst_operation(operation.id, 0, True)
        operation = dao.try_get_operation_by_id(operation.id)
        assert operation is None
        
        
    def test_remove_burst_operation_group(self, long_burst_launch):
        burst_config = long_burst_launch(True)
        operations = self._wait_for_burst_ops(burst_config)
        operations_group_id = 0
        for operation in operations:
            assert not operation.has_finished
            operations_group_id = operation.fk_operation_group
        self.flow_c.stop_burst_operation(operations_group_id, 1, True)
        for operation in operations:
            operation = dao.try_get_operation_by_id(operation.id)
            assert operation is None


    def _launch_test_algo_on_cluster(self, **data):
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter1", "TestAdapter1")
        algo = adapter.stored_adapter
        algo_category = dao.get_category_by_id(algo.fk_category)
        operations, _ = self.operation_service.prepare_operations(self.test_user.id, self.test_project.id, algo,
                                                                  algo_category, {}, **data)
        self.operation_service._send_to_cluster(operations, adapter)
        return operations


    def test_stop_operations(self):
        data = {"test1_val1": 5, 'test1_val2': 5}
        operations = self._launch_test_algo_on_cluster(**data)
        operation = dao.get_operation_by_id(operations[0].id)
        assert not operation.has_finished
        self.flow_c.stop_operation(operation.id, 0, False)
        operation = dao.get_operation_by_id(operation.id)
        assert operation.status == STATUS_CANCELED
        
        
    def test_stop_operations_group(self):
        data = {RANGE_PARAMETER_1: "test1_val1", "test1_val1": '5,6,7', 'test1_val2': 5}
        operations = self._launch_test_algo_on_cluster(**data)
        operation_group_id = 0
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            assert not operation.has_finished
            operation_group_id = operation.fk_operation_group
        self.flow_c.stop_operation(operation_group_id, 1, False)
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            assert operation.status == STATUS_CANCELED
Example #8
class WorkflowTest(TransactionalTestCase):
    """
    Test that workflow conversion methods are valid.
    """
    def setUp(self):
        """
        Sets up the testing environment;
        saves config file;
        creates a test user, a test project;
        creates burst, operation, flow and workflow services
        """
        self.test_user = TestFactory.create_user()
        self.test_project = TestFactory.create_project(self.test_user)
        self.workflow_service = WorkflowService()
        self.burst_service = BurstService()
        self.operation_service = OperationService()
        self.flow_service = FlowService()

    def tearDown(self):
        """
        Remove project folders and clean up database.
        """
        FilesHelper().remove_project_structure(self.test_project.name)
        self.delete_project_folders()

    def __create_complex_workflow(self, workflow_step_list):
        """
        Creates a burst with a complex workflow with a given list of workflow steps.
        :param workflow_step_list: a list of workflow steps that will be used in the
            creation of a new workflow for a new burst
        """
        burst_config = TestFactory.store_burst(self.test_project.id)

        stored_dt = datatypes_factory.DatatypesFactory()._store_datatype(
            Datatype1())

        first_step_algorithm = self.flow_service.get_algorithm_by_module_and_class(
            "tvb.tests.framework.adapters.testadapter1",
            "TestAdapterDatatypeInput")
        metadata = {DataTypeMetaData.KEY_BURST: burst_config.id}
        kwargs = {"test_dt_input": stored_dt.gid, 'test_non_dt_input': '0'}
        operations, group = self.operation_service.prepare_operations(
            self.test_user.id, self.test_project.id, first_step_algorithm,
            first_step_algorithm.algorithm_category, metadata, **kwargs)

        workflows = self.workflow_service.create_and_store_workflow(
            project_id=self.test_project.id,
            burst_id=burst_config.id,
            simulator_index=0,
            simulator_id=first_step_algorithm.id,
            operations=operations)
        self.operation_service.prepare_operations_for_workflowsteps(
            workflow_step_list, workflows, self.test_user.id, burst_config.id,
            self.test_project.id, group, operations)
        #fire the first op
        if len(operations) > 0:
            self.operation_service.launch_operation(operations[0].id, False)
        return burst_config.id

    def test_workflow_generation(self):
        """
        A simple test just for the fact that a workflow is created and run,
        with no dynamic parameters passed. In this case we create a two-step
        workflow: step1 - tvb.tests.framework.adapters.testadapter2.TestAdapter2
                  step2 - tvb.tests.framework.adapters.testadapter1.TestAdapter1
        The first adapter doesn't return anything and the second returns one
        tvb.datatypes.datatype1.Datatype1 instance. We check that the steps
        are actually run by checking that two operations are created and that
        one dataType is stored.
        """
        workflow_step_list = [
            TestFactory.create_workflow_step(
                "tvb.tests.framework.adapters.testadapter2",
                "TestAdapter2",
                step_index=1,
                static_kwargs={"test2": 2}),
            TestFactory.create_workflow_step(
                "tvb.tests.framework.adapters.testadapter1",
                "TestAdapter1",
                step_index=2,
                static_kwargs={
                    "test1_val1": 1,
                    "test1_val2": 1
                })
        ]
        self.__create_complex_workflow(workflow_step_list)
        stored_datatypes = dao.get_datatypes_in_project(self.test_project.id)
        self.assertTrue(
            len(stored_datatypes) == 2,
            "DataType from second step was not stored.")
        self.assertTrue(stored_datatypes[0].type == 'Datatype1',
                        "Wrong type was stored.")
        self.assertTrue(stored_datatypes[1].type == 'Datatype1',
                        "Wrong type was stored.")

        finished, started, error, _, _ = dao.get_operation_numbers(
            self.test_project.id)
        self.assertEqual(
            finished, 3,
            "Didnt start operations for both adapters in workflow.")
        self.assertEqual(started, 0,
                         "Some operations from workflow didnt finish.")
        self.assertEqual(error, 0,
                         "Some operations finished with error status.")

    def test_workflow_dynamic_params(self):
        """
        A simple test just for the fact that dynamic parameters are passed properly
        between two workflow steps: 
                  step1 - tvb.tests.framework.adapters.testadapter1.TestAdapter1
                  step2 - tvb.tests.framework.adapters.testadapter3.TestAdapter3
        The first adapter returns a tvb.datatypes.datatype1.Datatype1 instance. 
        The second adapter has this passed as a dynamic workflow parameter.
        We check that the steps are actually run by checking that two operations
        are created and that two dataTypes are stored.
        """
        workflow_step_list = [
            TestFactory.create_workflow_step(
                "tvb.tests.framework.adapters.testadapter1",
                "TestAdapter1",
                step_index=1,
                static_kwargs={
                    "test1_val1": 1,
                    "test1_val2": 1
                }),
            TestFactory.create_workflow_step(
                "tvb.tests.framework.adapters.testadapter3",
                "TestAdapter3",
                step_index=2,
                dynamic_kwargs={
                    "test": {
                        wf_cfg.DATATYPE_INDEX_KEY: 0,
                        wf_cfg.STEP_INDEX_KEY: 1
                    }
                })
        ]

        self.__create_complex_workflow(workflow_step_list)
        stored_datatypes = dao.get_datatypes_in_project(self.test_project.id)
        self.assertTrue(
            len(stored_datatypes) == 3,
            "DataType from all step were not stored.")
        for result_row in stored_datatypes:
            self.assertTrue(result_row.type in ['Datatype1', 'Datatype2'],
                            "Wrong type was stored.")

        finished, started, error, _, _ = dao.get_operation_numbers(
            self.test_project.id)
        self.assertEqual(
            finished, 3,
            "Didn't start operations for both adapters in workflow.")
        self.assertEqual(started, 0,
                         "Some operations from workflow didn't finish.")
        self.assertEqual(error, 0,
                         "Some operations finished with error status.")

    def test_configuration2workflow(self):
        """
        Test building a WorkflowStep from a WorkflowStepConfiguration. Make sure all the data is
        correctly passed. Also check that base_step is added to each dynamic parameter's step index.
        """
        workflow_step = TestFactory.create_workflow_step(
            "tvb.tests.framework.adapters.testadapter1",
            "TestAdapter1",
            static_kwargs={"static_param": "test"},
            dynamic_kwargs={
                "dynamic_param": {
                    wf_cfg.STEP_INDEX_KEY: 0,
                    wf_cfg.DATATYPE_INDEX_KEY: 0
                }
            },
            step_index=1,
            base_step=5)
        self.assertEqual(workflow_step.step_index, 1,
                         "Wrong step index in created workflow step.")
        self.assertEqual(workflow_step.static_param, {'static_param': 'test'},
                         'Different static parameters on step.')
        self.assertEqual(
            workflow_step.dynamic_param, {
                'dynamic_param': {
                    wf_cfg.STEP_INDEX_KEY: 5,
                    wf_cfg.DATATYPE_INDEX_KEY: 0
                }
            },
            "Dynamic parameters not saved properly, or base workflow index not added to step index."
        )

    def test_create_workflow(self):
        """
        Test that a workflow with all the associated workflow steps is actually created.
        """
        workflow_step_list = [
            TestFactory.create_workflow_step(
                "tvb.tests.framework.adapters.testadapter2",
                "TestAdapter2",
                step_index=1,
                static_kwargs={"test2": 2}),
            TestFactory.create_workflow_step(
                "tvb.tests.framework.adapters.testadapter1",
                "TestAdapter1",
                step_index=2,
                static_kwargs={
                    "test1_val1": 1,
                    "test1_val2": 1
                })
        ]
        burst_id = self.__create_complex_workflow(workflow_step_list)
        workflow_entities = dao.get_workflows_for_burst(burst_id)
        self.assertTrue(
            len(workflow_entities) == 1,
            "For some reason workflow was not stored in database.")
        workflow_steps = dao.get_workflow_steps(workflow_entities[0].id)
        self.assertEqual(len(workflow_steps),
                         len(workflow_step_list) + 1,
                         "Wrong number of workflow steps created.")
Example #9
class TestOperationService(BaseTestCase):
    """
    Test class for the introspection module. Some of these tests do async launches;
    transactional tests won't work for those cases.
    """

    def setup_method(self):
        """
        Reset the database before each test.
        """
        self.clean_database()
        initialize_storage()
        self.test_user = TestFactory.create_user()
        self.test_project = TestFactory.create_project(self.test_user)
        self.operation_service = OperationService()
        self.backup_hdd_size = TvbProfile.current.MAX_DISK_SPACE

    def teardown_method(self):
        """
        Reset the database when test is done.
        """
        TvbProfile.current.MAX_DISK_SPACE = self.backup_hdd_size
        self.clean_database()

    def _assert_no_ddti(self):
        count = dao.count_datatypes(self.test_project.id, DummyDataTypeIndex)
        assert 0 == count

    def _assert_stored_ddti(self, expected_cnt=1):
        count = dao.count_datatypes(self.test_project.id, DummyDataTypeIndex)
        assert expected_cnt == count
        datatype = dao.try_load_last_entity_of_type(self.test_project.id, DummyDataTypeIndex)
        assert datatype.subject == DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored."
        return datatype

    def test_datatypes_groups(self, test_adapter_factory):
        """
        Tests that the dataType group is set correctly on the dataTypes resulting from the same operation group.
        """
        # TODO: re-write this to use groups correctly
        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        assert len(all_operations) == 0, "There should be no operation"

        algo = test_adapter_factory(TestAdapter3)
        adapter_instance = ABCAdapter.build_adapter(algo)
        data = {model_burst.RANGE_PARAMETER_1: 'param_5', 'param_5': [1, 2]}
        ## Create Group of operations
        FlowService().fire_operation(adapter_instance, self.test_user, self.test_project.id)

        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        assert len(all_operations) == 1, "Expected one operation group"
        assert all_operations[0][2] == 2, "Expected 2 operations in group"

        operation_group_id = all_operations[0][3]
        assert operation_group_id is not None, "The operation should be part of a group."

        self.operation_service.stop_operation(all_operations[0][0])
        self.operation_service.stop_operation(all_operations[0][1])
        ## Make sure operations are executed
        self.operation_service.launch_operation(all_operations[0][0], False)
        self.operation_service.launch_operation(all_operations[0][1], False)

        resulted_datatypes = dao.get_datatype_in_group(operation_group_id=operation_group_id)
        assert len(resulted_datatypes) >= 2, "Expected at least 2, but: " + str(len(resulted_datatypes))

        dt = dao.get_datatype_by_id(resulted_datatypes[0].id)
        datatype_group = dao.get_datatypegroup_by_op_group_id(operation_group_id)
        assert dt.fk_datatype_group == datatype_group.id, "DataTypeGroup is incorrect"

    def test_initiate_operation(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        module = "tvb.tests.framework.adapters.testadapter1"
        class_name = "TestAdapter1"
        test_adapter_factory()
        adapter = TestFactory.create_adapter(module, class_name)
        output = adapter.get_output()
        output_type = output[0].__name__
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")

        view_model = adapter.get_view_model()()
        view_model.test1_val1 = 5
        view_model.test1_val2 = 5
        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                  tmp_folder, model_view=view_model)

        group = dao.get_algorithm_by_module(module, class_name)
        assert group.module == 'tvb.tests.framework.adapters.testadapter1', "Wrong data stored."
        assert group.classname == 'TestAdapter1', "Wrong data stored."
        dts, count = dao.get_values_of_datatype(self.test_project.id, DummyDataTypeIndex)
        assert count == 1
        assert len(dts) == 1
        datatype = dao.get_datatype_by_id(dts[0][0])
        assert datatype.subject == DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored."
        assert datatype.type == output_type, "Wrong data stored."

    def test_delete_dt_free_hdd_space(self, test_adapter_factory, operation_factory):
        """
        Launch an operation, then delete the resulting datatype to free disk space so a second launch also finishes.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        view_model = adapter.get_view_model()()
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(view_model))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")

        self._assert_no_ddti()
        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                  model_view=view_model)
        datatype = self._assert_stored_ddti()

        # Now free some space and relaunch
        ProjectService().remove_datatype(self.test_project.id, datatype.gid)
        self._assert_no_ddti()
        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                  model_view=view_model)
        self._assert_stored_ddti()

    def test_launch_two_ops_hdd_with_space(self, test_adapter_factory):
        """
        Launch two operations and give enough available space for user so that both should finish.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        view_model = adapter.get_view_model()()
        TvbProfile.current.MAX_DISK_SPACE = 2 * float(adapter.get_required_disk_size(view_model))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")

        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                  model_view=view_model)
        datatype = self._assert_stored_ddti()

        # Now update the maximum disk size to be the size of the previously resulted datatypes (transform from kB to MB)
        # plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size) + float(
            adapter.get_required_disk_size(view_model))

        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                  model_view=view_model)
        self._assert_stored_ddti(2)

    def test_launch_two_ops_hdd_full_space(self):
        """
        Launch two operations with enough available space for the first to finish,
        but, after the disk quota is updated, not enough space for the second one.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        view_model = adapter.get_view_model()()

        TvbProfile.current.MAX_DISK_SPACE = (1 + float(adapter.get_required_disk_size(view_model)))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                  model_view=view_model)

        datatype = self._assert_stored_ddti()
        # Now update the maximum disk size to be less than size of the previously resulted datatypes (transform kB to MB)
        # plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size - 1) + \
                                            float(adapter.get_required_disk_size(view_model) - 1)

        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                      model_view=view_model)
        self._assert_stored_ddti()

    def test_launch_operation_hdd_with_space(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        view_model = adapter.get_view_model()()

        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(view_model))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                  model_view=view_model)
        self._assert_stored_ddti()

    def test_launch_operation_hdd_with_space_started_ops(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)

        space_taken_by_started = 100
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        form = TestAdapterHDDRequiredForm()
        adapter.submit_form(form)
        started_operation = model_operation.Operation(self.test_user.id, self.test_project.id,
                                                      adapter.stored_adapter.id, "",
                                                      status=model_operation.STATUS_STARTED,
                                                      estimated_disk_size=space_taken_by_started)
        view_model = adapter.get_view_model()()

        dao.store_entity(started_operation)
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(view_model) + space_taken_by_started)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                  model_view=view_model)
        self._assert_stored_ddti()

    def test_launch_operation_hdd_full_space(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)

        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        form = TestAdapterHDDRequiredForm()
        adapter.submit_form(form)
        view_model = adapter.get_view_model()()

        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(view_model) - 1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                      model_view=view_model)
        self._assert_no_ddti()

    def test_launch_operation_hdd_full_space_started_ops(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)

        space_taken_by_started = 100
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        form = TestAdapterHDDRequiredForm()
        adapter.submit_form(form)
        started_operation = model_operation.Operation(self.test_user.id, self.test_project.id,
                                                      adapter.stored_adapter.id, "",
                                                      status=model_operation.STATUS_STARTED,
                                                      estimated_disk_size=space_taken_by_started)
        view_model = adapter.get_view_model()()

        dao.store_entity(started_operation)
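        # Budget one unit less than the started operation plus this launch require,
        # so the launch below must fail with NoMemoryAvailableException.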
        TvbProfile.current.MAX_DISK_SPACE = float(
            adapter.get_required_disk_size(view_model) + space_taken_by_started - 1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user, self.test_project, adapter, tmp_folder,
                                                      model_view=view_model)
        self._assert_no_ddti()

    def test_stop_operation(self, test_adapter_factory):
        """
        Test that an operation is successfully stopped.
        """
        test_adapter_factory(adapter_class=TestAdapter2)
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter2", "TestAdapter2")
        view_model = adapter.get_view_model()()
        view_model.test = 5
        algo = adapter.stored_adapter
        algo_category = dao.get_category_by_id(algo.fk_category)
        operations, _ = self.operation_service.prepare_operations(self.test_user.id, self.test_project, algo,
                                                                  algo_category, {},
                                                                  view_model=view_model)
        self.operation_service._send_to_cluster(operations, adapter)
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        assert operation.status == model_operation.STATUS_CANCELED, "Operation should have been canceled!"

    def test_stop_operation_finished(self, test_adapter_factory):
        """
        Test that an operation that is already finished is not changed by the stop operation.
        """
        test_adapter_factory()
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter1", "TestAdapter1")
        view_model = adapter.get_view_model()()
        view_model.test1_val1 = 5
        view_model.test1_val2 = 5
        algo = adapter.stored_adapter
        algo_category = dao.get_category_by_id(algo.fk_category)
        operations, _ = self.operation_service.prepare_operations(self.test_user.id, self.test_project, algo,
                                                                  algo_category, {}, view_model=view_model)
        self.operation_service._send_to_cluster(operations, adapter)
        operation = dao.get_operation_by_id(operations[0].id)
        operation.status = model_operation.STATUS_FINISHED
        dao.store_entity(operation)
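        # The operation is already marked FINISHED, so stop_operation must leave it untouched.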
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        assert operation.status == model_operation.STATUS_FINISHED, "Operation shouldn't have been canceled!"


class FlowControllerTest(BaseControllersTest):
    """ Unit tests for FlowController """
    
    def setUp(self):
        """
        Sets up the environment for testing;
        creates a `FlowController`
        """
        self.init()
        self.flow_c = FlowController()
        self.burst_c = BurstController()
        self.operation_service = OperationService()
    
    
    def tearDown(self):
        """ Cleans up the testing environment """
        self.cleanup()
        self.clean_database()
            
            
    def test_context_selected(self):
        """
        Remove the project from the CherryPy session and check that you are redirected to the projects page.
        """
        del cherrypy.session[common.KEY_PROJECT]
        self._expect_redirect('/project/viewall', self.flow_c.step)
    

    def test_invalid_step(self):
        """
        Pass an invalid step and make sure we are redirected to tvb start page.
        """
        self._expect_redirect('/tvb', self.flow_c.step)
        
        
    def test_valid_step(self):
        """
        For all algorithm categories check that a submenu is generated and that the result
        page has its title given by the category name.
        """
        categories = dao.get_algorithm_categories()
        for categ in categories:
            result_dict = self.flow_c.step(categ.id)
            self.assertTrue(common.KEY_SUBMENU_LIST in result_dict,
                            "Expect to have a submenu with available algorithms for category.")
            self.assertEqual(result_dict["section_name"], categ.displayname.lower())


    def test_step_connectivity(self):
        """
        Check that the correct section name and connectivity sub-menu are returned for the connectivity step.
        """
        result_dict = self.flow_c.step_connectivity()
        self.assertEqual(result_dict['section_name'], 'connectivity')
        self.assertEqual(result_dict['submenu_list'], self.flow_c.connectivity_submenu)


    def test_default(self):
        """
        Test default method from step controllers. Check that the submit link is ok, that a mainContent
        is present in result dict and that the isAdapter flag is set to true.
        """
        cherrypy.request.method = "GET"
        categories = dao.get_algorithm_categories()
        for categ in categories:
            algo_groups = dao.get_groups_by_categories([categ.id])
            for algo in algo_groups:
                result_dict = self.flow_c.default(categ.id, algo.id)
                self.assertEqual(result_dict[common.KEY_SUBMIT_LINK], '/flow/%i/%i' % (categ.id, algo.id))
                self.assertTrue('mainContent' in result_dict)
                self.assertTrue(result_dict['isAdapter'])
                
                
    def test_default_cancel(self):
        """
        On cancel we should get a redirect to the back page link.
        """
        cherrypy.request.method = "POST"
        categories = dao.get_algorithm_categories()
        algo_groups = dao.get_groups_by_categories([categories[0].id])
        self._expect_redirect('/project/viewoperations/%i' % self.test_project.id, self.flow_c.default,
                              categories[0].id, algo_groups[0].id, cancel=True, back_page='operations')
        
        
    def test_default_invalid_key(self):
        """
        Pass invalid keys for adapter and step and check that you get redirected to the tvb entry
        page with the error flag set.
        """
        self._expect_redirect('/tvb?error=True', self.flow_c.default, 'invalid', 'invalid')
        
        
    def test_read_datatype_attribute(self):
        """
        Read an attribute from a datatype.
        """
        dt = DatatypesFactory().create_datatype_with_storage("test_subject", "RAW_STATE",
                                                             'this is the stored data'.split())
        returned_data = self.flow_c.read_datatype_attribute(dt.gid, "string_data")
        self.assertEqual(returned_data, '["this", "is", "the", "stored", "data"]')
        
        
    def test_read_datatype_attribute_method_call(self):
        """
        Call method on given datatype.
        """
        dt = DatatypesFactory().create_datatype_with_storage("test_subject", "RAW_STATE",
                                                             'this is the stored data'.split())
        args = {'length': 101}
        returned_data = self.flow_c.read_datatype_attribute(dt.gid, 'return_test_data', **args)
        self.assertTrue(returned_data == str(range(101)))
        
        
    def test_get_simple_adapter_interface(self):
        adapter = dao.find_group('tvb.tests.framework.adapters.testadapter1', 'TestAdapter1')
        result = self.flow_c.get_simple_adapter_interface(adapter.id)
        expected_interface = TestAdapter1().get_input_tree()
        self.assertEqual(result['inputList'], expected_interface)
        
    
    def _long_burst_launch(self, is_range=False):
        self.burst_c.index()
        connectivity = DatatypesFactory().create_connectivity()[1]
        launch_params = copy.deepcopy(SIMULATOR_PARAMETERS)
        launch_params['connectivity'] = dao.get_datatype_by_id(connectivity.id).gid
        if not is_range:
            launch_params['simulation_length'] = '10000'
        else:
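            # Range over simulation_length so the burst is launched as an operation group.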
            launch_params['simulation_length'] = '[10000,10001,10002]'
            launch_params[model.RANGE_PARAMETER_1] = 'simulation_length'
        launch_params = {"simulator_parameters": json.dumps(launch_params)}
        burst_id = json.loads(self.burst_c.launch_burst("new", "test_burst", **launch_params))['id']
        return dao.get_burst_by_id(burst_id)


    def _wait_for_burst_ops(self, burst_config):
        """ sleeps until some operation of the burst is created"""
        waited = 1
        timeout = 50
        operations = dao.get_operations_in_burst(burst_config.id)
        while not len(operations) and waited <= timeout:
            sleep(1)
            waited += 1
            operations = dao.get_operations_in_burst(burst_config.id)
        operations = dao.get_operations_in_burst(burst_config.id)
        return operations


    def test_stop_burst_operation(self):
        burst_config = self._long_burst_launch()
        operation = self._wait_for_burst_ops(burst_config)[0]
        self.assertFalse(operation.has_finished)
        self.flow_c.stop_burst_operation(operation.id, 0, False)
        operation = dao.get_operation_by_id(operation.id)
        self.assertEqual(operation.status, model.STATUS_CANCELED)
        
        
    def test_stop_burst_operation_group(self):
        burst_config = self._long_burst_launch(True)
        operations = self._wait_for_burst_ops(burst_config)
        operations_group_id = 0
        for operation in operations:
            self.assertFalse(operation.has_finished)
            operations_group_id = operation.fk_operation_group
        self.flow_c.stop_burst_operation(operations_group_id, 1, False)
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            self.assertEqual(operation.status, model.STATUS_CANCELED)
        
        
    def test_remove_burst_operation(self):
        burst_config = self._long_burst_launch()
        operation = self._wait_for_burst_ops(burst_config)[0]
        self.assertFalse(operation.has_finished)
        self.flow_c.stop_burst_operation(operation.id, 0, True)
        operation = dao.try_get_operation_by_id(operation.id)
        self.assertTrue(operation is None)
        
        
    def test_remove_burst_operation_group(self):
        burst_config = self._long_burst_launch(True)
        operations = self._wait_for_burst_ops(burst_config)
        operations_group_id = 0
        for operation in operations:
            self.assertFalse(operation.has_finished)
            operations_group_id = operation.fk_operation_group
        self.flow_c.stop_burst_operation(operations_group_id, 1, True)
        for operation in operations:
            operation = dao.try_get_operation_by_id(operation.id)
            self.assertTrue(operation is None)


    def _launch_test_algo_on_cluster(self, **data):
        module = "tvb.tests.framework.adapters.testadapter1"
        class_name = "TestAdapter1"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        algo_group = adapter.algorithm_group
        algo_category = dao.get_category_by_id(algo_group.fk_category)
        algo = dao.get_algorithm_by_group(algo_group.id)
        operations, _ = self.operation_service.prepare_operations(self.test_user.id, self.test_project.id, algo,
                                                                  algo_category, {}, ABCAdapter.LAUNCH_METHOD, **data)
        self.operation_service._send_to_cluster(operations, adapter)
        return operations


    def test_stop_operations(self):
        data = {"test1_val1": 5, 'test1_val2': 5}
        operations = self._launch_test_algo_on_cluster(**data)
        operation = dao.get_operation_by_id(operations[0].id)
        self.assertFalse(operation.has_finished)
        self.flow_c.stop_operation(operation.id, 0, False)
        operation = dao.get_operation_by_id(operation.id)
        self.assertEqual(operation.status, model.STATUS_CANCELED)
        
        
    def test_stop_operations_group(self):
        data = {model.RANGE_PARAMETER_1: "test1_val1", "test1_val1": '5,6,7', 'test1_val2': 5}
        operations = self._launch_test_algo_on_cluster(**data)
        operation_group_id = 0
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            self.assertFalse(operation.has_finished)
            operation_group_id = operation.fk_operation_group
        self.flow_c.stop_operation(operation_group_id, 1, False)
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            self.assertEqual(operation.status, model.STATUS_CANCELED)


class TestWorkflow(TransactionalTestCase):
    """
    Test that workflow conversion methods are valid.
    """


    def transactional_setup_method(self):
        """
        Sets up the testing environment;
        saves config file;
        creates a test user, a test project;
        creates burst, operation, flow and workflow services
        """
        self.test_user = TestFactory.create_user()
        self.test_project = TestFactory.create_project(self.test_user)
        self.workflow_service = WorkflowService()
        self.burst_service = BurstService()
        self.operation_service = OperationService()
        self.flow_service = FlowService()


    def transactional_teardown_method(self):
        """
        Remove project folders and clean up database.
        """
        FilesHelper().remove_project_structure(self.test_project.name)
        self.delete_project_folders()


    def __create_complex_workflow(self, workflow_step_list):
        """
        Creates a burst with a complex workflow with a given list of workflow steps.
        :param workflow_step_list: a list of workflow steps that will be used in the
            creation of a new workflow for a new burst
        """
        burst_config = TestFactory.store_burst(self.test_project.id)

        stored_dt = datatypes_factory.DatatypesFactory()._store_datatype(Datatype1())

        first_step_algorithm = self.flow_service.get_algorithm_by_module_and_class("tvb.tests.framework.adapters.testadapter1",
                                                                                   "TestAdapterDatatypeInput")
        metadata = {DataTypeMetaData.KEY_BURST: burst_config.id}
        kwargs = {"test_dt_input": stored_dt.gid, 'test_non_dt_input': '0'}
        operations, group = self.operation_service.prepare_operations(self.test_user.id, self.test_project.id,
                                                                      first_step_algorithm,
                                                                      first_step_algorithm.algorithm_category,
                                                                      metadata, **kwargs)

        workflows = self.workflow_service.create_and_store_workflow(project_id=self.test_project.id,
                                                                    burst_id=burst_config.id,
                                                                    simulator_index=0,
                                                                    simulator_id=first_step_algorithm.id,
                                                                    operations=operations)
        self.operation_service.prepare_operations_for_workflowsteps(workflow_step_list, workflows, self.test_user.id,
                                                                    burst_config.id, self.test_project.id, group,
                                                                    operations)
        #fire the first op
        if len(operations) > 0:
            self.operation_service.launch_operation(operations[0].id, False)
        return burst_config.id


    def test_workflow_generation(self):
        """
        A simple test just for the fact that a workflow is created and run,
        no dynamic parameters are passed. In this case we create a two-step
        workflow: step1 - tvb.tests.framework.adapters.testadapter2.TestAdapter2
                  step2 - tvb.tests.framework.adapters.testadapter1.TestAdapter1
        The first adapter doesn't return anything and the second returns one
        tvb.datatypes.datatype1.Datatype1 instance. We check that the steps
        are actually run by checking that two operations are created and that
        one dataType is stored.
        """
        workflow_step_list = [TestFactory.create_workflow_step("tvb.tests.framework.adapters.testadapter2",
                                                               "TestAdapter2", step_index=1,
                                                               static_kwargs={"test2": 2}),
                              TestFactory.create_workflow_step("tvb.tests.framework.adapters.testadapter1",
                                                               "TestAdapter1", step_index=2,
                                                               static_kwargs={"test1_val1": 1, "test1_val2": 1})]
        self.__create_complex_workflow(workflow_step_list)
        stored_datatypes = dao.get_datatypes_in_project(self.test_project.id)
        assert len(stored_datatypes) == 2, "DataType from second step was not stored."
        assert stored_datatypes[0].type == 'Datatype1', "Wrong type was stored."
        assert stored_datatypes[1].type == 'Datatype1', "Wrong type was stored."

        finished, started, error, _, _ = dao.get_operation_numbers(self.test_project.id)
        assert finished == 3, "Didn't start operations for both adapters in workflow."
        assert started == 0, "Some operations from workflow didn't finish."
        assert error == 0, "Some operations finished with error status."


    def test_workflow_dynamic_params(self):
        """
        A simple test just for the fact that dynamic parameters are passed properly
        between two workflow steps: 
                  step1 - tvb.tests.framework.adapters.testadapter1.TestAdapter1
                  step2 - tvb.tests.framework.adapters.testadapter3.TestAdapter3
        The first adapter returns a tvb.datatypes.datatype1.Datatype1 instance. 
        The second adapter has this passed as a dynamic workflow parameter.
        We check that the steps are actually run by checking that two operations
        are created and that two dataTypes are stored.
        """
        workflow_step_list = [TestFactory.create_workflow_step("tvb.tests.framework.adapters.testadapter1",
                                                               "TestAdapter1", step_index=1,
                                                               static_kwargs={"test1_val1": 1, "test1_val2": 1}),
                              TestFactory.create_workflow_step("tvb.tests.framework.adapters.testadapter3",
                                                               "TestAdapter3", step_index=2,
                                                               dynamic_kwargs={
                                                                   "test": {wf_cfg.DATATYPE_INDEX_KEY: 0,
                                                                            wf_cfg.STEP_INDEX_KEY: 1}})]

        self.__create_complex_workflow(workflow_step_list)
        stored_datatypes = dao.get_datatypes_in_project(self.test_project.id)
        assert len(stored_datatypes) == 3, "DataTypes from all steps were not stored."
        for result_row in stored_datatypes:
            assert result_row.type in ['Datatype1', 'Datatype2'], "Wrong type was stored."

        finished, started, error, _, _ = dao.get_operation_numbers(self.test_project.id)
        assert finished == 3, "Didn't start operations for both adapters in workflow."
        assert started == 0, "Some operations from workflow didn't finish."
        assert error == 0, "Some operations finished with error status."


    def test_configuration2workflow(self):
        """
        Test building a WorkflowStep from a WorkflowStepConfiguration. Make sure all the data is
        correctly passed. Also check that the base step index is added to the dynamic parameters' step index.
        """
        workflow_step = TestFactory.create_workflow_step("tvb.tests.framework.adapters.testadapter1", "TestAdapter1",
                                                         static_kwargs={"static_param": "test"},
                                                         dynamic_kwargs={"dynamic_param": {wf_cfg.STEP_INDEX_KEY: 0,
                                                                                           wf_cfg.DATATYPE_INDEX_KEY: 0}},
                                                         step_index=1, base_step=5)
        assert workflow_step.step_index == 1, "Wrong step index in created workflow step."
        assert workflow_step.static_param == {'static_param': 'test'}, 'Different static parameters on step.'
        assert workflow_step.dynamic_param == {'dynamic_param': {wf_cfg.STEP_INDEX_KEY: 5,
                                                                 wf_cfg.DATATYPE_INDEX_KEY: 0}}, \
            "Dynamic parameters not saved properly, or base workflow index not added to step index."


    def test_create_workflow(self):
        """
        Test that a workflow with all the associated workflow steps is actually created.
        """
        workflow_step_list = [TestFactory.create_workflow_step("tvb.tests.framework.adapters.testadapter2",
                                                               "TestAdapter2", step_index=1,
                                                               static_kwargs={"test2": 2}),
                              TestFactory.create_workflow_step("tvb.tests.framework.adapters.testadapter1",
                                                               "TestAdapter1", step_index=2,
                                                               static_kwargs={"test1_val1": 1, "test1_val2": 1})]
        burst_id = self.__create_complex_workflow(workflow_step_list)
        workflow_entities = dao.get_workflows_for_burst(burst_id)
        assert len(workflow_entities) == 1, "For some reason workflow was not stored in database."
        workflow_steps = dao.get_workflow_steps(workflow_entities[0].id)
        assert len(workflow_steps) == len(workflow_step_list) + 1, "Wrong number of workflow steps created."
Example #12
0
class TestFlowController(BaseControllersTest):
    """ Unit tests for FlowController """
    def setup_method(self):
        """
        Sets up the environment for testing;
        creates a `FlowController`
        """
        self.init()
        self.flow_c = FlowController()
        self.burst_c = SimulatorController()
        self.operation_service = OperationService()

    def teardown_method(self):
        """ Cleans up the testing environment """
        self.cleanup()
        self.clean_database()

    def test_context_selected(self):
        """
        Remove the project from the CherryPy session and check that you are redirected to the projects page.
        """
        del cherrypy.session[common.KEY_PROJECT]
        self._expect_redirect('/project/viewall', self.flow_c.step_analyzers)

    def test_valid_step(self):
        """
        For all algorithm categories check that a submenu is generated and that the result
        page has its title given by the category name.
        """
        result_dict = self.flow_c.step_analyzers()
        assert common.KEY_SUBMENU_LIST in result_dict, \
            "Expect to have a submenu with available algorithms for category."
        assert result_dict["section_name"] == 'analyze'

    def test_step_connectivity(self):
        """
        Check that the correct section name and connectivity sub-menu are returned for the connectivity step.
        """
        result_dict = self.flow_c.step_connectivity()
        assert result_dict['section_name'] == 'connectivity'
        assert result_dict['submenu_list'] == self.flow_c.connectivity_submenu

    def test_default(self):
        """
        Test default method from step controllers. Check that the submit link is ok, that a mainContent
        is present in result dict and that the isAdapter flag is set to true.
        """
        cherrypy.request.method = "GET"
        categories = dao.get_algorithm_categories()
        for categ in categories:
            # Ignore creators, as those won't go through this flow
            if categ.displayname in [
                    CreateAlgorithmCategoryConfig.category_name
            ]:
                continue
            algo_groups = dao.get_adapters_from_categories([categ.id])
            for algo in algo_groups:
                result_dict = self.flow_c.default(categ.id, algo.id)
                assert result_dict[common.KEY_SUBMIT_LINK] == '/flow/%i/%i' % (
                    categ.id, algo.id)
                assert 'mainContent' in result_dict
                assert result_dict['isAdapter']

    def test_default_cancel(self):
        """
        On cancel we should get a redirect to the back page link.
        """
        cherrypy.request.method = "POST"
        categories = dao.get_algorithm_categories()
        algo_groups = dao.get_adapters_from_categories([categories[0].id])
        self._expect_redirect('/project/viewoperations/%i' %
                              self.test_project.id,
                              self.flow_c.default,
                              categories[0].id,
                              algo_groups[0].id,
                              cancel=True,
                              back_page='operations')

    def test_default_invalid_key(self):
        """
        Pass invalid keys for adapter and step and check that you get redirected to the tvb entry
        page with the error flag set.
        """
        self._expect_redirect('/tvb?error=True', self.flow_c.default,
                              'invalid', 'invalid')

    def test_read_datatype_attribute(self, dummy_datatype_index_factory):
        """
        Read an attribute from a datatype.
        """
        dt = dummy_datatype_index_factory(row1='This is stored data')
        dt.subject = "test_subject"
        dt.state = "RAW_STATE"

        returned_data = self.flow_c.read_datatype_attribute(dt.gid, "row1")
        assert returned_data == '"This is stored data"'

    def test_read_datatype_attribute_method_call(self,
                                                 dummy_datatype_index_factory):
        """
        Call method on given datatype.
        """
        dt = dummy_datatype_index_factory(row1='This is stored data')
        args = {'length': 101}
        returned_data = self.flow_c.read_datatype_attribute(
            dt.gid, 'return_test_data', **args)
        assert returned_data.replace('"', '') == " ".join(
            str(x) for x in range(101))

    def test_get_simple_adapter_interface(self, test_adapter_factory):
        algo = test_adapter_factory()
        form = TestAdapter1Form()
        adapter = TestFactory.create_adapter(
            'tvb.tests.framework.adapters.testadapter1', 'TestAdapter1')
        adapter.submit_form(form)
        result = self.flow_c.get_simple_adapter_interface(algo.id)
        expected_interface = adapter.get_form()
        found_form = result['adapter_form']['adapter_form']
        assert isinstance(result['adapter_form'], dict)
        assert isinstance(found_form, TestAdapter1Form)
        assert found_form.test1_val1.value == expected_interface.test1_val1.value
        assert found_form.test1_val2.value == expected_interface.test1_val2.value

    def test_stop_burst_operation(self, simulation_launch):
        operation = simulation_launch(self.test_user, self.test_project, 1000)
        assert not operation.has_finished
        self.flow_c.cancel_or_remove_operation(operation.id, 0, False)
        operation = dao.get_operation_by_id(operation.id)
        assert operation.status == STATUS_CANCELED

    def test_stop_burst_operation_group(self, simulation_launch):
        first_op = simulation_launch(self.test_user, self.test_project, 1000,
                                     True)
        operations_group_id = first_op.fk_operation_group
        assert not first_op.has_finished
        self.flow_c.cancel_or_remove_operation(operations_group_id, 1, False)
        operations = dao.get_operations_in_group(operations_group_id)
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            assert operation.status == STATUS_CANCELED

    def test_remove_burst_operation(self, simulation_launch):
        operation = simulation_launch(self.test_user, self.test_project, 1000)
        assert not operation.has_finished
        self.flow_c.cancel_or_remove_operation(operation.id, 0, True)
        operation = dao.try_get_operation_by_id(operation.id)
        assert operation is None

    def test_remove_burst_operation_group(self, simulation_launch):
        first_op = simulation_launch(self.test_user, self.test_project, 1000,
                                     True)
        operations_group_id = first_op.fk_operation_group
        assert not first_op.has_finished
        self.flow_c.cancel_or_remove_operation(operations_group_id, 1, True)
        operations = dao.get_operations_in_group(operations_group_id)
        for operation in operations:
            operation = dao.try_get_operation_by_id(operation.id)
            assert operation is None

    def _asynch_launch_simple_op(self, **data):
        adapter = TestFactory.create_adapter(
            'tvb.tests.framework.adapters.testadapter1', 'TestAdapter1')
        view_model = TestModel()
        view_model.test1_val1 = 5
        view_model.test1_val2 = 6
        algo = adapter.stored_adapter
        algo_category = dao.get_category_by_id(algo.fk_category)
        operations, _ = self.operation_service.prepare_operations(
            self.test_user.id,
            self.test_project,
            algo,
            algo_category,
            view_model=view_model,
            **data)
        self.operation_service._send_to_cluster(operations, adapter)
        return operations

    def test_stop_operations(self):
        operations = self._asynch_launch_simple_op()
        operation = dao.get_operation_by_id(operations[0].id)
        assert not operation.has_finished
        self.flow_c.cancel_or_remove_operation(operation.id, 0, False)
        operation = dao.get_operation_by_id(operation.id)
        assert operation.status == STATUS_CANCELED

    def test_stop_operations_group(self):
        range_param = {
            RANGE_PARAMETER_1: "test1_val1",
            'test1_val1': {
                "lo": 0,
                "step": 2.0,
                "hi": 5.0
            }
        }
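        # Ranging test1_val1 from 0 to 5 with step 2 yields the values 0, 2 and 4,
        # hence the 3 operations asserted below.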
        operations = self._asynch_launch_simple_op(**range_param)
        assert 3 == len(operations)
        operation_group_id = 0
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            assert not operation.has_finished
            operation_group_id = operation.fk_operation_group
        self.flow_c.cancel_or_remove_operation(operation_group_id, 1, False)
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            assert operation.status == STATUS_CANCELED


class TestOperationService(BaseTestCase):
    """
    Test class for the introspection module. Some tests here perform async launches, for which
    transactional tests won't work.
    TODO: this still needs refactoring; it is huge, with duplicates and many irrelevant checks.
    """


    def setup_method(self):
        """
        Reset the database before each test.
        """
        self.clean_database()
        initialize_storage()
        self.test_user = TestFactory.create_user()
        self.test_project = TestFactory.create_project(self.test_user)
        self.operation_service = OperationService()
        self.backup_hdd_size = TvbProfile.current.MAX_DISK_SPACE


    def teardown_method(self):
        """
        Reset the database when test is done.
        """
        TvbProfile.current.MAX_DISK_SPACE = self.backup_hdd_size
        self.clean_database()


    def _assert_no_dt2(self):
        count = dao.count_datatypes(self.test_project.id, Datatype2)
        assert 0 == count


    def _assert_stored_dt2(self, expected_cnt=1):
        count = dao.count_datatypes(self.test_project.id, Datatype2)
        assert expected_cnt == count
        datatype = dao.try_load_last_entity_of_type(self.test_project.id, Datatype2)
        assert datatype.subject == DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored."
        return datatype


    def test_datatypes_groups(self):
        """
        Tests that the DataType group is set correctly on the DataTypes resulting from the same operation group.
        """
        flow_service = FlowService()

        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        assert len(all_operations) == 0, "There should be no operation"

        adapter_instance = TestFactory.create_adapter('tvb.tests.framework.adapters.testadapter3', 'TestAdapter3')
        data = {model.RANGE_PARAMETER_1: 'param_5', 'param_5': [1, 2]}
        ## Create Group of operations
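        # Ranging param_5 over [1, 2] should expand the launch into a group of 2 operations.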
        flow_service.fire_operation(adapter_instance, self.test_user, self.test_project.id, **data)

        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        assert len(all_operations) == 1, "Expected one operation group"
        assert all_operations[0][2] == 2, "Expected 2 operations in group"

        operation_group_id = all_operations[0][3]
        assert operation_group_id is not None, "The operation should be part of a group."

        self.operation_service.stop_operation(all_operations[0][0])
        self.operation_service.stop_operation(all_operations[0][1])
        ## Make sure operations are executed
        self.operation_service.launch_operation(all_operations[0][0], False)
        self.operation_service.launch_operation(all_operations[0][1], False)

        resulted_datatypes = dao.get_datatype_in_group(operation_group_id=operation_group_id)
        assert len(resulted_datatypes) >= 2, "Expected at least 2, but: " + str(len(resulted_datatypes))

        dt = dao.get_datatype_by_id(resulted_datatypes[0].id)
        datatype_group = dao.get_datatypegroup_by_op_group_id(operation_group_id)
        assert dt.fk_datatype_group == datatype_group.id, "DataTypeGroup is incorrect"


    def test_initiate_operation(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        module = "tvb.tests.framework.adapters.testadapter1"
        class_name = "TestAdapter1"
        adapter = TestFactory.create_adapter(module, class_name)
        output = adapter.get_output()
        output_type = output[0].__name__
        data = {"test1_val1": 5, "test1_val2": 5}
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        res = self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter,
                                                        tmp_folder, **data)
        assert res.index("has finished.") > 10, "Operation didn't finish"
        group = dao.get_algorithm_by_module(module, class_name)
        assert group.module == 'tvb.tests.framework.adapters.testadapter1', "Wrong data stored."
        assert group.classname == 'TestAdapter1', "Wrong data stored."
        dts, count = dao.get_values_of_datatype(self.test_project.id, Datatype1)
        assert count == 1
        assert len(dts) == 1
        datatype = dao.get_datatype_by_id(dts[0][0])
        assert datatype.subject == DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored."
        assert datatype.type == output_type, "Wrong data stored."


    def test_delete_dt_free_HDD_space(self):
        """
        Launch an operation, delete its resulting DataType to free disk space, then check that a second launch fits in the same quota.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data))
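        # The quota covers exactly one launch; the second launch below only fits
        # because the first result is deleted in between.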
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")

        self._assert_no_dt2()
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        datatype = self._assert_stored_dt2()

        # Now free some space and relaunch
        ProjectService().remove_datatype(self.test_project.id, datatype.gid)
        self._assert_no_dt2()
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_stored_dt2()


    def test_launch_two_ops_HDD_with_space(self):
        """
        Launch two operations and give the user enough available space so that both finish.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = 2 * float(adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")

        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        datatype = self._assert_stored_dt2()

        # Now update the maximum disk size to the size of the previously produced datatype (transform from kB to MB)
        # plus what is estimated to be required by the next launch (transform from B to MB).
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size) + float(adapter.get_required_disk_size(**data))

        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_stored_dt2(2)


    def test_launch_two_ops_HDD_full_space(self):
        """
        Launch two operations and give the user enough available space so that the first finishes,
        but after the disk quota is updated the second should not.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        data = {"test": 100}

        TvbProfile.current.MAX_DISK_SPACE = (1 + float(adapter.get_required_disk_size(**data)))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)

        datatype = self._assert_stored_dt2()
        # Now update the maximum disk size to be less than the size of the previously produced datatype (transform kB to MB)
        # plus what is estimated to be required by the next launch (transform from B to MB).
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size - 1) + \
                                            float(adapter.get_required_disk_size(**data) - 1)

        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_stored_dt2()


    def test_launch_operation_HDD_with_space(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        data = {"test": 100}

        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_stored_dt2()


    def test_launch_operation_HDD_with_space_started_ops(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        space_taken_by_started = 100
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        started_operation = model.Operation(self.test_user.id, self.test_project.id, adapter.stored_adapter.id, "",
                                            status=model.STATUS_STARTED, estimated_disk_size=space_taken_by_started)
        dao.store_entity(started_operation)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data) + space_taken_by_started)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_stored_dt2()


    def test_launch_operation_HDD_full_space(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data) - 1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_no_dt2()


    def test_launch_operation_HDD_full_space_started_ops(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        space_taken_by_started = 100
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        started_operation = model.Operation(self.test_user.id, self.test_project.id, adapter.stored_adapter.id, "",
                                            status=model.STATUS_STARTED, estimated_disk_size=space_taken_by_started)
        dao.store_entity(started_operation)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data) + space_taken_by_started - 1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_no_dt2()


    def test_stop_operation(self):
        """
        Test that an operation is successfully stopped.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter2", "TestAdapter2")
        data = {"test": 5}
        algo = adapter.stored_adapter
        algo_category = dao.get_category_by_id(algo.fk_category)
        operations, _ = self.operation_service.prepare_operations(self.test_user.id, self.test_project.id, algo,
                                                                  algo_category, {}, **data)
        self.operation_service._send_to_cluster(operations, adapter)
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        assert operation.status == model.STATUS_CANCELED, "Operation should have been canceled!"


    def test_stop_operation_finished(self):
        """
        Test that an operation that is already finished is not changed by the stop operation.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter1", "TestAdapter1")
        data = {"test1_val1": 5, 'test1_val2': 5}
        algo = adapter.stored_adapter
        algo_category = dao.get_category_by_id(algo.fk_category)
        operations, _ = self.operation_service.prepare_operations(self.test_user.id, self.test_project.id, algo,
                                                                  algo_category, {}, **data)
        self.operation_service._send_to_cluster(operations, adapter)
        operation = dao.get_operation_by_id(operations[0].id)
        operation.status = model.STATUS_FINISHED
        dao.store_entity(operation)
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        assert operation.status == model.STATUS_FINISHED, "Operation shouldn't have been canceled!"


    def test_array_from_string(self):
        """
        Simple test for parsing 1D, 2D and 3D arrays from strings.
        """
        row = {'description': 'test.',
               'default': 'None',
               'required': True,
               'label': 'test: ',
               'attributes': None,
               'elementType': 'float',
               'type': 'array',
               'options': None,
               'name': 'test'}
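        # A space-separated 2x3 literal should parse into a (2, 3) float array.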
        input_data_string = '[ [1 2 3] [4 5 6]]'
        output = string2array(input_data_string, ' ', row['elementType'])
        assert output.shape == (2, 3), "Dimensions not properly parsed"
        for i in output[0]:
            assert i in [1, 2, 3]
        for i in output[1]:
            assert i in [4, 5, 6]
        input_data_string = '[1, 2, 3, 4, 5, 6]'
        output = string2array(input_data_string, ',', row['elementType'])
        assert output.shape == (6,), "Dimensions not properly parsed"
        for i in output:
            assert i in [1, 2, 3, 4, 5, 6]
        input_data_string = '[ [ [1,1], [2, 2] ], [ [3 ,3], [4,4] ] ]'
        output = string2array(input_data_string, ',', row['elementType'])
        assert output.shape == (2, 2, 2), "Wrong dimensions."
        for i in output[0][0]:
            assert i == 1
        for i in output[0][1]:
            assert i == 2
        for i in output[1][0]:
            assert i == 3
        for i in output[1][1]:
            assert i == 4
        row = {'description': 'test.',
               'default': 'None',
               'required': True,
               'label': 'test: ',
               'attributes': None,
               'elementType': 'str',
               'type': 'array',
               'options': None,
               'name': 'test'}
        input_data_string = '[1, 2, 3, 4, 5, 6]'
        output = string2array(input_data_string, ',', row['elementType'])
        for i in output:
            assert i in [1, 2, 3, 4, 5, 6]


    def test_wrong_array_from_string(self):
        """Test that parsing an array from string is throwing the expected 
        exception when wrong input string"""
        row = {'description': 'test.',
               'default': 'None',
               'required': True,
               'label': 'test: ',
               'attributes': None,
               'elementType': 'float',
               'type': 'array',
               'options': None,
               'name': 'test'}
        input_data_string = '[ [1,2 3] [4,5,6]]'
        with pytest.raises(ValueError):
            string2array(input_data_string, ',', row['elementType'])
        input_data_string = '[ [1,2,wrong], [4, 5, 6]]'
        with pytest.raises(ValueError):
            string2array(input_data_string, ',', row['elementType'])
        row = {'description': 'test.',
               'default': 'None',
               'required': True,
               'label': 'test: ',
               'attributes': None,
               'elementType': 'str',
               'type': 'array',
               'options': None,
               'name': 'test'}
        output = string2array(input_data_string, ',', row['elementType'])
        assert output.shape == (2, 3)
        assert output[0][2] == 'wrong', 'String data not converted properly'
        input_data_string = '[ [1,2 3] [4,5,6]]'
        output = string2array(input_data_string, ',', row['elementType'])
        assert output[0][1] == '2 3'


    def test_reduce_dimension_component(self):
        """
        This method tests that the data passed to the launch method of
        the NDimensionArrayAdapter adapter is correct. The passed data should be a list
        of one-dimensional arrays.
        """
        inserted_count = FlowService().get_available_datatypes(self.test_project.id,
                                                               "tvb.datatypes.arrays.MappedArray")[1]
        assert inserted_count == 0, "Expected to find no data."
        #create an operation
        algorithm_id = FlowService().get_algorithm_by_module_and_class('tvb.tests.framework.adapters.ndimensionarrayadapter',
                                                                       'NDimensionArrayAdapter').id
        operation = model.Operation(self.test_user.id, self.test_project.id, algorithm_id, 'test params',
                                    meta=json.dumps({DataTypeMetaData.KEY_STATE: "RAW_DATA"}),
                                    status=model.STATUS_FINISHED)
        operation = dao.store_entity(operation)
        #save the array wrapper in DB
        adapter_instance = NDimensionArrayAdapter()
        PARAMS = {}
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        inserted_data = FlowService().get_available_datatypes(self.test_project.id,
                                                              "tvb.datatypes.arrays.MappedArray")[0]
        assert len(inserted_data) == 1, "Problems when inserting data"
        gid = inserted_data[0][2]
        entity = dao.get_datatype_by_gid(gid)
        #from the 3D array do not select any array
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": "requiredDim_1",
                  "input_data_dimensions_1": "",
                  "input_data_dimensions_2": ""}
        try:
            self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
            raise AssertionError("Test should not pass. The resulted array should be a 1D array.")
        except Exception:
            # OK, do nothing; we were expecting to produce a 1D array
            pass
        #from the 3D array select only a 1D array
        first_dim = [gid + '_1_0', 'requiredDim_1']
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": first_dim,
                  "input_data_dimensions_1": gid + "_2_1"}
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        expected_result = entity.array_data[:, 0, 1]
        actual_result = adapter_instance.launch_param
        assert len(actual_result) == len(expected_result), "Not the same size for results!"
        assert numpy.equal(actual_result, expected_result).all()

        #from the 3D array select a 2D array
        first_dim = [gid + '_1_0', gid + '_1_1', 'requiredDim_2']
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": first_dim,
                  "input_data_dimensions_1": gid + "_2_1"}
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        expected_result = entity.array_data[slice(0, None), [0, 1], 1]
        actual_result = adapter_instance.launch_param
        assert len(actual_result) == len(expected_result), "Not the same size for results!"
        assert numpy.equal(actual_result, expected_result).all()

        #from 3D array select 1D array by applying SUM function on the first
        #dimension and average function on the second dimension
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": ["requiredDim_1", "func_sum"],
                  "input_data_dimensions_1": "func_average",
                  "input_data_dimensions_2": ""}
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        aux = numpy.sum(entity.array_data, axis=0)
        expected_result = numpy.average(aux, axis=0)
        actual_result = adapter_instance.launch_param
        assert len(actual_result) == len(expected_result), "Not the same size of results!"
        assert numpy.equal(actual_result, expected_result).all()

        #from 3D array select a 2D array and apply op. on the second dimension
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": ["requiredDim_2", "func_sum",
                                              "expected_shape_x,512", "operations_x,&gt;"],
                  "input_data_dimensions_1": "",
                  "input_data_dimensions_2": ""}
        try:
            self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
            raise AssertionError("Test should not pass! The second dimension of the array should be >512.")
        except Exception:
            # OK, do nothing;
            pass
Example #14
0
class BurstService(object):
    """
    Service layer for Burst related entities.
    """

    def __init__(self):
        self.operation_service = OperationService()
        self.workflow_service = WorkflowService()
        self.logger = get_logger(self.__class__.__module__)
        self.cache_portlet_configurators = {}


    def build_portlet_interface(self, portlet_configuration, project_id):
        """
        From a portlet_id and a project_id, first build the portlet
        entity, then get its configurable interface.
        
        :param portlet_configuration: a portlet configuration entity. It holds at
            least the portlet_id, and in case any default parameters were saved
            they can be rebuilt from the analyzer / visualizer parameters
        :param project_id: the id of the current project   
            
        :returns: the portlet interface will be of the following form::
            [{'interface': adapter_interface, 
            'prefix': prefix_for_parameter_names, 
            'subalg': {algorithm_field_name: default_algorithm_value},
            'algo_group': algorithm_group,
            'alg_ui_name': displayname},
            ......]
            A list of dictionaries for each adapter that makes up the portlet.
            
        """
        portlet_configurer = self._get_portlet_configurer(portlet_configuration.portlet_id)
        portlet_interface = portlet_configurer.get_configurable_interface()

        for adapter_conf in portlet_interface:
            interface = adapter_conf.interface
            itree_mngr = InputTreeManager()
            interface = itree_mngr.fill_input_tree_with_options(interface, project_id,
                                                                adapter_conf.stored_adapter.fk_category)
            adapter_conf.interface = itree_mngr.prepare_param_names(interface)

        portlet_configurer.update_default_values(portlet_interface, portlet_configuration)
        portlet_configurer.prefix_adapters_parameters(portlet_interface)

        return portlet_interface


    def _get_portlet_configurer(self, portlet_id):

        if portlet_id not in self.cache_portlet_configurators:

            portlet_entity = dao.get_portlet_by_id(portlet_id)
            if portlet_entity is None:
                raise InvalidPortletConfiguration("No portlet entity located in database with id=%s. " % portlet_id)

            self.cache_portlet_configurators[portlet_id] = PortletConfigurer(portlet_entity)
            self.logger.debug("Recently parsed portlet XML:" + str([portlet_entity]))

        return self.cache_portlet_configurators[portlet_id]


    def update_portlet_configuration(self, portlet_configuration, submited_parameters):
        """
        :param portlet_configuration: the portlet configuration that needs to be updated
        :param submited_parameters: a list of parameters as submitted from the UI. This 
            is a dictionary in the form : 
            {'dynamic' : {name:value pairs}, 'static' : {name:value pairs}}
            
        All names are prefixed with adapter specific generated prefix.
        """
        portlet_configurer = self._get_portlet_configurer(portlet_configuration.portlet_id)
        return portlet_configurer.update_portlet_configuration(portlet_configuration, submited_parameters)


    def new_burst_configuration(self, project_id):
        """
        Return a new burst configuration entity with all the default values.
        """
        burst_configuration = model.BurstConfiguration(project_id)
        burst_configuration.selected_tab = 0

        # Now set the default portlets for the specified burst configuration.
        # The default portlets are specified in the __init__.py script from tvb root.
        for tab_idx, value in DEFAULT_PORTLETS.items():
            for sel_idx, portlet_identifier in value.items():
                portlet = BurstService.get_portlet_by_identifier(portlet_identifier)
                if portlet is not None:
                    portlet_configuration = self.new_portlet_configuration(portlet.id, tab_idx, sel_idx,
                                                                           portlet.algorithm_identifier)
                    burst_configuration.set_portlet(tab_idx, sel_idx, portlet_configuration)

        return burst_configuration
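    # For reference, the loop above assumes DEFAULT_PORTLETS maps tab indexes to
    # {selection index: portlet identifier} dictionaries, e.g. (illustrative value
    # only; the real mapping lives in tvb's root __init__.py):
    #
    #   DEFAULT_PORTLETS = {0: {0: 'TimeSeries'}}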


    @staticmethod
    def _store_burst_config(burst_config):
        """
        Store a burst configuration entity.
        """
        burst_config.prepare_before_save()
        saved_entity = dao.store_entity(burst_config)
        return saved_entity.id


    @staticmethod
    def get_available_bursts(project_id):
        """
        Return all the bursts for the current project.
        """
        bursts = dao.get_bursts_for_project(project_id, page_size=MAX_BURSTS_DISPLAYED) or []
        for burst in bursts:
            burst.prepare_after_load()
        return bursts


    @staticmethod
    def populate_burst_disk_usage(bursts):
        """
        Adds a disk_size field to each burst object.
        The disk size is computed as the sum of the sizes of the datatypes generated by a burst.
        """
        sizes = dao.compute_bursts_disk_size([b.id for b in bursts])
        for b in bursts:
            b.disk_size = format_bytes_human(sizes[b.id])


    @staticmethod
    def rename_burst(burst_id, new_name):
        """
        Rename the burst given by burst_id, setting its name to new_name.
        """
        burst = dao.get_burst_by_id(burst_id)
        burst.name = new_name
        dao.store_entity(burst)


    def load_burst(self, burst_id):
        """
        :param burst_id: the id of the burst that should be loaded
        
        Having this input the method should:
        
            - load the entity from the DB
            - get all the workflow steps for the saved burst id
            - go through the visualization workflow steps to create the tab
                configuration of the burst using the tab_index and index_in_tab 
                fields saved on each workflow_step
                
        """
        burst = dao.get_burst_by_id(burst_id)
        burst.prepare_after_load()
        burst.reset_tabs()
        burst_workflows = dao.get_workflows_for_burst(burst.id)

        group_gid = None
        if len(burst_workflows) == 1:
            # A simple burst with no range parameters
            burst = self.__populate_tabs_from_workflow(burst, burst_workflows[0])
        elif len(burst_workflows) > 1:
            # A burst workflow with a range of values, created multiple workflows and need
            # to launch parameter space exploration with the resulted group
            self.__populate_tabs_from_workflow(burst, burst_workflows[0])
            executed_steps = dao.get_workflow_steps(burst_workflows[0].id)

            operation = dao.get_operation_by_id(executed_steps[0].fk_operation)
            if operation.operation_group:
                workflow_group = dao.get_datatypegroup_by_op_group_id(operation.operation_group.id)
                group_gid = workflow_group.gid
        return burst, group_gid
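    # Usage sketch (assumed id): loading a burst for display; group_gid is only set
    # when the burst produced a parameter-space-exploration group:
    #
    #   burst, group_gid = BurstService().load_burst(some_burst_id)
    #   if group_gid is not None:
    #       pass  # render the PSE view for the datatype group with this gid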

    @staticmethod
    def __populate_tabs_from_workflow(burst_entity, workflow):
        """
        Given a burst and a workflow populate the tabs of the burst with the PortletConfigurations
        generated from the steps of the workflow.
        """
        visualizers = dao.get_visualization_steps(workflow.id)
        for entry in visualizers:
            ## For each visualize step, also load all of the analyze steps.
            portlet_cfg = PortletConfiguration(entry.fk_portlet)
            portlet_cfg.set_visualizer(entry)
            analyzers = dao.get_workflow_steps_for_position(entry.fk_workflow, entry.tab_index, entry.index_in_tab)
            portlet_cfg.set_analyzers(analyzers)
            burst_entity.tabs[entry.tab_index].portlets[entry.index_in_tab] = portlet_cfg
        return burst_entity

    def load_tab_configuration(self, burst_entity, op_id):
        """
        Given a burst entity and an operation id, find the workflow to which the op_id
        belongs and then load the burst_entity's tab configuration with those workflow steps.
        """
        originating_workflow = dao.get_workflow_for_operation_id(op_id)
        burst_entity = self.__populate_tabs_from_workflow(burst_entity, originating_workflow)
        return burst_entity


    def new_portlet_configuration(self, portlet_id, tab_nr=-1, index_in_tab=-1, portlet_name='Default'):
        """
        Return a new portlet configuration entity with default parameters.
        
        :param portlet_id: the id of the portlet for which a configuration will be stored
        :param tab_nr: the index of the currently selected tab
        :param index_in_tab: the index from the currently selected tab
        """
        portlet_configurer = self._get_portlet_configurer(portlet_id)
        configuration = portlet_configurer.create_new_portlet_configuration(portlet_name)
        for wf_step in configuration.analyzers:
            wf_step.tab_index = tab_nr
            wf_step.index_in_tab = index_in_tab
        configuration.visualizer.tab_index = tab_nr
        configuration.visualizer.index_in_tab = index_in_tab
        return configuration


    @staticmethod
    def get_available_portlets():
        """
        :returns: a list of all the available portlet entities
        """
        return dao.get_available_portlets()

    @staticmethod
    def get_portlet_by_id(portlet_id):
        """
        :returns: the portlet entity with the id =@portlet_id
        """
        return dao.get_portlet_by_id(portlet_id)

    @staticmethod
    def get_portlet_by_identifier(portlet_identifier):
        """
        :returns: the portlet entity with the algorithm identifier =@portlet_identifier
        """
        return dao.get_portlet_by_identifier(portlet_identifier)


    def launch_burst(self, burst_configuration, simulator_index, simulator_id, user_id, launch_mode=LAUNCH_NEW):
        """
        Given a burst configuration and all the necessary data do the actual launch.
        
        :param burst_configuration: BurstConfiguration   
        :param simulator_index: the position within the workflow step list that the simulator will take. This is needed
            so that the rest of the portlet workflow steps know which steps their dynamic parameters come from.
        :param simulator_id: the id of the simulator adapter as stored in the DB. It's needed to load the simulator algo
            group and category that are then passed to the launcher's prepare_operation method.
        :param user_id: the id of the user that launched this burst
        :param launch_mode: new/branch/continue
        """
        ## 1. Prepare BurstConfiguration entity
        if launch_mode == LAUNCH_NEW:
            ## Fully new entity for new simulation
            burst_config = burst_configuration.clone()
            if burst_config.name is None:
                new_id = dao.get_max_burst_id() + 1
                burst_config.name = 'simulation_' + str(new_id)
        else:
            ## Branch or Continue simulation
            burst_config = burst_configuration
            simulation_state = dao.get_generic_entity(SIMULATION_DATATYPE_MODULE + "." + SIMULATION_DATATYPE_CLASS,
                                                      burst_config.id, "fk_parent_burst")
            if simulation_state is None or len(simulation_state) < 1:
                exc = BurstServiceException("Simulation State not found for %s, "
                                            "thus we are unable to branch from it!" % burst_config.name)
                self.logger.error(exc)
                raise exc

            simulation_state = simulation_state[0]
            burst_config.update_simulation_parameter("simulation_state", simulation_state.gid)
            burst_config = burst_configuration.clone()

            count = dao.count_bursts_with_name(burst_config.name, burst_config.fk_project)
            burst_config.name = burst_config.name + "_" + launch_mode + str(count)

        ## 2. Create Operations and do the actual launch  
        if launch_mode in [LAUNCH_NEW, LAUNCH_BRANCH]:
            ## New Burst entry in the history
            burst_id = self._store_burst_config(burst_config)
            thread = threading.Thread(target=self._async_launch_and_prepare,
                                      kwargs={'burst_config': burst_config,
                                              'simulator_index': simulator_index,
                                              'simulator_id': simulator_id,
                                              'user_id': user_id})
            thread.start()
            return burst_id, burst_config.name
        else:
            ## Continue simulation
            ## TODO
            return burst_config.id, burst_config.name
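    # Usage sketch (hypothetical ids): launching a brand new simulation; the call
    # returns immediately because the operations are prepared and launched on a
    # background thread by _async_launch_and_prepare:
    #
    #   burst_id, burst_name = BurstService().launch_burst(
    #       burst_config, simulator_index=0, simulator_id=simulator_algo_id,
    #       user_id=current_user_id, launch_mode=LAUNCH_NEW)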


    @transactional
    def _prepare_operations(self, burst_config, simulator_index, simulator_id, user_id):
        """
        Prepare all required operations for burst launch.
        """
        project_id = burst_config.fk_project
        burst_id = burst_config.id
        workflow_step_list = []
        starting_index = simulator_index + 1

        sim_algo = FlowService().get_algorithm_by_identifier(simulator_id)
        metadata = {DataTypeMetaData.KEY_BURST: burst_id}
        launch_data = burst_config.get_all_simulator_values()[0]
        operations, group = self.operation_service.prepare_operations(user_id, project_id, sim_algo,
                                                                      sim_algo.algorithm_category, metadata,
                                                                      **launch_data)
        group_launched = group is not None
        if group_launched:
            starting_index += 1

        for tab in burst_config.tabs:
            for portlet_cfg in tab.portlets:
                ### For each portlet configuration stored, update the step index ###
                ### and also change the dynamic parameters step indexes to point ###
                ### to the simulator outputs.                                     ##
                if portlet_cfg is not None:
                    analyzers = portlet_cfg.analyzers
                    visualizer = portlet_cfg.visualizer
                    for entry in analyzers:
                        entry.step_index = starting_index
                        self.workflow_service.set_dynamic_step_references(entry, simulator_index)
                        workflow_step_list.append(entry)
                        starting_index += 1
                    ### Change the dynamic parameters to point to the last adapter from this portlet execution.
                    visualizer.step_visible = False
                    if len(workflow_step_list) > 0 and isinstance(workflow_step_list[-1], model.WorkflowStep):
                        self.workflow_service.set_dynamic_step_references(visualizer, workflow_step_list[-1].step_index)
                    else:
                        self.workflow_service.set_dynamic_step_references(visualizer, simulator_index)
                    ### Only for a single operation have the step of visualization, otherwise is useless.
                    if not group_launched:
                        workflow_step_list.append(visualizer)

        if group_launched:
            ###  For a group of operations, make sure the metric for PSE view 
            ### is also computed, immediately after the simulation.
            metric_algo = FlowService().get_algorithm_by_module_and_class(MEASURE_METRICS_MODULE, MEASURE_METRICS_CLASS)
            metric_interface = FlowService().prepare_adapter(project_id, metric_algo)
            dynamics = {}
            for entry in metric_interface:
                # We have a select that should be the dataType and a select multiple with the 
                # required metric algorithms to be evaluated. Only dynamic parameter should be
                # the select type.
                if entry[KEY_TYPE] == TYPE_SELECT:
                    dynamics[entry[KEY_NAME]] = {WorkflowStepConfiguration.DATATYPE_INDEX_KEY: 0,
                                                 WorkflowStepConfiguration.STEP_INDEX_KEY: simulator_index}
            metric_step = model.WorkflowStep(algorithm_id=metric_algo.id, step_index=simulator_index + 1,
                                             static_param={}, dynamic_param=dynamics)
            metric_step.step_visible = False
            workflow_step_list.insert(0, metric_step)

        workflows = self.workflow_service.create_and_store_workflow(project_id, burst_id, simulator_index,
                                                                    simulator_id, operations)
        self.operation_service.prepare_operations_for_workflowsteps(workflow_step_list, workflows, user_id,
                                                                    burst_id, project_id, group, operations)
        operation_ids = [operation.id for operation in operations]
        return operation_ids


    def _async_launch_and_prepare(self, burst_config, simulator_index, simulator_id, user_id):
        """
        Prepare operations asynchronously.
        """
        try:
            operation_ids = self._prepare_operations(burst_config, simulator_index, simulator_id, user_id)
            self.logger.debug("Starting a total of %s workflows" % (len(operation_ids, )))
            wf_errs = 0
            for operation_id in operation_ids:
                try:
                    OperationService().launch_operation(operation_id, True)
                except Exception as excep:
                    self.logger.error(excep)
                    wf_errs += 1
                    self.workflow_service.mark_burst_finished(burst_config, error_message=str(excep))

            self.logger.debug("Finished launching workflows. " + str(len(operation_ids) - wf_errs) +
                              " were launched successfully, " + str(wf_errs) + " had error on pre-launch steps")
        except Exception as excep:
            self.logger.error(excep)
            self.workflow_service.mark_burst_finished(burst_config, error_message=str(excep))


    @staticmethod
    def launch_visualization(visualization, frame_width=None, frame_height=None, is_preview=True):
        """
        :param visualization: a visualization workflow step
        """
        dynamic_params = visualization.dynamic_param
        static_params = visualization.static_param
        parameters_dict = static_params
        current_project_id = 0
        # Current operation id needed for export mechanism. So far just use
        # the operation of the workflow_step from which the inputs are taken.
        for param in dynamic_params:
            step_index = dynamic_params[param][WorkflowStepConfiguration.STEP_INDEX_KEY]
            datatype_index = dynamic_params[param][WorkflowStepConfiguration.DATATYPE_INDEX_KEY]
            referred_workflow_step = dao.get_workflow_step_by_step_index(visualization.fk_workflow, step_index)
            referred_operation_id = referred_workflow_step.fk_operation
            referred_operation = dao.get_operation_by_id(referred_operation_id)
            current_project_id = referred_operation.fk_launched_in
            if type(datatype_index) is IntType:
                # Entry is the output of a previous step ##
                datatypes = dao.get_results_for_operation(referred_operation_id)
                parameters_dict[param] = datatypes[datatype_index].gid
            else:
                # Entry is the input of a previous step ###
                parameters_dict[param] = json.loads(referred_operation.parameters)[datatype_index]
        algorithm = dao.get_algorithm_by_id(visualization.fk_algorithm)
        adapter_instance = ABCAdapter.build_adapter(algorithm)
        adapter_instance.current_project_id = current_project_id
        prepared_inputs = adapter_instance.prepare_ui_inputs(parameters_dict)
        if frame_width is not None:
            prepared_inputs[ABCDisplayer.PARAM_FIGURE_SIZE] = (frame_width, frame_height)

        if is_preview:
            result = adapter_instance.generate_preview(**prepared_inputs)
        else:
            result = adapter_instance.launch(**prepared_inputs)
        return result, parameters_dict
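    # Usage sketch (hypothetical workflow step): re-running a stored visualization
    # step at full size instead of as a preview:
    #
    #   result, params = BurstService.launch_visualization(vis_step, frame_width=800,
    #                                                      frame_height=600, is_preview=False)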


    def update_history_status(self, id_list):
        """
        For each burst_id received in the id_list, read the new status from DB and return a list of
        [id, status, is_group, message, running_time] entries.
        """
        result = []
        for b_id in id_list:
            burst = dao.get_burst_by_id(b_id)
            if burst is not None:
                burst.prepare_after_load()
                if burst.status == burst.BURST_RUNNING:
                    running_time = datetime.now() - burst.start_time
                else:
                    running_time = burst.finish_time - burst.start_time
                running_time = format_timedelta(running_time, most_significant2=False)

                if burst.status == burst.BURST_ERROR:
                    msg = 'Check Operations page for error Message'
                else:
                    msg = ''
                result.append([burst.id, burst.status, burst.is_group, msg, running_time])
            else:
                self.logger.debug("Could not find burst with id=" + str(b_id) + ". Might have been deleted by user!!")
        return result
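    # Each entry in the list returned above has the form
    #   [burst.id, burst.status, burst.is_group, error_message_hint, formatted_running_time]
    # (descriptive names only; see the append call in update_history_status for the exact fields).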


    def stop_burst(self, burst_entity):
        """
        Stop all the entities for the current burst and set the burst status to canceled.
        """
        burst_wfs = dao.get_workflows_for_burst(burst_entity.id)
        any_stopped = False
        for workflow in burst_wfs:
            wf_steps = dao.get_workflow_steps(workflow.id)
            for step in wf_steps:
                if step.fk_operation is not None:
                    self.logger.debug("We will stop operation: %d" % step.fk_operation)
                    any_stopped = self.operation_service.stop_operation(step.fk_operation) or any_stopped

        if any_stopped and burst_entity.status != burst_entity.BURST_CANCELED:
            self.workflow_service.mark_burst_finished(burst_entity, model.BurstConfiguration.BURST_CANCELED)
            return True
        return False


    @transactional
    def cancel_or_remove_burst(self, burst_id):
        """
        Cancel (if burst is still running) or Remove the burst given by burst_id.
        :returns: True when the burst was removed, False when it was only canceled.
        """
        burst_entity = dao.get_burst_by_id(burst_id)
        if burst_entity.status == burst_entity.BURST_RUNNING:
            self.stop_burst(burst_entity)
            return False

        service = ProjectService()
        ## Remove each DataType in current burst.
        ## We can not leave all on cascade, because it won't work on SQLite for mapped dataTypes.
        datatypes = dao.get_all_datatypes_in_burst(burst_id)
        ## Get operations linked to current burst before removing the burst or else 
        ##    the burst won't be there to identify operations any more.
        remaining_ops = dao.get_operations_in_burst(burst_id)

        # Remove burst first to delete work-flow steps which still hold foreign keys to operations.
        correct = dao.remove_entity(burst_entity.__class__, burst_id)
        if not correct:
            raise RemoveDataTypeException("Could not remove Burst entity!")

        for datatype in datatypes:
            service.remove_datatype(burst_entity.fk_project, datatype.gid, False)

        ## Remove all Operations remained.
        correct = True
        remaining_op_groups = set()
        project = dao.get_project_by_id(burst_entity.fk_project)

        for oper in remaining_ops:
            is_remaining = dao.get_generic_entity(oper.__class__, oper.id)
            if len(is_remaining) == 0:
                ### Operation removed cascaded.
                continue
            if oper.fk_operation_group is not None and oper.fk_operation_group not in remaining_op_groups:
                is_remaining = dao.get_generic_entity(model.OperationGroup, oper.fk_operation_group)
                if len(is_remaining) > 0:
                    remaining_op_groups.add(oper.fk_operation_group)
                    correct = correct and dao.remove_entity(model.OperationGroup, oper.fk_operation_group)
            correct = correct and dao.remove_entity(oper.__class__, oper.id)
            service.structure_helper.remove_operation_data(project.name, oper.id)

        if not correct:
            raise RemoveDataTypeException("Could not remove Burst because a linked operation could not be dropped!!")
        return True
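    # Usage sketch (hypothetical caller): a controller could report back to the user
    # based on the boolean returned above:
    #
    #   removed = BurstService().cancel_or_remove_burst(burst_id)
    #   message = "Burst removed." if removed else "Burst cancel requested."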


    @staticmethod
    def get_portlet_status(portlet_cfg):
        """ 
        Get the status of a portlet configuration. 
        """
        if portlet_cfg.analyzers:
            for analyze_step in portlet_cfg.analyzers:
                operation = dao.try_get_operation_by_id(analyze_step.fk_operation)
                if operation is None:
                    return model.STATUS_ERROR, "Operation has been removed"
                if operation.status != model.STATUS_FINISHED:
                    return operation.status, operation.additional_info or ''
        else:
            ## Simulator is first step so now decide if we are waiting for input or output ##
            visualizer = portlet_cfg.visualizer
            wait_on_outputs = False
            for entry in visualizer.dynamic_param:
                if type(visualizer.dynamic_param[entry][WorkflowStepConfiguration.DATATYPE_INDEX_KEY]) == IntType:
                    wait_on_outputs = True
                    break
            if wait_on_outputs:
                simulator_step = dao.get_workflow_step_by_step_index(visualizer.fk_workflow, 0)
                operation = dao.try_get_operation_by_id(simulator_step.fk_operation)
                if operation is None:
                    error_msg = ("At least one simulation result was not found, it might have been removed. <br\>"
                                 "You can copy and relaunch current simulation, if you are interested in having "
                                 "your results re-computed.")
                    return model.STATUS_ERROR, error_msg
                else:
                    return operation.status, operation.additional_info or ''
        return model.STATUS_FINISHED, ''
Example #15
class OperationServiceTest(BaseTestCase):
    """
    Test class for the introspection module. Some tests from here do async launches. For those
    cases Transactional tests won't work.
    TODO: this still needs to be refactored; it is huge, with duplicates and many irrelevant checks
    """

    def setUp(self):
        """
        Reset the database before each test.
        """
        self.clean_database()
        initialize_storage()
        self.test_user = TestFactory.create_user()
        self.test_project = TestFactory.create_project(self.test_user)
        self.operation_service = OperationService()
        self.backup_hdd_size = TvbProfile.current.MAX_DISK_SPACE

    def tearDown(self):
        """
        Reset the database when test is done.
        """
        TvbProfile.current.MAX_DISK_SPACE = self.backup_hdd_size
        self.clean_database()

    def _assert_no_dt2(self):
        count = dao.count_datatypes(self.test_project.id, Datatype2)
        self.assertEqual(0, count)

    def _assert_stored_dt2(self, expected_cnt=1):
        count = dao.count_datatypes(self.test_project.id, Datatype2)
        self.assertEqual(expected_cnt, count)
        datatype = dao.try_load_last_entity_of_type(self.test_project.id, Datatype2)
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        return datatype

    def test_datatypes_groups(self):
        """
        Tests if the dataType group is set correctly on the dataTypes resulting from the same operation group.
        """
        flow_service = FlowService()

        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        self.assertEqual(len(all_operations), 0, "There should be no operation")

        algogroup = dao.find_group("tvb.tests.framework.adapters.testadapter3", "TestAdapter3")
        group, _ = flow_service.prepare_adapter(self.test_project.id, algogroup)
        adapter_instance = flow_service.build_adapter_instance(group)
        data = {model.RANGE_PARAMETER_1: "param_5", "param_5": [1, 2]}
        ## Create Group of operations
        flow_service.fire_operation(adapter_instance, self.test_user, self.test_project.id, **data)

        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        self.assertEqual(len(all_operations), 1, "Expected one operation group")
        self.assertEqual(all_operations[0][2], 2, "Expected 2 operations in group")

        operation_group_id = all_operations[0][3]
        self.assertNotEquals(operation_group_id, None, "The operation should be part of a group.")

        self.operation_service.stop_operation(all_operations[0][0])
        self.operation_service.stop_operation(all_operations[0][1])
        ## Make sure operations are executed
        self.operation_service.launch_operation(all_operations[0][0], False)
        self.operation_service.launch_operation(all_operations[0][1], False)

        resulted_datatypes = dao.get_datatype_in_group(operation_group_id=operation_group_id)
        self.assertTrue(len(resulted_datatypes) >= 2, "Expected at least 2, but: " + str(len(resulted_datatypes)))

        dt = dao.get_datatype_by_id(resulted_datatypes[0].id)
        datatype_group = dao.get_datatypegroup_by_op_group_id(operation_group_id)
        self.assertEqual(dt.fk_datatype_group, datatype_group.id, "DataTypeGroup is incorrect")

    def test_initiate_operation(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        module = "tvb.tests.framework.adapters.testadapter1"
        class_name = "TestAdapter1"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        output = adapter.get_output()
        output_type = output[0].__name__
        data = {"test1_val1": 5, "test1_val2": 5}
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        res = self.operation_service.initiate_operation(
            self.test_user, self.test_project.id, adapter, tmp_folder, **data
        )
        self.assertTrue(res.index("has finished.") > 10, "Operation didn't finish")
        group = dao.find_group(module, class_name)
        self.assertEqual(group.module, "tvb.tests.framework.adapters.testadapter1", "Wrong data stored.")
        self.assertEqual(group.classname, "TestAdapter1", "Wrong data stored.")
        dts, count = dao.get_values_of_datatype(self.test_project.id, Datatype1)
        self.assertEqual(count, 1)
        self.assertEqual(len(dts), 1)
        datatype = dao.get_datatype_by_id(dts[0][0])
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        self.assertEqual(datatype.type, output_type, "Wrong data stored.")

    def test_delete_dt_free_HDD_space(self):
        """
        Launch an operation with only enough disk space for its result, then remove that DataType
        so a second launch can finish within the same quota.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")

        self._assert_no_dt2()
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        datatype = self._assert_stored_dt2()

        # Now free some space and relaunch
        ProjectService().remove_datatype(self.test_project.id, datatype.gid)
        self._assert_no_dt2()
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_stored_dt2()

    def test_launch_two_ops_HDD_with_space(self):
        """
        Launch two operations and give enough available space for user so that both should finish.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = 2 * float(adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")

        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        datatype = self._assert_stored_dt2()

        # Now update the maximum disk size to be the size of the previously resulted datatypes (transform from kB to MB)
        # plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size) + float(adapter.get_required_disk_size(**data))

        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_stored_dt2(2)

    def test_launch_two_ops_HDD_full_space(self):
        """
        Launch two operations and give available space for user so that the first should finish,
        but after the update to the user hdd size the second should not.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)

        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = 1 + float(adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)

        datatype = self._assert_stored_dt2()
        # Now update the maximum disk size to be less than size of the previously resulted datatypes (transform kB to MB)
        # plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size - 1) + float(
            adapter.get_required_disk_size(**data) - 1
        )

        self.assertRaises(
            NoMemoryAvailableException,
            self.operation_service.initiate_operation,
            self.test_user,
            self.test_project.id,
            adapter,
            tmp_folder,
            **data
        )
        self._assert_stored_dt2()

    def test_launch_operation_HDD_with_space(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 100}

        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_stored_dt2()

    def test_launch_operation_HDD_with_space_started_ops(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        space_taken_by_started = 100
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        started_operation = model.Operation(
            self.test_user.id,
            self.test_project.id,
            group.id,
            "",
            status=model.STATUS_STARTED,
            estimated_disk_size=space_taken_by_started,
        )
        dao.store_entity(started_operation)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data) + space_taken_by_started)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter, tmp_folder, **data)
        self._assert_stored_dt2()

    def test_launch_operation_HDD_full_space(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data) - 1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.assertRaises(
            NoMemoryAvailableException,
            self.operation_service.initiate_operation,
            self.test_user,
            self.test_project.id,
            adapter,
            tmp_folder,
            **data
        )
        self._assert_no_dt2()

    def test_launch_operation_HDD_full_space_started_ops(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        space_taken_by_started = 100
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        started_operation = model.Operation(
            self.test_user.id,
            self.test_project.id,
            group.id,
            "",
            status=model.STATUS_STARTED,
            estimated_disk_size=space_taken_by_started,
        )
        dao.store_entity(started_operation)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data) + space_taken_by_started - 1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.assertRaises(
            NoMemoryAvailableException,
            self.operation_service.initiate_operation,
            self.test_user,
            self.test_project.id,
            adapter,
            tmp_folder,
            **data
        )
        self._assert_no_dt2()

    def test_stop_operation(self):
        """
        Test that an operation is successfully stopped.
        """
        module = "tvb.tests.framework.adapters.testadapter2"
        class_name = "TestAdapter2"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 5}
        algo_group = adapter.algorithm_group
        algo_category = dao.get_category_by_id(algo_group.fk_category)
        algo = dao.get_algorithm_by_group(algo_group.id)
        operations, _ = self.operation_service.prepare_operations(
            self.test_user.id, self.test_project.id, algo, algo_category, {}, **data
        )
        self.operation_service._send_to_cluster(operations, adapter)
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        self.assertEqual(operation.status, model.STATUS_CANCELED, "Operation should have been canceled!")

    def test_stop_operation_finished(self):
        """
        Test that an operation that is already finished is not changed by the stop operation.
        """
        module = "tvb.tests.framework.adapters.testadapter1"
        class_name = "TestAdapter1"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test1_val1": 5, "test1_val2": 5}
        algo_group = adapter.algorithm_group
        algo_category = dao.get_category_by_id(algo_group.fk_category)
        algo = dao.get_algorithm_by_group(algo_group.id)
        operations, _ = self.operation_service.prepare_operations(
            self.test_user.id, self.test_project.id, algo, algo_category, {}, **data
        )
        self.operation_service._send_to_cluster(operations, adapter)
        operation = dao.get_operation_by_id(operations[0].id)
        operation.status = model.STATUS_FINISHED
        dao.store_entity(operation)
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        self.assertEqual(operation.status, model.STATUS_FINISHED, "Operation shouldn't have been canceled!")

    def test_array_from_string(self):
        """
        Simple test for parse array on 1d, 2d and 3d array.
        """
        row = {
            "description": "test.",
            "default": "None",
            "required": True,
            "label": "test: ",
            "attributes": None,
            "quantifier": "manual",
            "elementType": "float",
            "type": "array",
            "options": None,
            "name": "test",
        }
        input_data_string = "[ [1 2 3] [4 5 6]]"
        output = string2array(input_data_string, " ", row["elementType"])
        self.assertEqual(output.shape, (2, 3), "Dimensions not properly parsed")
        for i in output[0]:
            self.assertTrue(i in [1, 2, 3])
        for i in output[1]:
            self.assertTrue(i in [4, 5, 6])
        input_data_string = "[1, 2, 3, 4, 5, 6]"
        output = string2array(input_data_string, ",", row["elementType"])
        self.assertEqual(output.shape, (6,), "Dimensions not properly parsed")
        for i in output:
            self.assertTrue(i in [1, 2, 3, 4, 5, 6])
        input_data_string = "[ [ [1,1], [2, 2] ], [ [3 ,3], [4,4] ] ]"
        output = string2array(input_data_string, ",", row["elementType"])
        self.assertEqual(output.shape, (2, 2, 2), "Wrong dimensions.")
        for i in output[0][0]:
            self.assertTrue(i == 1)
        for i in output[0][1]:
            self.assertTrue(i == 2)
        for i in output[1][0]:
            self.assertTrue(i == 3)
        for i in output[1][1]:
            self.assertTrue(i == 4)
        row = {
            "description": "test.",
            "default": "None",
            "required": True,
            "label": "test: ",
            "attributes": None,
            "quantifier": "manual",
            "elementType": "str",
            "type": "array",
            "options": None,
            "name": "test",
        }
        input_data_string = "[1, 2, 3, 4, 5, 6]"
        output = string2array(input_data_string, ",", row["elementType"])
        for i in output:
            self.assertTrue(i in [1, 2, 3, 4, 5, 6])

    def test_wrong_array_from_string(self):
        """Test that parsing an array from string is throwing the expected 
        exception when wrong input string"""
        row = {
            "description": "test.",
            "default": "None",
            "required": True,
            "label": "test: ",
            "attributes": None,
            "quantifier": "manual",
            "elementType": "float",
            "type": "array",
            "options": None,
            "name": "test",
        }
        input_data_string = "[ [1,2 3] [4,5,6]]"
        self.assertRaises(ValueError, string2array, input_data_string, ",", row["elementType"])
        input_data_string = "[ [1,2,wrong], [4, 5, 6]]"
        self.assertRaises(ValueError, string2array, input_data_string, ",", row["elementType"])
        row = {
            "description": "test.",
            "default": "None",
            "required": True,
            "label": "test: ",
            "attributes": None,
            "quantifier": "manual",
            "elementType": "str",
            "type": "array",
            "options": None,
            "name": "test",
        }
        output = string2array(input_data_string, ",", row["elementType"])
        self.assertEqual(output.shape, (2, 3))
        self.assertEqual(output[0][2], "wrong", "String data not converted properly")
        input_data_string = "[ [1,2 3] [4,5,6]]"
        output = string2array(input_data_string, ",", row["elementType"])
        self.assertEqual(output[0][1], "2 3")

    def test_reduce_dimension_component(self):
        """
         This method tests if the data passed to the launch method of
         the NDimensionArrayAdapter adapter is correct. The passed data should be a list
         of arrays with one dimension.
        """
        inserted_count = FlowService().get_available_datatypes(
            self.test_project.id, "tvb.datatypes.arrays.MappedArray"
        )[1]
        self.assertEqual(inserted_count, 0, "Expected to find no data.")
        # create an operation
        algorithm_id = (
            FlowService()
            .get_algorithm_by_module_and_class(
                "tvb.tests.framework.adapters.ndimensionarrayadapter", "NDimensionArrayAdapter"
            )[0]
            .id
        )
        operation = model.Operation(
            self.test_user.id,
            self.test_project.id,
            algorithm_id,
            "test params",
            meta=json.dumps({DataTypeMetaData.KEY_STATE: "RAW_DATA"}),
            status=model.STATUS_FINISHED,
        )
        operation = dao.store_entity(operation)
        # save the array wrapper in DB
        adapter_instance = NDimensionArrayAdapter()
        PARAMS = {}
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        inserted_data = FlowService().get_available_datatypes(self.test_project.id, "tvb.datatypes.arrays.MappedArray")[
            0
        ]
        self.assertEqual(len(inserted_data), 1, "Problems when inserting data")
        gid = inserted_data[0][2]
        entity = dao.get_datatype_by_gid(gid)
        # from the 3D array do not select any array
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": "requiredDim_1",
            "input_data_dimensions_1": "",
            "input_data_dimensions_2": "",
        }
        try:
            self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        except Exception:
            # OK, do nothing; the launch is expected to fail,
            # since the result should have been a 1D array
            pass
        else:
            self.fail("Test should not pass. The resulting array should be a 1D array.")
        # from the 3D array select only a 1D array
        first_dim = [gid + "_1_0", "requiredDim_1"]
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": first_dim,
            "input_data_dimensions_1": gid + "_2_1",
        }
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        expected_result = entity.array_data[:, 0, 1]
        actual_result = adapter_instance.launch_param
        self.assertEqual(len(actual_result), len(expected_result), "Not the same size for results!")
        self.assertTrue(numpy.equal(actual_result, expected_result).all())

        # from the 3D array select a 2D array
        first_dim = [gid + "_1_0", gid + "_1_1", "requiredDim_2"]
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": first_dim,
            "input_data_dimensions_1": gid + "_2_1",
        }
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        expected_result = entity.array_data[slice(0, None), [0, 1], 1]
        actual_result = adapter_instance.launch_param
        self.assertEqual(len(actual_result), len(expected_result), "Not the same size for results!")
        self.assertTrue(numpy.equal(actual_result, expected_result).all())

        # from 3D array select 1D array by applying SUM function on the first
        # dimension and average function on the second dimension
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": ["requiredDim_1", "func_sum"],
            "input_data_dimensions_1": "func_average",
            "input_data_dimensions_2": "",
        }
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        aux = numpy.sum(entity.array_data, axis=0)
        expected_result = numpy.average(aux, axis=0)
        actual_result = adapter_instance.launch_param
        self.assertEqual(len(actual_result), len(expected_result), "Not the same size of results!")
        self.assertTrue(numpy.equal(actual_result, expected_result).all())

        # from 3D array select a 2D array and apply op. on the second dimension
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": ["requiredDim_2", "func_sum", "expected_shape_x,512", "operations_x,&gt;"],
            "input_data_dimensions_1": "",
            "input_data_dimensions_2": "",
        }
        try:
            self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        except Exception:
            # OK, do nothing; the launch is expected to fail,
            # since the second dimension of the array should be > 512
            pass
        else:
            self.fail("Test should not pass! The second dimension of the array should be > 512.")
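        # The same expectation could be written more compactly with assertRaises
        # (sketch only; it assumes initiate_prelaunch raises a plain Exception here):
        #
        #   self.assertRaises(Exception, self.operation_service.initiate_prelaunch,
        #                     operation, adapter_instance, {}, **PARAMS)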
Example #16
class BurstService:
    """
    Service layer for Burst related entities.
    """

    def __init__(self):
        self.operation_service = OperationService()
        self.workflow_service = WorkflowService()
        self.logger = get_logger(self.__class__.__module__)

    def build_portlet_interface(self, portlet_configuration, project_id):
        """
        From a portlet_id and a project_id, first build the portlet
        entity then get its configurable interface.
        
        :param portlet_configuration: a portlet configuration entity. It holds at the
            least the portlet_id, and in case any default parameters were saved
            they can be rebuilt from the analyzers // visualizer parameters
        :param project_id: the id of the current project   
            
        :returns: the portlet interface will be of the following form::
            [{'interface': adapter_interface, 
            'prefix': prefix_for_parameter_names, 
            'subalg': {algorithm_field_name: default_algorithm_value},
            'algo_group': algorithm_group,
            'alg_ui_name': displayname},
            ......]
            A list of dictionaries for each adapter that makes up the portlet.
            
        """
        portlet_entity = dao.get_portlet_by_id(portlet_configuration.portlet_id)
        if portlet_entity is None:
            raise InvalidPortletConfiguration(
                "No portlet entity located in database with id=%s. "
                "Portlet configuration %s is not valid." % (portlet_configuration.portlet_id, portlet_configuration)
            )
        portlet_configurer = PortletConfigurer(portlet_entity)
        portlet_interface = portlet_configurer.get_configurable_interface()
        self.logger.debug("Created interface for portlet " + str([portlet_entity]))

        for adapter_conf in portlet_interface:
            interface = adapter_conf.interface
            interface = FlowService().prepare_parameters(interface, project_id, adapter_conf.group.fk_category)
            interface = ABCAdapter.prepare_param_names(interface, adapter_conf.prefix)
            adapter_conf.interface = interface

        portlet_configurer.update_default_values(portlet_interface, portlet_configuration)
        portlet_configurer.prefix_adapters_parameters(portlet_interface)

        return portlet_interface

    @staticmethod
    def update_portlet_configuration(portlet_configuration, submited_parameters):
        """
        :param portlet_configuration: the portlet configuration that needs to be updated
        :param submited_parameters: a list of parameters as submitted from the UI. This 
            is a dictionary in the form : 
            {'dynamic' : {name:value pairs}, 'static' : {name:value pairs}}
            
        All names are prefixed with adapter specific generated prefix.
        """
        portlet_entity = dao.get_portlet_by_id(portlet_configuration.portlet_id)
        portlet_configurer = PortletConfigurer(portlet_entity)
        return portlet_configurer.update_portlet_configuration(portlet_configuration, submited_parameters)

    @staticmethod
    def new_burst_configuration(project_id):
        """
        Return a new burst configuration entity with all the default values.
        """
        burst_configuration = model.BurstConfiguration(project_id)
        burst_configuration.selected_tab = 0
        BurstService.set_default_portlets(burst_configuration)
        return burst_configuration

    @staticmethod
    def set_default_portlets(burst_configuration):
        """
        Sets the default portlets for the specified burst configuration.
        The default portlets are specified in the __init__.py script from tvb root.
        """
        for tab_idx, value in DEFAULT_PORTLETS.items():
            for sel_idx, portlet_identifier in value.items():
                portlet = BurstService.get_portlet_by_identifier(portlet_identifier)
                if portlet is not None:
                    portlet_configuration = BurstService.new_portlet_configuration(
                        portlet.id, tab_idx, sel_idx, portlet.algorithm_identifier
                    )
                    burst_configuration.set_portlet(tab_idx, sel_idx, portlet_configuration)

    @staticmethod
    def _store_burst_config(burst_config):
        """
        Store a burst configuration entity.
        """
        burst_config.prepare_before_save()
        saved_entity = dao.store_entity(burst_config)
        return saved_entity.id

    @staticmethod
    def get_available_bursts(project_id):
        """
        Return all the bursts for the current project.
        """
        bursts = dao.get_bursts_for_project(project_id, page_size=MAX_BURSTS_DISPLAYED) or []
        for burst in bursts:
            burst.prepare_after_load()
        return bursts

    @staticmethod
    def populate_burst_disk_usage(bursts):
        """
        Adds a disk_size field to each burst object.
        The disk size is computed as the sum of the sizes of the datatypes generated by a burst.
        """
        sizes = dao.compute_bursts_disk_size([b.id for b in bursts])
        for b in bursts:
            b.disk_size = format_bytes_human(sizes[b.id])

    @staticmethod
    def rename_burst(burst_id, new_name):
        """
        Rename the burst given by burst_id, setting its name to new_name.
        """
        burst = dao.get_burst_by_id(burst_id)
        burst.name = new_name
        dao.store_entity(burst)

    def load_burst(self, burst_id):
        """
        :param burst_id: the id of the burst that should be loaded
        
        Having this input the method should:
        
            - load the entity from the DB
            - get all the workflow steps for the saved burst id
            - go through the visualization workflow steps to create the tab
                configuration of the burst using the tab_index and index_in_tab 
                fields saved on each workflow_step
                
        """
        burst = dao.get_burst_by_id(burst_id)
        burst.prepare_after_load()
        burst.reset_tabs()
        burst_workflows = dao.get_workflows_for_burst(burst.id)

        group_gid = None
        if len(burst_workflows) == 1:
            # A simple burst with no range parameters
            burst = self.__populate_tabs_from_workflow(burst, burst_workflows[0])
        elif len(burst_workflows) > 1:
            # A burst workflow with a range of values, created multiple workflows and need
            # to launch parameter space exploration with the resulted group
            self.__populate_tabs_from_workflow(burst, burst_workflows[0])
            executed_steps = dao.get_workflow_steps(burst_workflows[0].id)

            operation = dao.get_operation_by_id(executed_steps[0].fk_operation)
            if operation.operation_group:
                workflow_group = dao.get_datatypegroup_by_op_group_id(operation.operation_group.id)
                group_gid = workflow_group.gid
        return burst, group_gid

    @staticmethod
    def __populate_tabs_from_workflow(burst_entity, workflow):
        """
        Given a burst and a workflow populate the tabs of the burst with the PortletConfigurations
        generated from the steps of the workflow.
        """
        visualizers = dao.get_visualization_steps(workflow.id)
        for entry in visualizers:
            ## For each visualize step, also load all of the analyze steps.
            portlet_cfg = PortletConfiguration(entry.fk_portlet)
            portlet_cfg.set_visualizer(entry)
            analyzers = dao.get_workflow_steps_for_position(entry.fk_workflow, entry.tab_index, entry.index_in_tab)
            portlet_cfg.set_analyzers(analyzers)
            burst_entity.tabs[entry.tab_index].portlets[entry.index_in_tab] = portlet_cfg
        return burst_entity

    def load_tab_configuration(self, burst_entity, op_id):
        """
        Given a burst entity and an operation id, find the workflow to which the op_id
        belongs and then load the burst_entity's tab configuration with those workflow steps.
        """
        originating_workflow = dao.get_workflow_for_operation_id(op_id)
        burst_entity = self.__populate_tabs_from_workflow(burst_entity, originating_workflow)
        return burst_entity

    @staticmethod
    def new_portlet_configuration(portlet_id, tab_nr=-1, index_in_tab=-1, portlet_name="Default"):
        """
        Return a new portlet configuration entity with default parameters.
        
        :param portlet_id: the id of the portlet for which a configuration will
            be stored
        :param tab_nr: the index of the currently selected tab
        :param index_in_tab: the index from the currently selected tab
        
        """
        portlet_entity = dao.get_portlet_by_id(portlet_id)
        if portlet_entity is None:
            raise InvalidPortletConfiguration("No portlet entity located in database with id=%s." % portlet_id)
        portlet_configurer = PortletConfigurer(portlet_entity)
        configuration = portlet_configurer.create_new_portlet_configuration(portlet_name)
        for wf_step in configuration.analyzers:
            wf_step.tab_index = tab_nr
            wf_step.index_in_tab = index_in_tab
        configuration.visualizer.tab_index = tab_nr
        configuration.visualizer.index_in_tab = index_in_tab
        return configuration

    @staticmethod
    def get_available_portlets():
        """
        :returns: a list of all the available portlet entities
        """
        return dao.get_available_portlets()

    @staticmethod
    def get_portlet_by_id(portlet_id):
        """
        :returns: the portlet entity with the id =@portlet_id
        """
        return dao.get_portlet_by_id(portlet_id)

    @staticmethod
    def get_portlet_by_identifier(portlet_identifier):
        """
        :returns: the portlet entity with the algorithm identifier =@portlet_identifier
        """
        return dao.get_portlet_by_identifier(portlet_identifier)

    def launch_burst(self, burst_configuration, simulator_index, simulator_id, user_id, launch_mode=LAUNCH_NEW):
        """
        Given a burst configuration and all the necessary data do the actual launch.
        
        :param burst_configuration: BurstConfiguration   
        :param simulator_index: the position the simulator will take within the workflow step list. This is needed
            so that the rest of the portlet workflow steps know which steps their dynamic parameters come from.
        :param simulator_id: the id of the simulator adapter as stored in the DB. It's needed to load the simulator algo
            group and category that are then passed to the launcher's prepare_operation method.
        :param user_id: the id of the user that launched this burst
        :param launch_mode: new/branch/continue
        """
        ## 1. Prepare BurstConfiguration entity
        if launch_mode == LAUNCH_NEW:
            ## Fully new entity for new simulation
            burst_config = burst_configuration.clone()
            if burst_config.name is None:
                new_id = dao.get_max_burst_id() + 1
                burst_config.name = "simulation_" + str(new_id)
        else:
            ## Branch or Continue simulation
            burst_config = burst_configuration
            simulation_state = dao.get_generic_entity(
                SIMULATION_DATATYPE_MODULE + "." + SIMULATION_DATATYPE_CLASS, burst_config.id, "fk_parent_burst"
            )
            if simulation_state is None or len(simulation_state) < 1:
                exc = BurstServiceException(
                    "Simulation State not found for %s, " "thus we are unable to branch from it!" % burst_config.name
                )
                self.logger.error(exc)
                raise exc

            simulation_state = simulation_state[0]
            burst_config.update_simulation_parameter("simulation_state", simulation_state.gid)
            burst_config = burst_configuration.clone()

            count = dao.count_bursts_with_name(burst_config.name, burst_config.fk_project)
            burst_config.name = burst_config.name + "_" + launch_mode + str(count)

        ## 2. Create Operations and do the actual launch
        if launch_mode in [LAUNCH_NEW, LAUNCH_BRANCH]:
            ## New Burst entry in the history
            burst_id = self._store_burst_config(burst_config)
            thread = threading.Thread(
                target=self._async_launch_and_prepare,
                kwargs={
                    "burst_config": burst_config,
                    "simulator_index": simulator_index,
                    "simulator_id": simulator_id,
                    "user_id": user_id,
                },
            )
            thread.start()
            return burst_id, burst_config.name
        else:
            ## Continue simulation
            ## TODO
            return burst_config.id, burst_config.name

    @transactional
    def _prepare_operations(self, burst_config, simulator_index, simulator_id, user_id):
        """
        Prepare all required operations for burst launch.
        """
        project_id = burst_config.fk_project
        burst_id = burst_config.id
        workflow_step_list = []
        starting_index = simulator_index + 1

        sim_algo = FlowService().get_algorithm_by_identifier(simulator_id)
        metadata = {DataTypeMetaData.KEY_BURST: burst_id}
        launch_data = burst_config.get_all_simulator_values()[0]
        operations, group = self.operation_service.prepare_operations(
            user_id, project_id, sim_algo, sim_algo.algo_group.group_category, metadata, **launch_data
        )
        group_launched = group is not None
        if group_launched:
            starting_index += 1

        for tab in burst_config.tabs:
            for portlet_cfg in tab.portlets:
                ### For each portlet configuration stored, update the step index ###
                ### and also change the dynamic parameters step indexes to point ###
                ### to the simulator outputs.                                     ##
                if portlet_cfg is not None:
                    analyzers = portlet_cfg.analyzers
                    visualizer = portlet_cfg.visualizer
                    for entry in analyzers:
                        entry.step_index = starting_index
                        self.workflow_service.set_dynamic_step_references(entry, simulator_index)
                        workflow_step_list.append(entry)
                        starting_index += 1
                    ### Change the dynamic parameters to point to the last adapter from this portlet execution.
                    visualizer.step_visible = False
                    if len(workflow_step_list) > 0 and isinstance(workflow_step_list[-1], model.WorkflowStep):
                        self.workflow_service.set_dynamic_step_references(visualizer, workflow_step_list[-1].step_index)
                    else:
                        self.workflow_service.set_dynamic_step_references(visualizer, simulator_index)
                    ### Add the visualization step only for a single operation; for a group it is useless.
                    if not group_launched:
                        workflow_step_list.append(visualizer)

        if group_launched:
            ### For a group of operations, make sure the metric for the PSE view
            ### is also computed, immediately after the simulation.
            metric_algo, metric_group = FlowService().get_algorithm_by_module_and_class(
                MEASURE_METRICS_MODULE, MEASURE_METRICS_CLASS
            )
            _, metric_interface = FlowService().prepare_adapter(project_id, metric_group)
            dynamics = {}
            for entry in metric_interface:
                # We have a select that should be the dataType and a select multiple with the
                # required metric algorithms to be evaluated. Only dynamic parameter should be
                # the select type.
                if entry[ABCAdapter.KEY_TYPE] == "select":
                    dynamics[entry[ABCAdapter.KEY_NAME]] = {
                        WorkflowStepConfiguration.DATATYPE_INDEX_KEY: 0,
                        WorkflowStepConfiguration.STEP_INDEX_KEY: simulator_index,
                    }
            metric_step = model.WorkflowStep(
                algorithm_id=metric_algo.id, step_index=simulator_index + 1, static_param={}, dynamic_param=dynamics
            )
            metric_step.step_visible = False
            workflow_step_list.insert(0, metric_step)

        workflows = self.workflow_service.create_and_store_workflow(
            project_id, burst_id, simulator_index, simulator_id, operations
        )
        self.operation_service.prepare_operations_for_workflowsteps(
            workflow_step_list, workflows, user_id, burst_id, project_id, group, operations
        )
        operation_ids = [operation.id for operation in operations]
        return operation_ids

    def _async_launch_and_prepare(self, burst_config, simulator_index, simulator_id, user_id):
        """
        Prepare operations asynchronously.
        """
        try:
            operation_ids = self._prepare_operations(burst_config, simulator_index, simulator_id, user_id)
            self.logger.debug("Starting a total of %s workflows" % (len(operation_ids)))
            wf_errs = 0
            for operation_id in operation_ids:
                try:
                    OperationService().launch_operation(operation_id, True)
                except Exception as excep:
                    self.logger.error(excep)
                    wf_errs += 1
                    self.workflow_service.mark_burst_finished(burst_config, error_message=str(excep))

            self.logger.debug(
                "Finished launching workflows. "
                + str(len(operation_ids) - wf_errs)
                + " were launched successfully, "
                + str(wf_errs)
                + " had error on pre-launch steps"
            )
        except Exception as excep:
            self.logger.error(excep)
            self.workflow_service.mark_burst_finished(burst_config, error_message=str(excep))
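
A side note on the naming logic in launch_burst above: a brand new burst gets an auto-generated name of the form "simulation_<next id>", while a branched or continued burst keeps its name and gains a suffix made of the launch mode and the count of bursts already using that name. A tiny standalone sketch of that rule (illustrative only; the mode strings are assumed placeholders, not the real LAUNCH_* constants):

    def derive_burst_name(launch_mode, current_name=None, max_burst_id=0, same_name_count=0):
        # Mirrors the naming branches of launch_burst: a new launch invents a fresh name,
        # branch/continue launches suffix the existing one with the mode and a counter.
        if launch_mode == "new":
            return current_name or "simulation_" + str(max_burst_id + 1)
        return current_name + "_" + launch_mode + str(same_name_count)

    assert derive_burst_name("new", max_burst_id=41) == "simulation_42"
    assert derive_burst_name("branch", current_name="simulation_42", same_name_count=1) == "simulation_42_branch1"
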
Example #17
class OperationServiceTest(BaseTestCase):
    """
    Test class for the introspection module. Some tests from here do async launches. For those
    cases Transactional tests won't work.
    """


    def setUp(self):
        """
        Reset the database before each test.
        """
        self.clean_database()
        initialize_storage()
        self.test_user = TestFactory.create_user()
        self.test_project = TestFactory.create_project(self.test_user)
        self.operation_service = OperationService()
        self.backup_hdd_size = TvbProfile.current.MAX_DISK_SPACE


    def tearDown(self):
        """
        Reset the database when test is done.
        """
        TvbProfile.current.MAX_DISK_SPACE = self.backup_hdd_size
        self.clean_database()


    def test_datatypes_groups(self):
        """
        Tests whether the DataType group is set correctly on the DataTypes resulting from the same operation group.
        """
        flow_service = FlowService()

        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        self.assertEqual(len(all_operations), 0, "There should be no operation")

        algogroup = dao.find_group('tvb.tests.framework.adapters.testadapter3', 'TestAdapter3')
        group, _ = flow_service.prepare_adapter(self.test_project.id, algogroup)
        adapter_instance = flow_service.build_adapter_instance(group)
        data = {model.RANGE_PARAMETER_1: 'param_5', 'param_5': [1, 2]}
        ## Create Group of operations
        flow_service.fire_operation(adapter_instance, self.test_user, self.test_project.id, **data)

        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        self.assertEqual(len(all_operations), 1, "Expected one operation group")
        self.assertEqual(all_operations[0][2], 2, "Expected 2 operations in group")

        operation_group_id = all_operations[0][3]
        self.assertNotEquals(operation_group_id, None, "The operation should be part of a group.")

        self.operation_service.stop_operation(all_operations[0][0])
        self.operation_service.stop_operation(all_operations[0][1])
        ## Make sure operations are executed
        self.operation_service.launch_operation(all_operations[0][0], False)
        self.operation_service.launch_operation(all_operations[0][1], False)

        resulted_datatypes = dao.get_datatype_in_group(operation_group_id=operation_group_id)
        self.assertTrue(len(resulted_datatypes) >= 2, "Expected at least 2, but: " + str(len(resulted_datatypes)))

        dt = dao.get_datatype_by_id(resulted_datatypes[0].id)
        datatype_group = dao.get_datatypegroup_by_op_group_id(operation_group_id)
        self.assertEqual(dt.fk_datatype_group, datatype_group.id, "DataTypeGroup is incorrect")


    def test_initiate_operation(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        module = "tvb.tests.framework.adapters.testadapter1"
        class_name = "TestAdapter1"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        output = adapter.get_output()
        output_type = output[0].__name__
        data = {"test1_val1": 5, "test1_val2": 5}
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        res = self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter,
                                                        tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        self.assertTrue(res.index("has finished.") > 10, "Operation didn't finish")
        group = dao.find_group(module, class_name)
        self.assertEqual(group.module, 'tvb.tests.framework.adapters.testadapter1', "Wrong data stored.")
        self.assertEqual(group.classname, 'TestAdapter1', "Wrong data stored.")
        dts, count = dao.get_values_of_datatype(self.test_project.id, Datatype1)
        self.assertEqual(count, 1)
        self.assertEqual(len(dts), 1)
        datatype = dao.get_datatype_by_id(dts[0][0])
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        self.assertEqual(datatype.type, output_type, "Wrong data stored.")


    def test_delete_dt_free_HDD_space(self):
        """
        Launch two operations and give enough available space for user so that both should finish.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        output = adapter.get_output()
        output_type = output[0].__name__
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")

        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 0)
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter,
                                                  tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 1)

        datatype = dao.get_datatype_by_id(dts[0][0])
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        self.assertEqual(datatype.type, output_type, "Wrong data stored.")

        #Now delete the resulted datatype to free HDD space, so that a second launch fits within the same quota
        ProjectService().remove_datatype(self.test_project.id, datatype.gid)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 0)

        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter,
                                                  tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 1)
        datatype = dao.get_datatype_by_id(dts[0][0])
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        self.assertEqual(datatype.type, output_type, "Wrong data stored.")


    def test_launch_two_ops_HDD_with_space(self):
        """
        Launch two operations and give enough available space for user so that both should finish.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        output = adapter.get_output()
        output_type = output[0].__name__
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = 2 * float(adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter,
                                                  tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 1)
        datatype = dao.get_datatype_by_id(dts[0][0])
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        self.assertEqual(datatype.type, output_type, "Wrong data stored.")
        #Now update the maximum disk size to be the size of the previously resulted datatypes (transform from kB to MB)
        #plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size) + float(adapter.get_required_disk_size(**data))

        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter,
                                                  tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 2)
        datatype = dao.get_datatype_by_id(dts[1][0])
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        self.assertEqual(datatype.type, output_type, "Wrong data stored.")


    def test_launch_two_ops_HDD_full_space(self):
        """
        Launch two operations and give available space for user so that the first should finish,
        but after the update to the user hdd size the second should not.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        output = adapter.get_output()
        output_type = output[0].__name__
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = (1 + float(adapter.get_required_disk_size(**data)))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter,
                                                  tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 1)
        datatype = dao.get_datatype_by_id(dts[0][0])
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        self.assertEqual(datatype.type, output_type, "Wrong data stored.")
        #Now update the maximum disk size to be less than size of the previously resulted datatypes (transform kB to MB)
        #plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size - 1) + \
                                                float(adapter.get_required_disk_size(**data) - 1)

        self.assertRaises(NoMemoryAvailableException, self.operation_service.initiate_operation, self.test_user,
                          self.test_project.id, adapter,
                          tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 1)


    def test_launch_operation_HDD_with_space(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        output = adapter.get_output()
        output_type = output[0].__name__
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter,
                                                  tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 1)
        datatype = dao.get_datatype_by_id(dts[0][0])
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        self.assertEqual(datatype.type, output_type, "Wrong data stored.")


    def test_launch_operation_HDD_with_space_started_ops(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        space_taken_by_started = 100
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        started_operation = model.Operation(self.test_user.id, self.test_project.id, group.id, "",
                                            status=model.STATUS_STARTED, estimated_disk_size=space_taken_by_started)
        dao.store_entity(started_operation)
        adapter = FlowService().build_adapter_instance(group)
        output = adapter.get_output()
        output_type = output[0].__name__
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data) + space_taken_by_started)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.operation_service.initiate_operation(self.test_user, self.test_project.id, adapter,
                                                  tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 1)
        datatype = dao.get_datatype_by_id(dts[0][0])
        self.assertEqual(datatype.subject, DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored.")
        self.assertEqual(datatype.type, output_type, "Wrong data stored.")


    def test_launch_operation_HDD_full_space(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data) - 1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.assertRaises(NoMemoryAvailableException, self.operation_service.initiate_operation, self.test_user,
                          self.test_project.id, adapter,
                          tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 0)


    def test_launch_operation_HDD_full_space_started_ops(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        space_taken_by_started = 100
        module = "tvb.tests.framework.adapters.testadapter3"
        class_name = "TestAdapterHDDRequired"
        group = dao.find_group(module, class_name)
        started_operation = model.Operation(self.test_user.id, self.test_project.id, group.id, "",
                                            status=model.STATUS_STARTED, estimated_disk_size=space_taken_by_started)
        dao.store_entity(started_operation)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(**data) + space_taken_by_started - 1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project, "TEMP")
        self.assertRaises(NoMemoryAvailableException, self.operation_service.initiate_operation, self.test_user,
                          self.test_project.id, adapter,
                          tmp_folder, method_name=ABCAdapter.LAUNCH_METHOD, **data)
        dts = dao.get_values_of_datatype(self.test_project.id, Datatype2)[0]
        self.assertEqual(len(dts), 0)


    def test_stop_operation(self):
        """
        Test that an operation is successfully stopped.
        """
        module = "tvb.tests.framework.adapters.testadapter2"
        class_name = "TestAdapter2"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test": 5}
        algo_group = adapter.algorithm_group
        algo_category = dao.get_category_by_id(algo_group.fk_category)
        algo = dao.get_algorithm_by_group(algo_group.id)
        operations, _ = self.operation_service.prepare_operations(self.test_user.id, self.test_project.id, algo,
                                                                  algo_category, {}, ABCAdapter.LAUNCH_METHOD, **data)
        self.operation_service._send_to_cluster(operations, adapter)
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        self.assertEqual(operation.status, model.STATUS_CANCELED, "Operation should have been canceled!")


    def test_stop_operation_finished(self):
        """
        Test that an operation that is already finished is not changed by the stop operation.
        """
        module = "tvb.tests.framework.adapters.testadapter1"
        class_name = "TestAdapter1"
        group = dao.find_group(module, class_name)
        adapter = FlowService().build_adapter_instance(group)
        data = {"test1_val1": 5, 'test1_val2': 5}
        algo_group = adapter.algorithm_group
        algo_category = dao.get_category_by_id(algo_group.fk_category)
        algo = dao.get_algorithm_by_group(algo_group.id)
        operations, _ = self.operation_service.prepare_operations(self.test_user.id, self.test_project.id, algo,
                                                                  algo_category, {}, ABCAdapter.LAUNCH_METHOD, **data)
        self.operation_service._send_to_cluster(operations, adapter)
        operation = dao.get_operation_by_id(operations[0].id)
        operation.status = model.STATUS_FINISHED
        dao.store_entity(operation)
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        self.assertEqual(operation.status, model.STATUS_FINISHED, "Operation shouldn't have been canceled!")


    def test_array_from_string(self):
        """
        Simple test for parsing 1D, 2D and 3D arrays from strings.
        """
        row = {'description': 'test.',
               'default': 'None',
               'required': True,
               'label': 'test: ',
               'attributes': None,
               'quantifier': 'manual',
               'elementType': 'float',
               'type': 'array',
               'options': None,
               'name': 'test'}
        input_data_string = '[ [1 2 3] [4 5 6]]'
        output = string2array(input_data_string, ' ', row['elementType'])
        self.assertEqual(output.shape, (2, 3), "Dimensions not properly parsed")
        for i in output[0]:
            self.assertTrue(i in [1, 2, 3])
        for i in output[1]:
            self.assertTrue(i in [4, 5, 6])
        input_data_string = '[1, 2, 3, 4, 5, 6]'
        output = string2array(input_data_string, ',', row['elementType'])
        self.assertEqual(output.shape, (6,), "Dimensions not properly parsed")
        for i in output:
            self.assertTrue(i in [1, 2, 3, 4, 5, 6])
        input_data_string = '[ [ [1,1], [2, 2] ], [ [3 ,3], [4,4] ] ]'
        output = string2array(input_data_string, ',', row['elementType'])
        self.assertEqual(output.shape, (2, 2, 2), "Wrong dimensions.")
        for i in output[0][0]:
            self.assertTrue(i == 1)
        for i in output[0][1]:
            self.assertTrue(i == 2)
        for i in output[1][0]:
            self.assertTrue(i == 3)
        for i in output[1][1]:
            self.assertTrue(i == 4)
        row = {'description': 'test.',
               'default': 'None',
               'required': True,
               'label': 'test: ',
               'attributes': None,
               'quantifier': 'manual',
               'elementType': 'str',
               'type': 'array',
               'options': None,
               'name': 'test'}
        input_data_string = '[1, 2, 3, 4, 5, 6]'
        output = string2array(input_data_string, ',', row['elementType'])
        for i in output:
            self.assertTrue(i in [1, 2, 3, 4, 5, 6])


    def test_wrong_array_from_string(self):
        """Test that parsing an array from string is throwing the expected 
        exception when wrong input string"""
        row = {'description': 'test.',
               'default': 'None',
               'required': True,
               'label': 'test: ',
               'attributes': None,
               'quantifier': 'manual',
               'elementType': 'float',
               'type': 'array',
               'options': None,
               'name': 'test'}
        input_data_string = '[ [1,2 3] [4,5,6]]'
        self.assertRaises(ValueError, string2array, input_data_string, ',', row['elementType'])
        input_data_string = '[ [1,2,wrong], [4, 5, 6]]'
        self.assertRaises(ValueError, string2array, input_data_string, ',', row['elementType'])
        row = {'description': 'test.',
               'default': 'None',
               'required': True,
               'label': 'test: ',
               'attributes': None,
               'quantifier': 'manual',
               'elementType': 'str',
               'type': 'array',
               'options': None,
               'name': 'test'}
        output = string2array(input_data_string, ',', row['elementType'])
        self.assertEqual(output.shape, (2, 3))
        self.assertEqual(output[0][2], 'wrong', 'String data not converted properly')
        input_data_string = '[ [1,2 3] [4,5,6]]'
        output = string2array(input_data_string, ',', row['elementType'])
        self.assertEqual(output[0][1], '2 3')


    def test_reduce_dimension_component(self):
        """
         This method tests if the data passed to the launch method of
         the NDimensionArrayAdapter adapter is correct. The passed data should be a list
         of arrays with one dimension.
        """
        inserted_count = FlowService().get_available_datatypes(self.test_project.id,
                                                               "tvb.datatypes.arrays.MappedArray")[1]
        self.assertEqual(inserted_count, 0, "Expected to find no data.")
        #create an operation
        algorithm_id = FlowService().get_algorithm_by_module_and_class('tvb.tests.framework.adapters.ndimensionarrayadapter',
                                                                       'NDimensionArrayAdapter')[0].id
        operation = model.Operation(self.test_user.id, self.test_project.id, algorithm_id, 'test params',
                                    meta=json.dumps({DataTypeMetaData.KEY_STATE: "RAW_DATA"}),
                                    status=model.STATUS_FINISHED, method_name=ABCAdapter.LAUNCH_METHOD)
        operation = dao.store_entity(operation)
        #save the array wrapper in DB
        adapter_instance = NDimensionArrayAdapter()
        PARAMS = {}
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        inserted_data = FlowService().get_available_datatypes(self.test_project.id,
                                                              "tvb.datatypes.arrays.MappedArray")[0]
        self.assertEqual(len(inserted_data), 1, "Problems when inserting data")
        gid = inserted_data[0][2]
        entity = dao.get_datatype_by_gid(gid)
        #from the 3D array do not select any array
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": "requiredDim_1",
                  "input_data_dimensions_1": "",
                  "input_data_dimensions_2": ""}
        try:
            self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
            self.fail("Test should not pass. The resulted array should be a 1D array.")
        except Exception:
            # OK, do nothing; we were expecting to produce a 1D array
            pass
        #from the 3D array select only a 1D array
        first_dim = [gid + '_1_0', 'requiredDim_1']
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": first_dim,
                  "input_data_dimensions_1": gid + "_2_1"}
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        expected_result = entity.array_data[:, 0, 1]
        actual_result = adapter_instance.launch_param
        self.assertEqual(len(actual_result), len(expected_result), "Not the same size for results!")
        self.assertTrue(numpy.equal(actual_result, expected_result).all())

        #from the 3D array select a 2D array
        first_dim = [gid + '_1_0', gid + '_1_1', 'requiredDim_2']
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": first_dim,
                  "input_data_dimensions_1": gid + "_2_1"}
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        expected_result = entity.array_data[slice(0, None), [0, 1], 1]
        actual_result = adapter_instance.launch_param
        self.assertEqual(len(actual_result), len(expected_result), "Not the same size for results!")
        self.assertTrue(numpy.equal(actual_result, expected_result).all())

        #from 3D array select 1D array by applying SUM function on the first
        #dimension and average function on the second dimension
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": ["requiredDim_1", "func_sum"],
                  "input_data_dimensions_1": "func_average",
                  "input_data_dimensions_2": ""}
        self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        aux = numpy.sum(entity.array_data, axis=0)
        expected_result = numpy.average(aux, axis=0)
        actual_result = adapter_instance.launch_param
        self.assertEqual(len(actual_result), len(expected_result), "Not the same size of results!")
        self.assertTrue(numpy.equal(actual_result, expected_result).all())

        #from 3D array select a 2D array and apply op. on the second dimension
        PARAMS = {"python_method": "reduce_dimension", "input_data": gid,
                  "input_data_dimensions_0": ["requiredDim_2", "func_sum",
                                              "expected_shape_x,512", "operations_x,&gt;"],
                  "input_data_dimensions_1": "",
                  "input_data_dimensions_2": ""}
        try:
            self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
            self.fail("Test should not pass! The second dimension of the array should be >512.")
        except Exception:
            # OK, do nothing;
            pass
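
The HDD-related tests above all exercise the same gating rule: an operation is allowed to start only while TvbProfile.current.MAX_DISK_SPACE still covers the adapter's estimated disk usage plus the space already reserved by operations in the STARTED state. A self-contained sketch of that rule (illustrative only, not the actual TVB implementation):

    def fits_disk_quota(max_disk_space, required, reserved_by_started=0):
        # The quota must cover both the new operation's estimate and the space
        # already claimed by started operations (all values in the same unit).
        return required + reserved_by_started <= max_disk_space

    # test_launch_operation_HDD_with_space_started_ops makes the quota exactly sufficient:
    assert fits_disk_quota(max_disk_space=200, required=100, reserved_by_started=100)
    # test_launch_operation_HDD_full_space_started_ops sets it one unit short, so the launch is refused:
    assert not fits_disk_quota(max_disk_space=199, required=100, reserved_by_started=100)
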
Example #18
class TestOperationService(BaseTestCase):
    """
    Test class for the introspection module. Some tests from here do async launches. For those
    cases Transactional tests won't work.
    TODO: this still needs refactoring; it is huge, with duplicates and many irrelevant checks.
    """
    def setup_method(self):
        """
        Reset the database before each test.
        """
        self.clean_database()
        initialize_storage()
        self.test_user = TestFactory.create_user()
        self.test_project = TestFactory.create_project(self.test_user)
        self.operation_service = OperationService()
        self.backup_hdd_size = TvbProfile.current.MAX_DISK_SPACE

    def teardown_method(self):
        """
        Reset the database when test is done.
        """
        TvbProfile.current.MAX_DISK_SPACE = self.backup_hdd_size
        self.clean_database()

    def _assert_no_dt2(self):
        count = dao.count_datatypes(self.test_project.id, DummyDataTypeIndex)
        assert 0 == count

    def _assert_stored_dt2(self, expected_cnt=1):
        count = dao.count_datatypes(self.test_project.id, Datatype2)
        assert expected_cnt == count
        datatype = dao.try_load_last_entity_of_type(self.test_project.id,
                                                    Datatype2)
        assert datatype.subject == DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored."
        return datatype

    def test_datatypes_groups(self, test_adapter_factory):
        """
        Tests whether the DataType group is set correctly on the DataTypes resulting from the same operation group.
        """

        all_operations = dao.get_filtered_operations(self.test_project.id,
                                                     None)
        assert len(all_operations) == 0, "There should be no operation"

        test_adapter_factory(TestAdapter3)
        algo = dao.get_algorithm_by_module(
            'tvb.tests.framework.adapters.testadapter3', 'TestAdapter3')
        adapter_instance = ABCAdapter.build_adapter(algo)
        data = {model_burst.RANGE_PARAMETER_1: 'param_5', 'param_5': [1, 2]}
        ## Create Group of operations
        FlowService().fire_operation(adapter_instance, self.test_user,
                                     self.test_project.id, **data)

        all_operations = dao.get_filtered_operations(self.test_project.id,
                                                     None)
        assert len(all_operations) == 1, "Expected one operation group"
        assert all_operations[0][2] == 2, "Expected 2 operations in group"

        operation_group_id = all_operations[0][3]
        assert operation_group_id is not None, "The operation should be part of a group."

        self.operation_service.stop_operation(all_operations[0][0])
        self.operation_service.stop_operation(all_operations[0][1])
        ## Make sure operations are executed
        self.operation_service.launch_operation(all_operations[0][0], False)
        self.operation_service.launch_operation(all_operations[0][1], False)

        resulted_datatypes = dao.get_datatype_in_group(
            operation_group_id=operation_group_id)
        assert len(resulted_datatypes) >= 2, "Expected at least 2, but: " + str(len(resulted_datatypes))

        dt = dao.get_datatype_by_id(resulted_datatypes[0].id)
        datatype_group = dao.get_datatypegroup_by_op_group_id(
            operation_group_id)
        assert dt.fk_datatype_group == datatype_group.id, "DataTypeGroup is incorrect"

    def test_initiate_operation(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        module = "tvb.tests.framework.adapters.testadapter1"
        class_name = "TestAdapter1"
        test_adapter_factory()
        adapter = TestFactory.create_adapter(module, class_name)
        output = adapter.get_output()
        output_type = output[0].__name__
        data = {"test1_val1": 5, "test1_val2": 5}
        tmp_folder = FilesHelper().get_project_folder(self.test_project,
                                                      "TEMP")
        self.operation_service.initiate_operation(self.test_user,
                                                  self.test_project, adapter,
                                                  tmp_folder, **data)

        group = dao.get_algorithm_by_module(module, class_name)
        assert group.module == 'tvb.tests.framework.adapters.testadapter1', "Wrong data stored."
        assert group.classname == 'TestAdapter1', "Wrong data stored."
        dts, count = dao.get_values_of_datatype(self.test_project.id,
                                                DummyDataTypeIndex)
        assert count == 1
        assert len(dts) == 1
        datatype = dao.get_datatype_by_id(dts[0][0])
        assert datatype.subject == DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored."
        assert datatype.type == output_type, "Wrong data stored."

    def test_delete_dt_free_hdd_space(self, test_adapter_factory,
                                      operation_factory):
        """
        Launch two operations and give enough available space for user so that both should finish.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)
        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter3",
            "TestAdapterHDDRequired")
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(
            adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project,
                                                      "TEMP")

        self._assert_no_dt2()
        self.operation_service.initiate_operation(self.test_user,
                                                  self.test_project, adapter,
                                                  tmp_folder, **data)
        datatype = self._assert_stored_dt2()

        # Now free some space and relaunch
        ProjectService().remove_datatype(self.test_project.id, datatype.gid)
        self._assert_no_dt2()
        self.operation_service.initiate_operation(self.test_user,
                                                  self.test_project, adapter,
                                                  tmp_folder, **data)
        self._assert_stored_dt2()

    def test_launch_two_ops_hdd_with_space(self):
        """
        Launch two operations and give enough available space for user so that both should finish.
        """
        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter3",
            "TestAdapterHDDRequired")
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = 2 * float(
            adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project,
                                                      "TEMP")

        self.operation_service.initiate_operation(self.test_user,
                                                  self.test_project, adapter,
                                                  tmp_folder, **data)
        datatype = self._assert_stored_dt2()

        #Now update the maximum disk size to be the size of the previously resulted datatypes (transform from kB to MB)
        #plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size) + float(
            adapter.get_required_disk_size(**data))

        self.operation_service.initiate_operation(self.test_user,
                                                  self.test_project, adapter,
                                                  tmp_folder, **data)
        self._assert_stored_dt2(2)

    def test_launch_two_ops_hdd_full_space(self):
        """
        Launch two operations and give available space for user so that the first should finish,
        but after the update to the user hdd size the second should not.
        """
        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter3",
            "TestAdapterHDDRequired")
        data = {"test": 100}

        TvbProfile.current.MAX_DISK_SPACE = (
            1 + float(adapter.get_required_disk_size(**data)))
        tmp_folder = FilesHelper().get_project_folder(self.test_project,
                                                      "TEMP")
        self.operation_service.initiate_operation(self.test_user,
                                                  self.test_project, adapter,
                                                  tmp_folder, **data)

        datatype = self._assert_stored_dt2()
        #Now update the maximum disk size to be less than size of the previously resulted datatypes (transform kB to MB)
        #plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size - 1) + \
                                            float(adapter.get_required_disk_size(**data) - 1)

        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user,
                                                      self.test_project,
                                                      adapter, tmp_folder,
                                                      **data)
        self._assert_stored_dt2()

    def test_launch_operation_hdd_with_space(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter3",
            "TestAdapterHDDRequired")
        data = {"test": 100}

        TvbProfile.current.MAX_DISK_SPACE = float(
            adapter.get_required_disk_size(**data))
        tmp_folder = FilesHelper().get_project_folder(self.test_project,
                                                      "TEMP")
        self.operation_service.initiate_operation(self.test_user,
                                                  self.test_project, adapter,
                                                  tmp_folder, **data)
        self._assert_stored_dt2()

    def test_launch_operation_hdd_with_space_started_ops(
            self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)

        space_taken_by_started = 100
        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter3",
            "TestAdapterHDDRequired")
        form = TestAdapterHDDRequiredForm()
        form.fill_from_post({'_test': "100"})
        adapter.submit_form(form)
        started_operation = model_operation.Operation(
            self.test_user.id,
            self.test_project.id,
            adapter.stored_adapter.id,
            "",
            status=model_operation.STATUS_STARTED,
            estimated_disk_size=space_taken_by_started)
        dao.store_entity(started_operation)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(
            adapter.get_required_disk_size(**data) + space_taken_by_started)
        tmp_folder = FilesHelper().get_project_folder(self.test_project,
                                                      "TEMP")
        self.operation_service.initiate_operation(self.test_user,
                                                  self.test_project, adapter,
                                                  tmp_folder, **data)
        self._assert_stored_dt2()

    def test_launch_operation_hdd_full_space(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)

        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter3",
            "TestAdapterHDDRequired")
        form = TestAdapterHDDRequiredForm()
        adapter.submit_form(form)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(
            adapter.get_required_disk_size(**data) - 1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project,
                                                      "TEMP")
        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user,
                                                      self.test_project,
                                                      adapter, tmp_folder,
                                                      **data)
        self._assert_no_dt2()

    def test_launch_operation_hdd_full_space_started_ops(
            self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)

        space_taken_by_started = 100
        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter3",
            "TestAdapterHDDRequired")
        form = TestAdapterHDDRequiredForm()
        adapter.submit_form(form)
        started_operation = model_operation.Operation(
            self.test_user.id,
            self.test_project.id,
            adapter.stored_adapter.id,
            "",
            status=model_operation.STATUS_STARTED,
            estimated_disk_size=space_taken_by_started)
        dao.store_entity(started_operation)
        data = {"test": 100}
        TvbProfile.current.MAX_DISK_SPACE = float(
            adapter.get_required_disk_size(**data) + space_taken_by_started -
            1)
        tmp_folder = FilesHelper().get_project_folder(self.test_project,
                                                      "TEMP")
        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user,
                                                      self.test_project,
                                                      adapter, tmp_folder,
                                                      **data)
        self._assert_no_dt2()

    def test_stop_operation(self, test_adapter_factory):
        """
        Test that an operation is successfully stopped.
        """
        test_adapter_factory(adapter_class=TestAdapter2)
        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter2", "TestAdapter2")
        data = {"test": 5}
        algo = adapter.stored_adapter
        algo_category = dao.get_category_by_id(algo.fk_category)
        operations, _ = self.operation_service.prepare_operations(
            self.test_user.id, self.test_project, algo, algo_category, {},
            **data)
        self.operation_service._send_to_cluster(operations, adapter)
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        assert operation.status == model_operation.STATUS_CANCELED, "Operation should have been canceled!"

    def test_stop_operation_finished(self, test_adapter_factory):
        """
        Test that an operation that is already finished is not changed by the stop operation.
        """
        test_adapter_factory()
        adapter = TestFactory.create_adapter(
            "tvb.tests.framework.adapters.testadapter1", "TestAdapter1")
        data = {"test1_val1": 5, 'test1_val2': 5}
        algo = adapter.stored_adapter
        algo_category = dao.get_category_by_id(algo.fk_category)
        operations, _ = self.operation_service.prepare_operations(
            self.test_user.id, self.test_project, algo, algo_category, {},
            **data)
        self.operation_service._send_to_cluster(operations, adapter)
        operation = dao.get_operation_by_id(operations[0].id)
        operation.status = model_operation.STATUS_FINISHED
        dao.store_entity(operation)
        self.operation_service.stop_operation(operations[0].id)
        operation = dao.get_operation_by_id(operations[0].id)
        assert operation.status == model_operation.STATUS_FINISHED, "Operation shouldn't have been canceled!"

    def test_array_from_string(self):
        """
        Simple test for parsing 1D, 2D and 3D arrays from strings.
        """
        row = {
            'description': 'test.',
            'default': 'None',
            'required': True,
            'label': 'test: ',
            'attributes': None,
            'elementType': 'float',
            'type': 'array',
            'options': None,
            'name': 'test'
        }
        input_data_string = '[ [1 2 3] [4 5 6]]'
        output = string2array(input_data_string, ' ', row['elementType'])
        assert output.shape == (2, 3), "Dimensions not properly parsed"
        for i in output[0]:
            assert i in [1, 2, 3]
        for i in output[1]:
            assert i in [4, 5, 6]
        input_data_string = '[1, 2, 3, 4, 5, 6]'
        output = string2array(input_data_string, ',', row['elementType'])
        assert output.shape == (6, ), "Dimensions not properly parsed"
        for i in output:
            assert i in [1, 2, 3, 4, 5, 6]
        input_data_string = '[ [ [1,1], [2, 2] ], [ [3 ,3], [4,4] ] ]'
        output = string2array(input_data_string, ',', row['elementType'])
        assert output.shape == (2, 2, 2), "Wrong dimensions."
        for i in output[0][0]:
            assert i == 1
        for i in output[0][1]:
            assert i == 2
        for i in output[1][0]:
            assert i == 3
        for i in output[1][1]:
            assert i == 4
        row = {
            'description': 'test.',
            'default': 'None',
            'required': True,
            'label': 'test: ',
            'attributes': None,
            'elementType': 'str',
            'type': 'array',
            'options': None,
            'name': 'test'
        }
        input_data_string = '[1, 2, 3, 4, 5, 6]'
        output = string2array(input_data_string, ',', row['elementType'])
        for i in output:
            assert i in [1, 2, 3, 4, 5, 6]

    def test_wrong_array_from_string(self):
        """Test that parsing an array from string is throwing the expected 
        exception when wrong input string"""
        row = {
            'description': 'test.',
            'default': 'None',
            'required': True,
            'label': 'test: ',
            'attributes': None,
            'elementType': 'float',
            'type': 'array',
            'options': None,
            'name': 'test'
        }
        input_data_string = '[ [1,2 3] [4,5,6]]'
        with pytest.raises(ValueError):
            string2array(input_data_string, ',', row['elementType'])
        input_data_string = '[ [1,2,wrong], [4, 5, 6]]'
        with pytest.raises(ValueError):
            string2array(input_data_string, ',', row['elementType'])
        row = {
            'description': 'test.',
            'default': 'None',
            'required': True,
            'label': 'test: ',
            'attributes': None,
            'elementType': 'str',
            'type': 'array',
            'options': None,
            'name': 'test'
        }
        output = string2array(input_data_string, ',', row['elementType'])
        assert output.shape == (2, 3)
        assert output[0][2] == 'wrong', 'String data not converted properly'
        input_data_string = '[ [1,2 3] [4,5,6]]'
        output = string2array(input_data_string, ',', row['elementType'])
        assert output[0][1] == '2 3'

    def test_reduce_dimension_component(self):
        """
         This method tests if the data passed to the launch method of
         the NDimensionArrayAdapter adapter is correct. The passed data should be a list
         of arrays with one dimension.
        """
        inserted_count = FlowService().get_available_datatypes(
            self.test_project.id, "tvb.datatypes.arrays.MappedArray")[1]
        assert inserted_count == 0, "Expected to find no data."
        #create an operation
        algorithm_id = FlowService().get_algorithm_by_module_and_class(
            'tvb.tests.framework.adapters.ndimensionarrayadapter',
            'NDimensionArrayAdapter').id
        operation = model_operation.Operation(
            self.test_user.id,
            self.test_project.id,
            algorithm_id,
            'test params',
            meta=json.dumps({DataTypeMetaData.KEY_STATE: "RAW_DATA"}),
            status=model_operation.STATUS_FINISHED)
        operation = dao.store_entity(operation)
        #save the array wrapper in DB
        adapter_instance = NDimensionArrayAdapter()
        PARAMS = {}
        self.operation_service.initiate_prelaunch(operation, adapter_instance,
                                                  {}, **PARAMS)
        inserted_data = FlowService().get_available_datatypes(
            self.test_project.id, "tvb.datatypes.arrays.MappedArray")[0]
        assert len(inserted_data) == 1, "Problems when inserting data"
        gid = inserted_data[0][2]
        entity = dao.get_datatype_by_gid(gid)
        #from the 3D array do not select any array
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": "requiredDim_1",
            "input_data_dimensions_1": "",
            "input_data_dimensions_2": ""
        }
        # Nothing is selected along the last two dimensions, so the adapter cannot
        # produce the required 1-D array and the launch is expected to fail.
        with pytest.raises(Exception):
            self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)
        # From the 3-D array select only a 1-D array
        first_dim = [gid + '_1_0', 'requiredDim_1']
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": first_dim,
            "input_data_dimensions_1": gid + "_2_1"
        }
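        # '<gid>_1_0' keeps index 0 on dimension 1 and '<gid>_2_1' keeps index 1 on
        # dimension 2, so the expected result is entity.array_data[:, 0, 1] (computed below).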
        self.operation_service.initiate_prelaunch(operation, adapter_instance,
                                                  {}, **PARAMS)
        expected_result = entity.array_data[:, 0, 1]
        actual_result = adapter_instance.launch_param
        assert len(actual_result) == len(expected_result), "Not the same size for results!"
        assert numpy.equal(actual_result, expected_result).all()

        # From the 3-D array select a 2-D array
        first_dim = [gid + '_1_0', gid + '_1_1', 'requiredDim_2']
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": first_dim,
            "input_data_dimensions_1": gid + "_2_1"
        }
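        # Indices 0 and 1 are kept on dimension 1 and index 1 on dimension 2, so the
        # expected 2-D result is entity.array_data[:, [0, 1], 1] (computed below).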
        self.operation_service.initiate_prelaunch(operation, adapter_instance,
                                                  {}, **PARAMS)
        expected_result = entity.array_data[slice(0, None), [0, 1], 1]
        actual_result = adapter_instance.launch_param
        assert len(actual_result) == len(expected_result), "Not the same size for results!"
        assert numpy.equal(actual_result, expected_result).all()

        # From the 3-D array obtain a 1-D array by applying the SUM function on the
        # first dimension and the AVERAGE function on the second dimension
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": ["requiredDim_1", "func_sum"],
            "input_data_dimensions_1": "func_average",
            "input_data_dimensions_2": ""
        }
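        # 'func_sum' on the first dimension and 'func_average' on the second should match
        # numpy.sum(..., axis=0) followed by numpy.average(..., axis=0), as computed for
        # expected_result below.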
        self.operation_service.initiate_prelaunch(operation, adapter_instance,
                                                  {}, **PARAMS)
        aux = numpy.sum(entity.array_data, axis=0)
        expected_result = numpy.average(aux, axis=0)
        actual_result = adapter_instance.launch_param
        assert len(actual_result) == len(expected_result), "Not the same size for results!"
        assert numpy.equal(actual_result, expected_result).all()

        # From the 3-D array select a 2-D array and apply an operation on the second dimension
        PARAMS = {
            "python_method": "reduce_dimension",
            "input_data": gid,
            "input_data_dimensions_0": ["requiredDim_2", "func_sum",
                                        "expected_shape_x,512", "operations_x,>"],
            "input_data_dimensions_1": "",
            "input_data_dimensions_2": ""
        }
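        # 'expected_shape_x,512' together with 'operations_x,>' asks the adapter to check
        # that the selected dimension is longer than 512; the test data does not satisfy
        # this, so the launch below is expected to fail.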
        with pytest.raises(Exception):
            self.operation_service.initiate_prelaunch(operation, adapter_instance, {}, **PARAMS)