class LaunchOperationResource(RestResource):
    """REST endpoint for launching an analyzer algorithm inside a project identified by GID."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.logger = get_logger(self.__class__.__module__)
        # Services used to resolve the project, prepare the operation and locate storage folders.
        self.operation_service = OperationService()
        self.project_service = ProjectService()
        self.user_service = UserService()
        self.files_helper = FilesHelper()

    @check_permission(ProjectAccessPermission, 'project_gid')
    def post(self, project_gid, algorithm_module, algorithm_classname):
        """
        :generic method of launching Analyzers

        Stages the uploaded view-model H5 (plus any uploader data files) in a temporary
        folder, moves them into the operation's project folder, then launches the operation.
        Raises InvalidIdentifierException for an unknown project or algorithm, and wraps any
        launch-time failure in ServiceException.
        """
        # Stage the uploaded view-model H5 file in a temporary folder first.
        model_file = self.extract_file_from_request(request_file_key=RequestFileKey.LAUNCH_ANALYZERS_MODEL_FILE.value)
        destination_folder = RestResource.get_destination_folder()
        h5_path = RestResource.save_temporary_file(model_file, destination_folder)
        try:
            project = self.project_service.find_project_lazy_by_gid(project_gid)
        except ProjectServiceException:
            raise InvalidIdentifierException(INVALID_PROJECT_GID_MESSAGE % project_gid)
        algorithm = FlowService.get_algorithm_by_module_and_class(algorithm_module, algorithm_classname)
        if algorithm is None:
            raise InvalidIdentifierException('No algorithm found for: %s.%s' % (algorithm_module, algorithm_classname))
        try:
            adapter_instance = ABCAdapter.build_adapter(algorithm)
            # The GID is read from the uploaded H5, not generated here.
            view_model = adapter_instance.get_view_model_class()()
            view_model_h5 = ViewModelH5(h5_path, view_model)
            view_model_gid = view_model_h5.gid.load()
            current_user = get_current_user()
            operation = self.operation_service.prepare_operation(current_user.id, project.id, algorithm.id,
                                                                 algorithm.algorithm_category, view_model_gid.hex,
                                                                 None, {})
            storage_path = self.files_helper.get_project_folder(project, str(operation.id))
            if isinstance(adapter_instance, ABCUploader):
                # Uploader adapters declare extra files; each is stored on the matching
                # H5 field and moved into the operation's storage folder.
                for key, value in adapter_instance.get_form_class().get_upload_information().items():
                    data_file = self.extract_file_from_request(request_file_key=key, file_extension=value)
                    data_file_path = RestResource.save_temporary_file(data_file, destination_folder)
                    file_name = os.path.basename(data_file_path)
                    upload_field = getattr(view_model_h5, key)
                    upload_field.store(os.path.join(storage_path, file_name))
                    shutil.move(data_file_path, storage_path)
            # Move the view-model H5 into place; the temp folder should now be empty.
            shutil.move(h5_path, storage_path)
            os.rmdir(destination_folder)
            view_model_h5.close()
            OperationService().launch_operation(operation.id, True)
        except Exception as excep:
            self.logger.error(excep, exc_info=True)
            raise ServiceException(str(excep))
        return operation.gid, HTTP_STATUS_CREATED
def launch_synchronously(test_user_id, test_project, adapter_instance, view_model):
    """Launch an operation immediately (bypassing the async scheduler) and return its datatypes.

    Asserts the operation finished successfully before returning the entities it produced.
    """
    operation_service = OperationService()
    stored_algorithm = adapter_instance.stored_adapter
    prepared_operation = operation_service.prepare_operation(test_user_id, test_project, stored_algorithm,
                                                             True, view_model)
    operation_service.initiate_prelaunch(prepared_operation, adapter_instance)
    finished_operation = dao.get_operation_by_id(prepared_operation.id)
    # The synchronous launch must have completed with success status.
    assert STATUS_FINISHED == finished_operation.status
    # Hand back the produced datatypes so callers can verify at least one result exists.
    return dao.get_generic_entity(DataType, finished_operation.id, 'fk_from_operation')
class SimulatorService(object):
    """Service layer for preparing and launching simulations (single runs and PSE groups)."""

    def __init__(self):
        self.logger = get_logger(self.__class__.__module__)
        self.burst_service = BurstService()
        self.operation_service = OperationService()
        self.files_helper = FilesHelper()

    def _reset_model(self, session_stored_simulator):
        """Replace the model with a fresh default instance and re-derive monitor variables of interest."""
        session_stored_simulator.model = type(session_stored_simulator.model)()
        vi_indexes = MonitorForm.determine_indexes_for_chosen_vars_of_interest(session_stored_simulator)
        vi_indexes = numpy.array(list(vi_indexes.values()))
        for monitor in session_stored_simulator.monitors:
            monitor.variables_of_interest = vi_indexes

    def reset_at_connectivity_change(self, is_simulator_copy, form, session_stored_simulator):
        """
        In case the user copies a simulation and changes the Connectivity, we want to reset the Model and Noise
        parameters because they might not fit to the new Connectivity's nr of regions.
        """
        if is_simulator_copy and form.connectivity.value != session_stored_simulator.connectivity:
            self._reset_model(session_stored_simulator)
            # Stochastic integrators carry region-sized noise arrays, so reset those too.
            if issubclass(type(session_stored_simulator.integrator), IntegratorStochastic):
                session_stored_simulator.integrator.noise = type(session_stored_simulator.integrator.noise)()

    def reset_at_surface_change(self, is_simulator_copy, form, session_stored_simulator):
        """
        In case the user copies a surface-simulation and changes the Surface, we want to reset the Model
        parameters because they might not fit to the new Surface's nr of vertices.
        """
        # Reset when a surface was added where none existed, or when a different surface was chosen.
        if is_simulator_copy and (session_stored_simulator.surface is None and form.surface.value
                                  or session_stored_simulator.surface and
                                  form.surface.value != session_stored_simulator.surface.surface_gid):
            self._reset_model(session_stored_simulator)

    @staticmethod
    def _set_simulator_range_parameter(simulator, range_parameter_name, range_parameter_value):
        """Set a (possibly dotted, e.g. 'model.a') attribute path on the simulator to the given value."""
        range_param_name_list = range_parameter_name.split('.')
        current_attr = simulator
        for param_name in range_param_name_list[:len(range_param_name_list) - 1]:
            current_attr = getattr(current_attr, param_name)
        setattr(current_attr, range_param_name_list[-1], range_parameter_value)

    def async_launch_and_prepare_simulation(self, burst_config, user, project, simulator_algo,
                                            session_stored_simulator):
        """Prepare storage + metadata for a single simulation and launch it; mark the burst on failure."""
        try:
            operation = self.operation_service.prepare_operation(user.id, project.id, simulator_algo,
                                                                 session_stored_simulator.gid)
            ga = self.operation_service._prepare_metadata(simulator_algo.algorithm_category, {}, None,
                                                          burst_config.gid)
            session_stored_simulator.generic_attributes = ga
            storage_path = self.files_helper.get_project_folder(project, str(operation.id))
            h5.store_view_model(session_stored_simulator, storage_path)
            burst_config = self.burst_service.update_simulation_fields(burst_config.id, operation.id,
                                                                       session_stored_simulator.gid)
            self.burst_service.store_burst_configuration(burst_config, storage_path)
            wf_errs = 0
            try:
                OperationService().launch_operation(operation.id, True)
                return operation
            except Exception as excep:
                self.logger.error(excep)
                wf_errs += 1
                if burst_config:
                    self.burst_service.mark_burst_finished(burst_config, error_message=str(excep))
            self.logger.debug("Finished launching workflow. The operation was launched successfully, " +
                              str(wf_errs) + " had error on pre-launch steps")
        except Exception as excep:
            # Preparation itself failed: log and mark the burst as finished with an error.
            self.logger.error(excep)
            if burst_config:
                self.burst_service.mark_burst_finished(burst_config, error_message=str(excep))

    def prepare_simulation_on_server(self, user_id, project, algorithm, zip_folder_path, simulator_file):
        """Prepare an operation from an uploaded simulator H5 and launch it from the unzipped folder."""
        simulator_vm = h5.load_view_model_from_file(simulator_file)
        operation = self.operation_service.prepare_operation(user_id, project.id, algorithm, simulator_vm.gid)
        storage_operation_path = self.files_helper.get_project_folder(project, str(operation.id))
        self.async_launch_simulation_on_server(operation, zip_folder_path, storage_operation_path)
        return operation

    def async_launch_simulation_on_server(self, operation, zip_folder_path, storage_operation_path):
        """Move staged files into the operation folder, then launch; errors are logged, not raised."""
        try:
            for file in os.listdir(zip_folder_path):
                shutil.move(os.path.join(zip_folder_path, file), storage_operation_path)
            try:
                OperationService().launch_operation(operation.id, True)
                # The staging folder is only removed after a successful launch.
                shutil.rmtree(zip_folder_path)
                return operation
            except Exception as excep:
                self.logger.error(excep)
        except Exception as excep:
            self.logger.error(excep)

    @staticmethod
    def _set_range_param_in_dict(param_value):
        """Normalize a range value for JSON storage (first array element, or UUID hex string)."""
        if type(param_value) is numpy.ndarray:
            return param_value[0]
        elif isinstance(param_value, uuid.UUID):
            return param_value.hex
        else:
            return param_value

    def async_launch_and_prepare_pse(self, burst_config, user, project, simulator_algo, range_param1, range_param2,
                                     session_stored_simulator):
        """Prepare one operation per parameter combination of a PSE, then launch them all."""
        try:
            algo_category = simulator_algo.algorithm_category
            operation_group = burst_config.operation_group
            metric_operation_group = burst_config.metric_operation_group
            operations = []
            # A missing second range is modelled as a single None value so one loop shape covers both cases.
            range_param2_values = [None]
            if range_param2:
                range_param2_values = range_param2.get_range_values()
            first_simulator = None
            ga = self.operation_service._prepare_metadata(simulator_algo.algorithm_category, {}, operation_group,
                                                          burst_config.gid)
            session_stored_simulator.generic_attributes = ga
            for param1_value in range_param1.get_range_values():
                for param2_value in range_param2_values:
                    # Copy, but generate a new GUID for every Simulator in PSE
                    simulator = copy.deepcopy(session_stored_simulator)
                    simulator.gid = uuid.uuid4()
                    self._set_simulator_range_parameter(simulator, range_param1.name, param1_value)
                    ranges = {range_param1.name: self._set_range_param_in_dict(param1_value)}
                    if param2_value is not None:
                        self._set_simulator_range_parameter(simulator, range_param2.name, param2_value)
                        ranges[range_param2.name] = self._set_range_param_in_dict(param2_value)
                    ranges = json.dumps(ranges)
                    operation = self.operation_service.prepare_operation(user.id, project.id, simulator_algo,
                                                                         simulator.gid, operation_group, ranges)
                    storage_path = self.files_helper.get_project_folder(project, str(operation.id))
                    h5.store_view_model(simulator, storage_path)
                    operations.append(operation)
                    if first_simulator is None:
                        first_simulator = simulator
            # The first operation of the group carries the burst configuration and datatype groups.
            first_operation = operations[0]
            storage_path = self.files_helper.get_project_folder(project, str(first_operation.id))
            burst_config = self.burst_service.update_simulation_fields(burst_config.id, first_operation.id,
                                                                       first_simulator.gid)
            self.burst_service.store_burst_configuration(burst_config, storage_path)
            datatype_group = DataTypeGroup(operation_group, operation_id=first_operation.id,
                                           fk_parent_burst=burst_config.gid,
                                           state=algo_category.defaultdatastate)
            dao.store_entity(datatype_group)
            metrics_datatype_group = DataTypeGroup(metric_operation_group, fk_parent_burst=burst_config.gid)
            dao.store_entity(metrics_datatype_group)
            wf_errs = 0
            for operation in operations:
                try:
                    OperationService().launch_operation(operation.id, True)
                except Exception as excep:
                    self.logger.error(excep)
                    wf_errs += 1
                    self.burst_service.mark_burst_finished(burst_config, error_message=str(excep))
            self.logger.debug("Finished launching workflows. " + str(len(operations) - wf_errs) +
                              " were launched successfully, " + str(wf_errs) + " had error on pre-launch steps")
            return first_operation
        except Exception as excep:
            self.logger.error(excep)
            self.burst_service.mark_burst_finished(burst_config, error_message=str(excep))

    def load_from_zip(self, zip_file, project):
        """Import a simulator configuration archive; return the simulator view-model and burst config."""
        import_service = ImportService()
        simulator_folder = import_service.import_simulator_configuration_zip(zip_file)
        simulator_h5_filename = DirLoader(simulator_folder, None).find_file_for_has_traits_type(SimulatorAdapterModel)
        simulator_h5_filepath = os.path.join(simulator_folder, simulator_h5_filename)
        simulator = h5.load_view_model_from_file(simulator_h5_filepath)
        burst_config = self.burst_service.load_burst_configuration_from_folder(simulator_folder, project)
        return simulator, burst_config
class SimulatorService(object):
    """Service layer for preparing and launching simulations, with PSE cancellation support."""

    def __init__(self):
        self.logger = get_logger(self.__class__.__module__)
        self.burst_service = BurstService()
        self.operation_service = OperationService()
        self.algorithm_service = AlgorithmService()
        self.storage_interface = StorageInterface()

    @staticmethod
    def _reset_model(session_stored_simulator):
        """Replace the model with a fresh default instance and re-derive monitor variables of interest."""
        session_stored_simulator.model = type(session_stored_simulator.model)()
        vi_indexes = session_stored_simulator.determine_indexes_for_chosen_vars_of_interest()
        vi_indexes = numpy.array(list(vi_indexes.values()))
        for monitor in session_stored_simulator.monitors:
            monitor.variables_of_interest = vi_indexes

    def reset_at_connectivity_change(self, is_simulator_copy, form, session_stored_simulator):
        """
        In case the user copies a simulation and changes the Connectivity, we want to reset the Model and Noise
        parameters because they might not fit to the new Connectivity's nr of regions.
        """
        if is_simulator_copy and form.connectivity.value != session_stored_simulator.connectivity:
            self._reset_model(session_stored_simulator)
            # Stochastic integrators carry region-sized noise arrays, so reset those too.
            if issubclass(type(session_stored_simulator.integrator), IntegratorStochastic):
                session_stored_simulator.integrator.noise = type(session_stored_simulator.integrator.noise)()

    def reset_at_surface_change(self, is_simulator_copy, form, session_stored_simulator):
        """
        In case the user copies a surface-simulation and changes the Surface, we want to reset the Model
        parameters because they might not fit to the new Surface's nr of vertices.
        """
        # Reset when a surface was added where none existed, or when a different surface was chosen.
        if is_simulator_copy and (session_stored_simulator.surface is None and form.surface.value
                                  or session_stored_simulator.surface and
                                  form.surface.value != session_stored_simulator.surface.surface_gid):
            self._reset_model(session_stored_simulator)

    @staticmethod
    def _set_simulator_range_parameter(simulator, range_parameter_name, range_parameter_value):
        """Set a (possibly dotted, e.g. 'model.a') attribute path on the simulator to the given value."""
        range_param_name_list = range_parameter_name.split('.')
        current_attr = simulator
        for param_name in range_param_name_list[:len(range_param_name_list) - 1]:
            current_attr = getattr(current_attr, param_name)
        setattr(current_attr, range_param_name_list[-1], range_parameter_value)

    def async_launch_and_prepare_simulation(self, burst_config, user, project, simulator_algo, simulator):
        """Prepare storage + burst fields for a single simulation and launch it; mark the burst on failure."""
        try:
            operation = self.operation_service.prepare_operation(user.id, project, simulator_algo,
                                                                 view_model=simulator, burst_gid=burst_config.gid,
                                                                 op_group_id=burst_config.fk_operation_group)
            burst_config = self.burst_service.update_simulation_fields(burst_config, operation.id, simulator.gid)
            storage_path = self.storage_interface.get_project_folder(project.name, str(operation.id))
            self.burst_service.store_burst_configuration(burst_config, storage_path)
            wf_errs = 0
            try:
                OperationService().launch_operation(operation.id, True)
                return operation
            except Exception as excep:
                self.logger.error(excep)
                wf_errs += 1
                if burst_config:
                    self.burst_service.mark_burst_finished(burst_config, error_message=str(excep))
            self.logger.debug("Finished launching workflow. The operation was launched successfully, " +
                              str(wf_errs) + " had error on pre-launch steps")
        except Exception as excep:
            # Preparation itself failed: log and mark the burst as finished with an error.
            self.logger.error(excep)
            if burst_config:
                self.burst_service.mark_burst_finished(burst_config, error_message=str(excep))

    def prepare_simulation_on_server(self, user_id, project, algorithm, zip_folder_path, simulator_file):
        """Prepare an operation from an uploaded simulator H5 and launch it."""
        simulator_vm = h5.load_view_model_from_file(simulator_file)
        operation = self.operation_service.prepare_operation(user_id, project, algorithm, view_model=simulator_vm)
        self.async_launch_simulation_on_server(operation, zip_folder_path)
        return operation

    def async_launch_simulation_on_server(self, operation, zip_folder_path):
        """Launch the prepared operation; the staging folder is always removed, even on failure."""
        try:
            OperationService().launch_operation(operation.id, True)
            return operation
        except Exception as excep:
            self.logger.error(excep)
        finally:
            shutil.rmtree(zip_folder_path)

    @staticmethod
    def _set_range_param_in_dict(param_value):
        """Normalize a range value for JSON storage (first array element, or UUID hex string)."""
        if type(param_value) is numpy.ndarray:
            return param_value[0]
        elif isinstance(param_value, uuid.UUID):
            return param_value.hex
        else:
            return param_value

    def async_launch_and_prepare_pse(self, burst_config, user, project, simulator_algo, range_param1, range_param2,
                                     session_stored_simulator):
        """Prepare and launch one operation per PSE parameter combination, honoring burst cancellation."""
        try:
            algo_category = simulator_algo.algorithm_category
            operation_group = burst_config.operation_group
            metric_operation_group = burst_config.metric_operation_group
            # A missing second range is modelled as a single None value so one loop shape covers both cases.
            range_param2_values = [None]
            if range_param2:
                range_param2_values = range_param2.get_range_values()
            # Flag the burst as pending while operations are being prepared, so other
            # code paths can see preparation is in progress.
            GROUP_BURST_PENDING[burst_config.id] = True
            operations, pse_canceled = self._prepare_operations(algo_category, burst_config, metric_operation_group,
                                                                operation_group, project, range_param1, range_param2,
                                                                range_param2_values, session_stored_simulator,
                                                                simulator_algo, user)
            GROUP_BURST_PENDING[burst_config.id] = False
            if pse_canceled:
                return
            wf_errs = self._launch_operations(operations, burst_config)
            self.logger.debug("Finished launching workflows. " + str(len(operations) - wf_errs) +
                              " were launched successfully, " + str(wf_errs) + " had error on pre-launch steps")
            return operations[0] if len(operations) > 0 else None
        except Exception as excep:
            self.logger.error(excep)
            self.burst_service.mark_burst_finished(burst_config, error_message=str(excep))

    def _launch_operations(self, operations, burst_config):
        """Launch each prepared operation, re-checking burst status before every launch.

        Returns the number of pre-launch errors, or None if the burst was canceled/deleted mid-way.
        """
        wf_errs = 0
        for operation in operations:
            try:
                # Re-read the burst each iteration: the user may cancel it between launches.
                burst_config = dao.get_burst_by_id(burst_config.id)
                if burst_config is None or burst_config.status in [BurstConfiguration.BURST_CANCELED,
                                                                   BurstConfiguration.BURST_ERROR]:
                    self.logger.debug("Preparing operations cannot continue. Burst config {}".format(burst_config))
                    return
                OperationService().launch_operation(operation.id, True)
            except Exception as excep:
                self.logger.error(excep)
                wf_errs += 1
                self.burst_service.mark_burst_finished(burst_config, error_message=str(excep))
        return wf_errs

    def _prepare_operations(self, algo_category, burst_config, metric_operation_group, operation_group, project,
                            range_param1, range_param2, range_param2_values, session_stored_simulator, simulator_algo,
                            user):
        """Build one operation per parameter combination; abort early if the burst is deleted or canceled.

        Returns (operations, pse_canceled).
        """
        first_simulator = None
        pse_canceled = False
        operations = []
        for param1_value in range_param1.get_range_values():
            for param2_value in range_param2_values:
                # Re-read the burst each iteration so a user-triggered cancel/delete stops preparation.
                burst_config = dao.get_burst_by_id(burst_config.id)
                if burst_config is None:
                    self.logger.debug("Burst config was deleted")
                    pse_canceled = True
                    break
                if burst_config.status in [BurstConfiguration.BURST_CANCELED, BurstConfiguration.BURST_ERROR]:
                    self.logger.debug(
                        "Current burst status is {}. Preparing operations cannot continue.".format(
                            burst_config.status))
                    pse_canceled = True
                    break
                # Copy, but generate a new GUID for every Simulator in PSE
                simulator = copy.deepcopy(session_stored_simulator)
                simulator.gid = uuid.uuid4()
                self._set_simulator_range_parameter(simulator, range_param1.name, param1_value)
                ranges = {range_param1.name: self._set_range_param_in_dict(param1_value)}
                if param2_value is not None:
                    self._set_simulator_range_parameter(simulator, range_param2.name, param2_value)
                    ranges[range_param2.name] = self._set_range_param_in_dict(param2_value)
                ranges = json.dumps(ranges)
                operation = self.operation_service.prepare_operation(user.id, project, simulator_algo,
                                                                     view_model=simulator, ranges=ranges,
                                                                     burst_gid=burst_config.gid,
                                                                     op_group_id=burst_config.fk_operation_group)
                simulator.range_values = ranges
                operations.append(operation)
                if first_simulator is None:
                    # The first prepared operation carries the burst fields and datatype groups.
                    first_simulator = simulator
                    storage_path = self.storage_interface.get_project_folder(project.name, str(operation.id))
                    burst_config = self.burst_service.update_simulation_fields(burst_config, operation.id,
                                                                               first_simulator.gid)
                    self.burst_service.store_burst_configuration(burst_config, storage_path)
                    datatype_group = DataTypeGroup(operation_group, operation_id=operation.id,
                                                   fk_parent_burst=burst_config.gid,
                                                   state=algo_category.defaultdatastate)
                    dao.store_entity(datatype_group)
                    metrics_datatype_group = DataTypeGroup(metric_operation_group, fk_parent_burst=burst_config.gid,
                                                           state=algo_category.defaultdatastate)
                    dao.store_entity(metrics_datatype_group)
        return operations, pse_canceled

    @staticmethod
    def compute_conn_branch_conditions(is_branch, simulator):
        """For a branched simulation, build a filter pinning connectivities to the same region count."""
        if not is_branch:
            return None
        conn = load.load_entity_by_gid(simulator.connectivity)
        if conn.number_of_regions:
            return FilterChain(fields=[FilterChain.datatype + '.number_of_regions'],
                               operations=["=="], values=[conn.number_of_regions])

    @staticmethod
    def validate_first_fragment(form, project_id, conn_idx):
        """Attach a form error when the project holds no connectivity datatype at all."""
        conn_count = dao.count_datatypes(project_id, conn_idx)
        if conn_count == 0:
            form.connectivity.errors.append("No connectivity in the project! Simulation cannot be started without "
                                            "a connectivity!")

    def get_simulation_state_index(self, burst_config, simulation_history_class):
        """Fetch the simulation-history entities of the parent burst; raise if none exist (cannot branch)."""
        parent_burst = burst_config.parent_burst_object
        simulation_state_index = dao.get_generic_entity(simulation_history_class, parent_burst.gid,
                                                        "fk_parent_burst")
        if simulation_state_index is None or len(simulation_state_index) < 1:
            exc = BurstServiceException("Simulation State not found for %s, thus we are unable to branch from "
                                        "it!" % burst_config.name)
            self.logger.error(exc)
            raise exc
        return simulation_state_index
class TestFlowController(BaseControllersTest):
    """ Unit tests for FlowController """

    def setup_method(self):
        """ Sets up the environment for testing; creates a `FlowController` """
        self.init()
        self.flow_c = FlowController()
        self.burst_c = SimulatorController()
        self.operation_service = OperationService()

    def teardown_method(self):
        """ Cleans up the testing environment """
        self.cleanup()
        self.clean_database()

    def test_context_selected(self):
        """
        Remove the project from CherryPy session and check that you are redirected to projects page.
        """
        del cherrypy.session[common.KEY_PROJECT]
        self._expect_redirect('/project/viewall', self.flow_c.step_analyzers)

    def test_valid_step(self):
        """
        For all algorithm categories check that a submenu is generated and the result
        page has it's title given by category name.
        """
        result_dict = self.flow_c.step_analyzers()
        assert common.KEY_SUBMENU_LIST in result_dict, \
            "Expect to have a submenu with available algorithms for category."
        assert result_dict["section_name"] == 'analyze'

    def test_step_connectivity(self):
        """
        Check that the correct section name and connectivity sub-menu are returned for the connectivity step.
        """
        result_dict = self.flow_c.step_connectivity()
        assert result_dict['section_name'] == 'connectivity'
        assert result_dict['submenu_list'] == self.flow_c.connectivity_submenu

    def test_default(self):
        """
        Test default method from step controllers. Check that the submit link is ok, that a mainContent
        is present in result dict and that the isAdapter flag is set to true.
        """
        cherrypy.request.method = "GET"
        categories = dao.get_algorithm_categories()
        for categ in categories:
            # Ignore creators, as those won't go through this flow
            if categ.displayname in [CreateAlgorithmCategoryConfig.category_name]:
                continue
            algo_groups = dao.get_adapters_from_categories([categ.id])
            for algo in algo_groups:
                result_dict = self.flow_c.default(categ.id, algo.id)
                assert result_dict[common.KEY_SUBMIT_LINK] == '/flow/%i/%i' % (categ.id, algo.id)
                assert 'mainContent' in result_dict
                assert result_dict['isAdapter']

    def test_default_cancel(self):
        """
        On cancel we should get a redirect to the back page link.
        """
        cherrypy.request.method = "POST"
        categories = dao.get_algorithm_categories()
        algo_groups = dao.get_adapters_from_categories([categories[0].id])
        self._expect_redirect('/project/viewoperations/%i' % self.test_project.id, self.flow_c.default,
                              categories[0].id, algo_groups[0].id, cancel=True, back_page='operations')

    def test_default_invalid_key(self):
        """
        Pass invalid keys for adapter and step and check you get redirect to tvb entry page with error set.
        """
        self._expect_redirect('/tvb?error=True', self.flow_c.default, 'invalid', 'invalid')

    def test_read_datatype_attribute(self, dummy_datatype_index_factory):
        """
        Read an attribute from a datatype.
        """
        dt = dummy_datatype_index_factory(row1='This is stored data')
        dt.subject = "test_subject"
        dt.state = "RAW_STATE"
        # The controller JSON-encodes the attribute value, hence the extra quotes.
        returned_data = self.flow_c.read_datatype_attribute(dt.gid, "row1")
        assert returned_data == '"This is stored data"'

    def test_read_datatype_attribute_method_call(self, dummy_datatype_index_factory):
        """
        Call method on given datatype.
        """
        dt = dummy_datatype_index_factory(row1='This is stored data')
        args = {'length': 101}
        returned_data = self.flow_c.read_datatype_attribute(dt.gid, 'return_test_data', **args)
        assert returned_data.replace('"', '') == " ".join(str(x) for x in range(101))

    def test_get_simple_adapter_interface(self, test_adapter_factory):
        """Check the adapter form returned by the controller matches the adapter's own form values."""
        algo = test_adapter_factory()
        form = TestAdapter1Form()
        adapter = TestFactory.create_adapter('tvb.tests.framework.adapters.testadapter1', 'TestAdapter1')
        adapter.submit_form(form)
        result = self.flow_c.get_simple_adapter_interface(algo.id)
        expected_interface = adapter.get_form()
        found_form = result['adapter_form']['adapter_form']
        assert isinstance(result['adapter_form'], dict)
        assert isinstance(found_form, TestAdapter1Form)
        assert found_form.test1_val1.value == expected_interface.test1_val1.value
        assert found_form.test1_val2.value == expected_interface.test1_val2.value

    def test_stop_burst_operation(self, simulation_launch):
        """Canceling (not removing) a running burst operation must set its status to canceled."""
        operation = simulation_launch(self.test_user, self.test_project, 1000)
        assert not operation.has_finished
        self.flow_c.cancel_or_remove_operation(operation.id, 0, False)
        operation = dao.get_operation_by_id(operation.id)
        assert operation.status == STATUS_CANCELED

    def test_stop_burst_operation_group(self, simulation_launch):
        """Canceling an operation group must cancel every operation in the group."""
        first_op = simulation_launch(self.test_user, self.test_project, 1000, True)
        operations_group_id = first_op.fk_operation_group
        assert not first_op.has_finished
        self.flow_c.cancel_or_remove_operation(operations_group_id, 1, False)
        operations = dao.get_operations_in_group(operations_group_id)
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            assert operation.status == STATUS_CANCELED

    def test_remove_burst_operation(self, simulation_launch):
        """Removing a burst operation must delete it from the database."""
        operation = simulation_launch(self.test_user, self.test_project, 1000)
        assert not operation.has_finished
        self.flow_c.cancel_or_remove_operation(operation.id, 0, True)
        operation = dao.try_get_operation_by_id(operation.id)
        assert operation is None

    def test_remove_burst_operation_group(self, simulation_launch):
        """Removing an operation group must delete every operation in the group."""
        first_op = simulation_launch(self.test_user, self.test_project, 1000, True)
        operations_group_id = first_op.fk_operation_group
        assert not first_op.has_finished
        self.flow_c.cancel_or_remove_operation(operations_group_id, 1, True)
        operations = dao.get_operations_in_group(operations_group_id)
        for operation in operations:
            operation = dao.try_get_operation_by_id(operation.id)
            assert operation is None

    def _asynch_launch_simple_op(self):
        """Prepare and submit a simple test-adapter operation to the cluster; return it unfinished."""
        adapter = TestFactory.create_adapter('tvb.tests.framework.adapters.testadapter1', 'TestAdapter1')
        view_model = TestModel()
        view_model.test1_val1 = 5
        view_model.test1_val2 = 6
        algo = adapter.stored_adapter
        operation = self.operation_service.prepare_operation(self.test_user.id, self.test_project, algo,
                                                             view_model=view_model)
        self.operation_service._send_to_cluster(operation, adapter)
        return operation

    def test_stop_operation(self):
        """Canceling a plain (non-burst) running operation must set its status to canceled."""
        operation = self._asynch_launch_simple_op()
        operation = dao.get_operation_by_id(operation.id)
        assert not operation.has_finished
        self.flow_c.cancel_or_remove_operation(operation.id, 0, False)
        operation = dao.get_operation_by_id(operation.id)
        assert operation.status == STATUS_CANCELED

    def test_stop_operations_group(self, test_adapter_factory, datatype_group_factory):
        """Canceling a plain operation group must cancel every started operation in it."""
        group = datatype_group_factory(status=STATUS_STARTED, store_vm=True)
        operations = dao.get_operations_in_group(group.fk_from_operation)
        operation_group_id = 0
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            assert not operation.has_finished
            operation_group_id = operation.fk_operation_group
        self.flow_c.cancel_or_remove_operation(operation_group_id, 1, False)
        for operation in operations:
            operation = dao.get_operation_by_id(operation.id)
            assert operation.status == STATUS_CANCELED
class OperationFacade:
    """Facade exposing operation status, results and launching to the REST layer."""

    def __init__(self):
        self.logger = get_logger(self.__class__.__module__)
        self.operation_service = OperationService()
        self.project_service = ProjectService()
        self.user_service = UserService()
        self.files_helper = FilesHelper()

    @staticmethod
    def get_operation_status(operation_gid):
        """Return the status of the operation with the given GID.

        :raises InvalidIdentifierException: when no operation exists for the GID.
        """
        operation = ProjectService.load_operation_by_gid(operation_gid)
        if operation is None:
            get_logger().warning("Invalid operation GID: {}".format(operation_gid))
            raise InvalidIdentifierException()
        return operation.status

    @staticmethod
    def get_operations_results(operation_gid):
        """Return the DataTypeDto results of the operation with the given GID (empty list if none).

        :raises InvalidIdentifierException: when no operation exists for the GID.
        """
        operation = ProjectService.load_operation_lazy_by_gid(operation_gid)
        if operation is None:
            get_logger().warning("Invalid operation GID: {}".format(operation_gid))
            raise InvalidIdentifierException()
        data_types = ProjectService.get_results_for_operation(operation.id)
        if data_types is None:
            return []
        return [DataTypeDto(datatype) for datatype in data_types]

    def launch_operation(self, current_user_id, model_file, project_gid, algorithm_module, algorithm_classname,
                         fetch_file):
        """Stage the uploaded view-model (and uploader data files), then launch the operation.

        :param current_user_id: id of the user launching the operation
        :param model_file: uploaded H5 file holding the serialized view model
        :param project_gid: GID of the target project
        :param algorithm_module: fully qualified module of the algorithm to run
        :param algorithm_classname: class name of the algorithm to run
        :param fetch_file: callable(request_file_key, file_extension) retrieving extra uploaded files
        :returns: the GID of the launched operation
        :raises InvalidIdentifierException: unknown project or algorithm
        :raises ServiceException: any failure while preparing or launching
        """
        temp_folder = FilesHelper.create_temp_folder()
        try:
            model_h5_path = FilesHelper.save_temporary_file(model_file, temp_folder)
            try:
                project = self.project_service.find_project_lazy_by_gid(project_gid)
            except ProjectServiceException:
                raise InvalidIdentifierException()

            algorithm = AlgorithmService.get_algorithm_by_module_and_class(algorithm_module, algorithm_classname)
            if algorithm is None:
                raise InvalidIdentifierException(
                    'No algorithm found for: %s.%s' % (algorithm_module, algorithm_classname))

            try:
                adapter_instance = ABCAdapter.build_adapter(algorithm)
                # The view-model GID is read from the uploaded H5, not generated here.
                view_model = adapter_instance.get_view_model_class()()
                view_model_h5 = ViewModelH5(model_h5_path, view_model)
                view_model_gid = view_model_h5.gid.load()
                operation = self.operation_service.prepare_operation(current_user_id, project.id, algorithm,
                                                                     view_model_gid.hex)
                storage_path = self.files_helper.get_project_folder(project, str(operation.id))
                if isinstance(adapter_instance, ABCUploader):
                    # Uploader adapters declare extra files; each is stored on the matching
                    # H5 field and moved into the operation's storage folder.
                    for key, value in adapter_instance.get_form_class().get_upload_information().items():
                        data_file = fetch_file(request_file_key=key, file_extension=value)
                        data_file_path = FilesHelper.save_temporary_file(data_file, temp_folder)
                        file_name = os.path.basename(data_file_path)
                        upload_field = getattr(view_model_h5, key)
                        upload_field.store(os.path.join(storage_path, file_name))
                        shutil.move(data_file_path, storage_path)
                shutil.move(model_h5_path, storage_path)
                view_model_h5.close()
                # Use the instance-level service for consistency with prepare_operation above.
                self.operation_service.launch_operation(operation.id, True)
                return operation.gid
            except Exception as excep:
                self.logger.error(excep, exc_info=True)
                raise ServiceException(str(excep))
        finally:
            # Previously the temp folder was only removed (os.rmdir) on the fully successful
            # path, leaking staged files whenever any step failed. Always clean it up; on
            # success its contents have already been moved into the project folder.
            shutil.rmtree(temp_folder, ignore_errors=True)
class TestOperationService(BaseTestCase):
    """
    Test class for the introspection module. Some tests from here do async launches.
    For those cases Transactional tests won't work.
    """

    def setup_method(self):
        """
        Reset the database before each test.
        """
        self.clean_database()
        initialize_storage()
        self.test_user = TestFactory.create_user()
        self.test_project = TestFactory.create_project(self.test_user)
        self.operation_service = OperationService()
        self.backup_hdd_size = TvbProfile.current.MAX_DISK_SPACE

    def teardown_method(self):
        """
        Reset the database when test is done.
        """
        TvbProfile.current.MAX_DISK_SPACE = self.backup_hdd_size
        self.clean_database()

    def _assert_no_ddti(self):
        # Helper: the project must contain no DummyDataTypeIndex entities.
        count = dao.count_datatypes(self.test_project.id, DummyDataTypeIndex)
        assert 0 == count

    def _assert_stored_ddti(self, expected_cnt=1):
        # Helper: exactly expected_cnt DummyDataTypeIndex entities stored,
        # with the default subject; returns the last one for further checks.
        count = dao.count_datatypes(self.test_project.id, DummyDataTypeIndex)
        assert expected_cnt == count
        datatype = dao.try_load_last_entity_of_type(self.test_project.id, DummyDataTypeIndex)
        assert datatype.subject == DataTypeMetaData.DEFAULT_SUBJECT, "Wrong data stored."
        return datatype

    def test_datatypes_groups(self, test_adapter_factory, datatype_group_factory):
        """
        Tests if the dataType group is set correct on the dataTypes resulted from the
        same operation group.
        """
        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        assert len(all_operations) == 0, "There should be no operation"

        dt_group = datatype_group_factory(project=self.test_project)
        model = TestModel()
        test_adapter_factory()
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter1", "TestAdapter1")

        operations = dao.get_operations_in_group(dt_group.id)
        for op in operations:
            model.gid = uuid.uuid4()
            op_path = StorageInterface().get_project_folder(self.test_project.name, str(op.id))
            op.view_model_gid = model.gid.hex
            op.algorithm = adapter.stored_adapter
            h5.store_view_model(model, op_path)
            dao.store_entity(op)

        all_operations = dao.get_filtered_operations(self.test_project.id, None)
        assert len(all_operations) == 2, "Expected two operation groups"
        assert all_operations[0][2] == 6, "Expected 6 operations in one group"
        operation_group_id = all_operations[0][3]
        # BUGFIX: identity check against None (was `!= None`)
        assert operation_group_id is not None, "The operation should be part of a group."

        self.operation_service.stop_operation(all_operations[1][0])
        self.operation_service.stop_operation(all_operations[1][1])
        # Make sure operations are executed
        self.operation_service.launch_operation(all_operations[1][0], False)
        self.operation_service.launch_operation(all_operations[1][1], False)

        resulted_datatypes = dao.get_datatype_in_group(operation_group_id=operation_group_id)
        assert len(resulted_datatypes) >= 2, "Expected at least 2, but: " + str(len(resulted_datatypes))

        dt = dao.get_datatype_by_id(resulted_datatypes[0].id)
        datatype_group = dao.get_datatypegroup_by_op_group_id(operation_group_id)
        assert dt.fk_datatype_group == datatype_group.id, "DataTypeGroup is incorrect"

    def test_initiate_operation(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory()
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter1", "TestAdapter1")
        view_model = TestModel()
        view_model.test1_val1 = 5
        view_model.test1_val2 = 5
        adapter.generic_attributes.subject = "Test4242"

        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                  model_view=view_model)

        dts, count = dao.get_values_of_datatype(self.test_project.id, DummyDataTypeIndex)
        assert count == 1
        assert len(dts) == 1
        datatype = dao.get_datatype_by_id(dts[0][0])
        assert datatype.subject == "Test4242", "Wrong data stored."
        assert datatype.type == adapter.get_output()[0].__name__, "Wrong data stored."

    def test_delete_dt_free_hdd_space(self, test_adapter_factory, operation_factory):
        """
        Launch two operations and give enough available space for user so that both should finish.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        view_model = adapter.get_view_model()()
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(view_model))

        self._assert_no_ddti()
        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                  model_view=view_model)
        datatype = self._assert_stored_ddti()

        # Now free some space and relaunch
        ProjectService().remove_datatype(self.test_project.id, datatype.gid)
        self._assert_no_ddti()
        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                  model_view=view_model)
        self._assert_stored_ddti()

    def test_launch_two_ops_hdd_with_space(self, test_adapter_factory):
        """
        Launch two operations and give enough available space for user so that both should finish.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        view_model = adapter.get_view_model()()
        TvbProfile.current.MAX_DISK_SPACE = 2 * float(adapter.get_required_disk_size(view_model))

        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                  model_view=view_model)
        datatype = self._assert_stored_ddti()

        # Now update the maximum disk size to be the size of the previously resulted datatypes (transform from kB to MB)
        # plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size) + float(
            adapter.get_required_disk_size(view_model))

        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                  model_view=view_model)
        self._assert_stored_ddti(2)

    def test_launch_two_ops_hdd_full_space(self):
        """
        Launch two operations and give available space for user so that the first should finish,
        but after the update to the user hdd size the second should not.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        view_model = adapter.get_view_model()()
        TvbProfile.current.MAX_DISK_SPACE = (1 + float(adapter.get_required_disk_size(view_model)))

        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                  model_view=view_model)
        datatype = self._assert_stored_ddti()

        # Now update the maximum disk size to be less than size of the previously resulted dts (transform kB to MB)
        # plus what is estimated to be required from the next one (transform from B to MB)
        TvbProfile.current.MAX_DISK_SPACE = float(datatype.disk_size - 1) + \
            float(adapter.get_required_disk_size(view_model) - 1)

        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                      model_view=view_model)
        self._assert_stored_ddti()

    def test_launch_operation_hdd_with_space(self):
        """
        Test the actual operation flow by executing a test adapter.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        view_model = adapter.get_view_model()()
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(view_model))

        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                  model_view=view_model)
        self._assert_stored_ddti()

    def test_launch_operation_hdd_with_space_started_ops(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)
        space_taken_by_started = 100
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        form = TestAdapterHDDRequiredForm()
        adapter.submit_form(form)
        started_operation = model_operation.Operation(None, self.test_user.id, self.test_project.id,
                                                      adapter.stored_adapter.id,
                                                      status=model_operation.STATUS_STARTED,
                                                      estimated_disk_size=space_taken_by_started)
        view_model = adapter.get_view_model()()
        dao.store_entity(started_operation)
        TvbProfile.current.MAX_DISK_SPACE = float(
            adapter.get_required_disk_size(view_model) + space_taken_by_started)

        self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                  model_view=view_model)
        self._assert_stored_ddti()

    def test_launch_operation_hdd_full_space(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        form = TestAdapterHDDRequiredForm()
        adapter.submit_form(form)
        view_model = adapter.get_view_model()()
        TvbProfile.current.MAX_DISK_SPACE = float(adapter.get_required_disk_size(view_model) - 1)

        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                      model_view=view_model)
        self._assert_no_ddti()

    def test_launch_operation_hdd_full_space_started_ops(self, test_adapter_factory):
        """
        Test the actual operation flow by executing a test adapter.
        """
        test_adapter_factory(adapter_class=TestAdapterHDDRequired)
        space_taken_by_started = 100
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter3", "TestAdapterHDDRequired")
        form = TestAdapterHDDRequiredForm()
        adapter.submit_form(form)
        started_operation = model_operation.Operation(None, self.test_user.id, self.test_project.id,
                                                      adapter.stored_adapter.id,
                                                      status=model_operation.STATUS_STARTED,
                                                      estimated_disk_size=space_taken_by_started)
        view_model = adapter.get_view_model()()
        dao.store_entity(started_operation)
        TvbProfile.current.MAX_DISK_SPACE = float(
            adapter.get_required_disk_size(view_model) + space_taken_by_started - 1)

        with pytest.raises(NoMemoryAvailableException):
            self.operation_service.initiate_operation(self.test_user, self.test_project, adapter,
                                                      model_view=view_model)
        self._assert_no_ddti()

    def test_stop_operation(self, test_adapter_factory):
        """
        Test that an operation is successfully stopped.
        """
        test_adapter_factory(adapter_class=TestAdapter2)
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter2", "TestAdapter2")
        view_model = adapter.get_view_model()()
        view_model.test = 5
        algo = adapter.stored_adapter
        operation = self.operation_service.prepare_operation(self.test_user.id, self.test_project, algo,
                                                             view_model=view_model)

        self.operation_service._send_to_cluster(operation, adapter)
        self.operation_service.stop_operation(operation)

        operation = dao.get_operation_by_id(operation.id)
        # BUGFIX: was `assert operation.status, STATUS_CANCELED == "..."`, which
        # used the comma as an assert *message* and never compared the status.
        assert operation.status == model_operation.STATUS_CANCELED, "Operation should have been canceled!"

    def test_stop_operation_finished(self, test_adapter_factory):
        """
        Test that an operation that is already finished is not changed by the stop operation.
        """
        test_adapter_factory()
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter1", "TestAdapter1")
        view_model = adapter.get_view_model()()
        view_model.test1_val1 = 5
        view_model.test1_val2 = 5
        algo = adapter.stored_adapter
        operation = self.operation_service.prepare_operation(self.test_user.id, self.test_project, algo,
                                                             view_model=view_model)
        self.operation_service._send_to_cluster(operation, adapter)
        operation = dao.get_operation_by_id(operation.id)
        operation.status = model_operation.STATUS_FINISHED
        dao.store_entity(operation)

        self.operation_service.stop_operation(operation.id)

        operation = dao.get_operation_by_id(operation.id)
        # BUGFIX: same broken assert-with-comma pattern as above.
        assert operation.status == model_operation.STATUS_FINISHED, "Operation shouldn't have been canceled!"

    def test_fire_operation(self):
        """
        Test preparation of an adapter and launch mechanism.
        """
        adapter = TestFactory.create_adapter("tvb.tests.framework.adapters.testadapter1", "TestAdapter1")
        test_user = TestFactory.create_user(username="******")
        test_project = TestFactory.create_project(admin=test_user, name="test_project_fire_sim")

        result = OperationService().fire_operation(adapter, test_user, test_project.id,
                                                   view_model=adapter.get_view_model()())
        assert result.endswith("has finished."), "Operation fail"
class OperationFacade:
    """
    Facade over operation-related services: status/result queries plus a
    generic launch entry point driven by an uploaded view-model H5 file.
    """

    def __init__(self):
        self.logger = get_logger(self.__class__.__module__)
        self.operation_service = OperationService()
        self.project_service = ProjectService()
        self.user_service = UserService()

    @staticmethod
    def get_operation_status(operation_gid):
        """
        Return the status of the operation identified by *operation_gid*.

        :raises InvalidIdentifierException: when no operation has this GID
        """
        operation = ProjectService.load_operation_by_gid(operation_gid)
        if operation is None:
            get_logger().warning("Invalid operation GID: {}".format(operation_gid))
            raise InvalidIdentifierException()
        return operation.status

    @staticmethod
    def get_operations_results(operation_gid):
        """
        Return the datatypes produced by the operation with *operation_gid*,
        wrapped as DataTypeDto instances; empty list when there are none.

        :raises InvalidIdentifierException: when no operation has this GID
        """
        operation = ProjectService.load_operation_lazy_by_gid(operation_gid)
        if operation is None:
            get_logger().warning("Invalid operation GID: {}".format(operation_gid))
            raise InvalidIdentifierException()
        data_types = ProjectService.get_results_for_operation(operation.id)
        if data_types is None:
            return []
        return [DataTypeDto(datatype) for datatype in data_types]

    def launch_operation(self, current_user_id, model_file, project_gid,
                         algorithm_module, algorithm_classname, fetch_file):
        """
        Prepare and asynchronously launch an operation from an uploaded
        view-model H5 file.

        :param current_user_id: id of the user launching the operation
        :param model_file: uploaded file holding the serialized view model
        :param project_gid: GID of the target project
        :param algorithm_module: module path of the algorithm to run
        :param algorithm_classname: class name of the algorithm to run
        :param fetch_file: callback(request_file_key, file_extension) used to
            retrieve extra upload files for uploader adapters
        :return: GID of the prepared operation
        :raises InvalidIdentifierException: unknown project or algorithm
        :raises ServiceException: any other failure while preparing/launching
        """
        temp_folder = create_temp_folder()
        model_h5_path = save_temporary_file(model_file, temp_folder)

        try:
            project = self.project_service.find_project_lazy_by_gid(project_gid)
        except ProjectServiceException:
            raise InvalidIdentifierException()

        try:
            algorithm = AlgorithmService.get_algorithm_by_module_and_class(
                algorithm_module, algorithm_classname)
            if algorithm is None:
                raise InvalidIdentifierException(
                    'No algorithm found for: %s.%s' % (algorithm_module, algorithm_classname))

            adapter_instance = ABCAdapter.build_adapter(algorithm)
            view_model = h5.load_view_model_from_file(model_h5_path)
            if isinstance(adapter_instance, ABCUploader):
                # Uploader adapters: fetch each extra data file into the temp
                # folder and record its path inside the view-model H5.
                with ViewModelH5(model_h5_path, view_model) as view_model_h5:
                    for key, value in adapter_instance.get_form_class().get_upload_information().items():
                        data_file = fetch_file(request_file_key=key, file_extension=value)
                        data_file_path = save_temporary_file(data_file, temp_folder)
                        view_model_h5.store_metadata_param(key, data_file_path)
            # Re-load deliberately: the H5 file may have been mutated by
            # store_metadata_param above, so the in-memory copy is stale.
            view_model = h5.load_view_model_from_file(model_h5_path)

            operation = self.operation_service.prepare_operation(
                current_user_id, project, algorithm, view_model=view_model)
            # The uploaded H5 is no longer needed once the operation is
            # prepared. NOTE(review): temp_folder itself is not removed here —
            # presumably the fetched data files must survive until the
            # operation consumes them; confirm cleanup happens downstream.
            if os.path.exists(model_h5_path):
                os.remove(model_h5_path)

            OperationService().launch_operation(operation.id, True)
            return operation.gid
        except Exception as excep:
            self.logger.error(excep, exc_info=True)
            raise ServiceException(str(excep))