def search_for_simulation(self):
    """Look up the simulation identified by ``self.search_id`` on the bench.

    :return: Simulation entity if it exists, None otherwise
    """
    terminal.show_info_message(
        "Searching for simulation with ID {}".format(self.search_id))
    candidate = core.bench.entities.Simulation(self.app_session, self.search_id)
    # A non-existent simulation comes back as an entity with an empty name
    return candidate if candidate.name else None
def get_list_of_existing_simulation_sumbodels(self):
    """Fetch and report the submodels attached to the searched simulation.

    :return: list of submodels of the simulation found by ``self.search_id``
    """
    terminal.show_info_message(
        "Trying to get list of existing submodels of simulation with ID {}"
        .format(self.search_id))
    found_simulation = self.search_for_simulation()
    existing_submodels = found_simulation.get_list_of_submodels()
    terminal.show_info_message(
        "List of existing submodels: {}".format(str(existing_submodels)))
    return existing_submodels
def clone_simulation(self):
    """Clone the simulation found by ``self.search_id``.

    :return: identifier of the cloned simulation, or None if the reference
             simulation could not be found
    """
    terminal.show_info_message(
        "Trying to clone simulation with ID {}".format(self.search_id))
    reference_simulation = self.search_for_simulation()
    if reference_simulation:
        cloned_simulation_id = reference_simulation.clone()
        terminal.show_info_message(
            "Cloned simulation ID: {}".format(cloned_simulation_id))
        # Bug fix: the original bare `return` discarded the new ID, so the
        # caller could not distinguish success from failure
        return cloned_simulation_id
    terminal.show_error_message("Failed to clone simulation")
    return None
def handle_response_to_login_request(self):
    """Handle the response to a login request.

    :return: True if connection has been established, False otherwise
    """
    payload = self.__response.json()
    # Expect a JSON object with a "login" field naming the connected user
    user = payload.get("login") if payload and isinstance(payload, dict) else None
    if not user:
        terminal.show_error_message("Failed to connect!")
        return False
    terminal.show_info_message("Successfully connected as {}!".format(user))
    return True
def get_list_of_existing_server_storage_submodels(self):
    """Fetch and report the submodels stored in the configured server-storage s|type.

    :return: list of submodels in the server storage
    """
    terminal.show_info_message(
        "Trying to get list of existing submodels in server storage (S|Type {})"
        .format(self.app_session.cfg.server_storage))
    storage_stype = core.bench.entities.SubmodelType(
        self.app_session, self.app_session.cfg.server_storage)
    existing_submodels = storage_stype.get_list_of_submodels()
    terminal.show_info_message(
        "List of existing submodels: {}".format(str(existing_submodels)))
    return existing_submodels
def get_list_of_submodels_to_be_uploaded(self):
    """Collect from the local database the submodel files to be uploaded
    for the searched simulation.

    :return: list of submodel records, or an empty list if the simulation
             could not be found
    """
    # Typo fix in user-visible message: "sumbodels" -> "submodels"
    terminal.show_info_message(
        "Trying to get list of submodels for simulation with ID {}".format(
            self.search_id))
    data_manager = LocalDataManager(self.app_session)
    simulation = self.search_for_simulation()
    if simulation is None:
        # Bug fix: search_for_simulation() may return None; previously this
        # crashed with AttributeError on get_parent_loadcase()
        terminal.show_error_message(
            "Simulation with ID {} not found".format(self.search_id))
        return []
    list_of_submodels = data_manager.get_submodels_list_from_database(
        simulation.get_parent_loadcase().tree_path)
    terminal.show_info_message("List of files to be uploaded: {}".format(
        str(list_of_submodels)))
    return list_of_submodels
def handle_response_to_login_request(self):
    """
    Handles response to login request
    :return: True if connection has been established, False otherwise
    """
    body = self.__response.json()
    if body and isinstance(body, dict):
        connected_user = body.get("login")
        if connected_user:
            terminal.show_info_message(
                "Successfully connected as {}!".format(connected_user))
            return True
    # Missing/empty body or no "login" field means the login did not succeed
    terminal.show_error_message("Failed to connect!")
    return False
def add_new_submodels_to_simulation(self):
    """Upload submodels to server storage and attach them to the searched simulation.

    :return: list of the simulation's submodels after the addition, or an
             empty list if the simulation could not be found
    """
    # Typo fix in user-visible message: "sumbodels" -> "submodels"
    terminal.show_info_message(
        "Trying to add new submodels to simulation {}".format(
            self.search_id))
    uploaded_submodels = self.upload_submodels_to_server_storage()
    uploaded_submodels_ids = [
        submodel.identifier for submodel in uploaded_submodels
    ]
    simulation = self.search_for_simulation()
    if simulation is None:
        # Bug fix: guard against a missing simulation; previously this
        # crashed with AttributeError on add_new_sumbodels()
        terminal.show_error_message(
            "Simulation with ID {} not found".format(self.search_id))
        return []
    # NOTE: `add_new_sumbodels` is the (typo'd) name of the external entity
    # API and must stay as-is
    simulation_submodels = simulation.add_new_sumbodels(
        uploaded_submodels_ids)
    terminal.show_info_message("List of simulation submodels: {}".format(
        str(simulation_submodels)))
    return simulation_submodels
def _change_targets(self):
    """
    Processing input JSON with behaviour `Update targets`
    Send requests to add new targets
    :return:
    """
    if self.json_type != JSONTypes.UPDATE_TARGETS.value:
        raise ValueError(
            "Method `change_targets()` can not be called for JSON of type `{}`"
            .format(self.json_type))
    # Walk over each vertices
    # Vertex contains information about base simulation
    # Obtain Loadcase ID from base simulation
    # Add new targets to this loadcase
    vertices = list(self.graph.vertices.values())
    assert all(isinstance(v, Vertex) for v in vertices)
    for v in vertices:
        base_simulation = v.base_simulation
        vertex_loadcase = v.loadcase
        parent_loadcase = base_simulation.get_loadcase()
        # Bug fix: test for None BEFORE dereferencing `vertex_loadcase` —
        # the fallback below expects it may be None, but the original code
        # accessed `.identifier` first and would raise AttributeError
        if (vertex_loadcase is not None
                and vertex_loadcase.identifier != parent_loadcase.identifier):
            terminal.show_error_message(
                "Mismatch loadcase from `loadcase_id` "
                "and parent loadcase from `base_simulation_id`")
        # Prefer the explicitly given loadcase; fall back to the parent one
        lc = vertex_loadcase if vertex_loadcase is not None else parent_loadcase
        targets = v.targets
        for t in targets:
            ans = lc.add_target(t.get("name"), t.get("value"),
                                t.get("condition"), t.get("dimension"),
                                t.get("tolerance"), t.get("description"))
            if ans is not None:
                terminal.show_info_message(
                    "Successfully added target {} to loadcase {}",
                    ans.identifier, lc.identifier)
            else:
                terminal.show_error_message(
                    "Failed to add target to loadcase {}", lc.identifier)
def process_json(self):
    """
    Processing input JSON file
    :return:
    """
    behaviour = self.json_type
    if behaviour == JSONTypes.SOLVE.value:
        self._run_all_tasks()
        if self.app_session.results_path is None:
            return
        # try to create directory
        target_dir = os.path.abspath(self.app_session.results_path)
        terminal.show_info_message(
            f"Trying to save key results to: {target_dir}")
        try:
            os.makedirs(target_dir)
        except FileExistsError:
            terminal.show_warning_message("Folder already exists")
        # collect values
        terminal.show_info_message("Collecting results...")
        data = self._collect_values()
        # save data
        terminal.show_info_message("Saving results...")
        results_file = os.path.join(target_dir, "Results.json")
        JSONDataManager.dump_data(data, results_file)
    elif behaviour == JSONTypes.UPDATE_TARGETS.value:
        self._change_targets()
    else:
        terminal.show_warning_message(
            f"Cannot process JSON with behaviour: {behaviour}")
def upload_submodels_to_server_storage(self):
    """Upload the hard-coded test submodel files into the configured
    server-storage s|type.

    :return: list of uploaded submodel entities
    """
    import os  # local import: keeps the block self-contained

    terminal.show_info_message(
        "Trying to upload files to S|Type {}".format(
            self.app_session.cfg.server_storage))
    local_files = [
        "test_uploading_submodel_01.wtf", "test_uploading_submodel_02.wtf",
        "test_uploading_submodel_03.wtf"
    ]
    # Fix: build paths with os.path.join instead of manual "/" concatenation —
    # portable across platforms and tolerant of a trailing separator in
    # `local_storage`
    local_paths = [
        os.path.join(self.app_session.cfg.local_storage, f)
        for f in local_files
    ]
    terminal.show_info_message("List of files: {}".format(str(local_paths)))
    stype = core.bench.entities.SubmodelType(
        self.app_session, self.app_session.cfg.server_storage)
    uploaded_submodels = stype.upload_new_submodel(*local_paths)
    terminal.show_info_message("List of uploaded submodels: {}".format(
        str(uploaded_submodels)))
    return uploaded_submodels
def terminate(code):
    """Report the exit status on the terminal and stop the process.

    :param code: process exit code; 0 means success
    """
    if code != 0:
        terminal.show_error_message("Failed with code {}", code)
    else:
        terminal.show_info_message("Success")
    sys.exit(code)
def _run_all_tasks(self):
    """
    Processing input JSON with behaviour `Solve`
    Run all simulations in graph vertices.

    Walks the workflow graph repeatedly: vertices without unfinished
    dependencies are processed by `status_based_behaviour`, finished
    vertices are removed, and the loop stops when every vertex reports
    done (1) or any vertex reports failure (-1).
    :return:
    """
    if self.json_type != JSONTypes.SOLVE.value:
        raise ValueError(
            "Method `run_all_tasks()` can not be called for JSON of type `{}`"
            .format(self.json_type))

    @method_info
    def status_based_behaviour(vertex):
        """
        Define main loop behaviour while walking through vertex basing on vertex status
        :param vertex: vertex in workflow graph
        :return terminate_loop: magic integer value:
                                -1: error occurred and main loop shall be stopped
                                0: current simulation is not done yet, continue
                                1: current simulation is done
        """
        assert isinstance(vertex, Vertex)
        terminal.show_info_message("Processing vertex with ID: {}",
                                   vertex.identifier)
        # if status is "New",
        # - clone base simulation
        # - upload submodels
        # - run cloned (current vertex) simulation
        # - update vertex status from simulation task status
        if vertex.status == "New":
            terminal.show_info_message("Vertex status: {}", vertex.status)
            terminal.show_info_message("Vertex base simulation ID: {}",
                                       vertex.base_simulation.identifier)
            base_simulation = vertex.base_simulation
            terminal.show_info_message(
                "Trying to clone base simulation...")
            current_simulation = base_simulation.clone()
            terminal.show_info_message(
                "Modify current simulation description...")
            # NOTE(review): set_description() is called before the clone-success
            # check below — if clone() can return None this line would fail first;
            # confirm clone() never returns None or move the check up.
            current_simulation.set_description(vertex.description)
            terminal.show_info_message(
                "Update vertex current simulation...")
            vertex.current_simulation = current_simulation
            if current_simulation:
                # if cloned successfully, upload submodels
                terminal.show_info_message(
                    "Cloned simulation ID: {}",
                    vertex.current_simulation.identifier)
                terminal.show_info_message(
                    "Uploading submodels for current simulation...")
                stype = vertex.stype
                uploaded_submodels = stype.upload_submodel(
                    *vertex.submodels)
                # uploaded_submodels_ids = [submodel.identifier for submodel in uploaded_submodels]
                terminal.show_info_message(
                    "Erasing current (cloned) simulation submodels...")
                # Replace the submodels inherited from the clone with the
                # freshly uploaded ones
                status = current_simulation.erase_submodels()
                if status:
                    terminal.show_info_message("Done")
                else:
                    terminal.show_error_message("Failed")
                _ = current_simulation.add_submodels(*uploaded_submodels)
                terminal.show_info_message(
                    "{} submodels added for current simulations",
                    len(uploaded_submodels))
                # start with default parameters
                terminal.show_info_message(
                    "Trying to run current simulation...")
                # obtain default parameters to run tasks from base simulation
                current_task = current_simulation.run(
                    bsi=base_simulation.identifier)
                vertex.current_task = current_task
                if current_task:
                    # if task created successfully, get status
                    terminal.show_info_message(
                        f"Created task ID: {vertex.current_task.identifier}"
                    )
                    vertex.status = current_task.get_status()
                    return 0
                terminal.show_error_message("Task has not been created.")
                return -1
            terminal.show_error_message("Simulation has not been cloned.")
            return -1
        # if status is "Finished",
        # - download vertex results
        # - save status; when all vertices will have the same status, loop can be stopped
        elif vertex.status == "Finished":
            terminal.show_info_message("Vertex status: {}", vertex.status)
            if len(vertex.results) == 0:
                terminal.show_info_message(
                    "No results selected for download")
            else:
                terminal.show_info_message("Downloading results...")
                current_simulation = vertex.current_simulation
                lst = current_simulation.download_files(*vertex.results)
                terminal.show_info_message(
                    "Successfully downloaded {} files", len(lst))
            return 1
        # if status is "Failed",
        # - terminate main loop
        elif vertex.status in ["Failed", "failed", "Error", "error"]:
            terminal.show_warning_message("Vertex status: {}", vertex.status)
            return -1
        # if status is unknown,
        # - update vertex status from simulation task status
        else:
            terminal.show_info_message("Updating vertex status...")
            current_task = vertex.current_task
            if current_task:
                current_status = current_task.get_status()
                vertex.status = current_status
                task_end_waiting, task_end_solving = current_task.get_time_estimation(
                )
                terminal.show_info_message(
                    "Current task estimated end waiting time: {}",
                    task_end_waiting)
                terminal.show_info_message(
                    "Current task estimated end solving time: {}",
                    task_end_solving)
            terminal.show_info_message("Vertex status: {}", vertex.status)
            return 0

    # --- main section --- main section --- main section --- main section --- main section --- main section ---
    stop_main_loop = False
    # list of graph vertices to iterate over it with possibility to modify it
    vertices = list(self.graph.vertices.values())
    assert all(isinstance(v, Vertex) for v in vertices)
    # initialize dictionary for saving loop results
    rs = {key: 0 for key in [v.identifier for v in vertices]}
    # terminal.show_info_dict("Initial state of results storage", rs)
    s = ""
    vals = []
    for k, v in rs.items():
        s += terminal.get_blank() + "{} → {}\n"
        vals.append(k)
        vals.append(v)
    terminal.show_info_message("Initial state of results storage:\n" + s,
                               *vals)
    # main loop - while all tasks are done or some failure occurred
    while not stop_main_loop:
        # iterate over all workflow graph vertices
        # remove vertices wish status = "Finished"
        # modify original list, no list copies, only one pass: traditional solution is to iterate backwards
        for i in reversed(range(len(vertices))):
            v = vertices[i]
            # check vertex links
            # if links list is empty, vertex is at root level and it's simulation can be started
            if len(v.links) == 0:
                terminal.show_info_message(
                    "Vertex {} has no linked vertices", v.identifier)
                r = status_based_behaviour(v)
                terminal.show_info_message(
                    "Current vertex result status: {}", r)
                rs[v.identifier] = r
                # terminal.show_info_message(f"Current state of the list of vertices results status: {str(rs)}")
                if r == -1:
                    terminal.show_error_message(
                        "Failed while processing vertex {}", v.identifier)
                    # stop_main_loop = True
                    break
                if r == 1:
                    terminal.show_info_message("Vertex {} is done",
                                               v.identifier)
                    del vertices[i]
            # else, if links list is not empty,
            else:
                terminal.show_info_message(
                    "Vertex {} has {} linked vertices",
                    v.identifier, len(v.links))
                terminal.show_info_message(
                    "Checking status of linked vertices...")
                # check status of all linked vertices
                if all(l.status == "Finished" for l in v.links):
                    # if all parent vertices successfully finished,
                    # current vertex can run
                    terminal.show_info_message(
                        "All linked vertices successfully finished")
                    r = status_based_behaviour(v)
                    terminal.show_info_message(
                        "Current vertex result status: {}", r)
                    rs[v.identifier] = r
                    # terminal.show_info_message(f"Current state of the list of vertices results status: {str(rs)}")
                    if r == -1:
                        terminal.show_error_message(
                            "Failed while processing vertex {}", v.identifier)
                        # stop_main_loop = True
                        break
                    if r == 1:
                        terminal.show_info_message("Vertex {} is done",
                                                   v.identifier)
                        del vertices[i]
                else:
                    terminal.show_info_message(
                        "Some linked vertices is not finished yet...")
        # Stop when every vertex reported done, or any vertex reported failure
        stop_main_loop = all(item == 1 for item in rs.values()) or any(
            item == -1 for item in rs.values())
        # terminal.show_info_message(f"List of vertices results status: {str(rs)}")
        # terminal.show_info_dict("Current state of results storage", rs)
        s = ""
        vals = []
        for k, v in rs.items():
            s += terminal.get_blank() + "{} → {}\n"
            vals.append(k)
            vals.append(v)
        # NOTE(review): this message says "Initial state" but runs inside the
        # loop — looks like a copy-paste of the pre-loop report; probably
        # meant "Current state". Runtime string left unchanged here.
        terminal.show_info_message(
            "Initial state of results storage:\n" + s, *vals)
        if not stop_main_loop:
            terminal.show_info_message(
                f"Waiting for the next loop ... [{WorkFlow.WALK_INTERVAL} sec]"
            )
            Timeout.pause(WorkFlow.WALK_INTERVAL)
        else:
            terminal.show_info_message("Terminating main loop ...")
def __init__(self, app_session):
    """Initialize the WorkFlow singleton.

    On first construction: parses the input JSON via JSONDataManager and,
    depending on the JSON `Behaviour` field, builds the workflow graph
    (Solve) or only adds vertices (Update targets). On subsequent
    constructions the existing instance is reused.

    :param app_session: application session providing sid and the input JSON
    :raises TypeError: if the JSON behaviour is not supported
    """
    # Singleton guard: only initialize state for the first instance
    if not WorkFlow.__instance:
        terminal.show_info_message("Workflow initialization...")
        self.__app_session = app_session
        self.__graph = Graph(self.app_session)
        self.__json_data_manager = JSONDataManager(self.app_session.json)
        self.__json_behaviour = self.__json_data_manager.get_behaviour()
        self.__json_data = self.__json_data_manager.get_json_data()
        terminal.show_info_message("Application session: {}".format(
            self.app_session.sid))
        # TODO:
        #  Different logic which depends on JSON `Behaviour` field
        #  - If `Behaviour` = `Solve`,
        #    build workflow graph (add vertices, build edges),
        #    `execute_all_tasks()` method can be called
        #  - If `Behaviour` = `Update targets`,
        #    only add vertices, determine CML-Bench loadcase from `bench_id` or `base_simulation_id`,
        #    create or update (?) loadcase targets
        #  - If `Behaviour` = `Dump`,
        #    whole application must be run with `-r` key
        #  - If `Behaviour` = `Values`,
        #    JSON cannot be passed as input file
        if self.__json_behaviour == JSONTypes.SOLVE.value:
            terminal.show_info_message(
                "JSON behaviour: Solving. Building workflow graph...")
            for data in self.__json_data.values():
                self.__graph.add_vertex(data)
            # Build one "{}\n" placeholder per vertex for a single log call
            s = ""
            vals = []
            for v in self.graph.vertices.values():
                s += terminal.get_blank() + "{}\n"
                vals.append(v)
            terminal.show_info_message("Workflow graph vertices:\n" + s,
                                       *vals)
            # terminal.show_info_objects("Workflow graph vertices: ", list(self.graph.vertices.values()))
            self.__graph.build_graph_edges()
            s = ""
            vals = []
            for v in self.graph.edges:
                s += terminal.get_blank() + "{}\n"
                vals.append(v)
            terminal.show_info_message("Workflow graph edges:\n" + s, *vals)
            # terminal.show_info_objects("Workflow graph edges: ", list(self.graph.edges))
        elif self.__json_behaviour == JSONTypes.UPDATE_TARGETS.value:
            terminal.show_info_message("JSON behaviour: Update targets.")
            for data in self.__json_data.values():
                self.__graph.add_vertex(data)
        else:
            raise TypeError("Unsupported JSON behaviour")
    else:
        terminal.show_info_message("Workflow already exists.")
    # FIXME lovely piece of code. Establish access to instance variables
    _ = self.get_instance(app_session)
def upload_submodel(self, *files, **params):
    """
    Upload files as submodels into an s|type, deduplicating on the server side.

    :param files: paths to files to be uploaded into current s|type
    :param params: response parameters;
                   `stype` - ID of s|type for uploading submodels;
                             optional; default is current s|type
                   `add_to_clipboard` - optional boolean parameter;
                                        default is False
    :return: list of uploaded submodels
    """
    # FIXME wtf??? create instance of SubmodelType inside its method
    if "stype" in params.keys():
        stype = SubmodelType(self._app_session, params.get("stype"))
    else:
        # stype = SubmodelType(self._app_session, self._app_session.cfg.server_storage)
        stype = self
    # The request API expects the literal strings "on"/"off", not a boolean
    if "add_to_clipboard" in params.keys():
        add_to_clipboard = "on" if bool(
            params.get("add_to_clipboard")) else "off"
    else:
        add_to_clipboard = "off"
    submodels = []
    for file in files:
        response = self._sender.send_upload_submodel_request(
            file, stype.tree_id, add_to_clipboard)
        # Rate-limit consecutive requests to the server
        Timeout.hold_your_horses()
        self._handler.set_response(response)
        result = self._handler.handle_response_to_upload_submodel_request()
        if result is not None:
            # Server reports duplicates: "to_delete" holds IDs of the freshly
            # created duplicates, "to_insert" the IDs to actually use
            submodel_ids_to_delete = result["to_delete"]
            submodel_ids_for_simulation = result["to_insert"]
            if len(submodel_ids_to_delete) == 0:
                terminal.show_info_message(
                    "Uploaded submodel id to use in simulation: {}",
                    submodel_ids_for_simulation[0])
                submodels.append(
                    Submodel(self._app_session,
                             submodel_ids_for_simulation[0]))
            else:
                # Uploaded file duplicates an existing submodel: delete the
                # new copy and reuse the already existing submodel ID
                terminal.show_warning_message(
                    "Uploaded submodel duplicates already existing submodel"
                )
                terminal.show_warning_message(
                    "Created submodel with id {} will be deleted",
                    submodel_ids_to_delete[0])
                response = self._sender.send_delete_submodel_from_server_request(
                    submodel_ids_to_delete[0])
                Timeout.hold_your_horses()
                self._handler.set_response(response)
                _ = self._handler.handle_response_to_delete_submodel_from_server_request(
                )
                terminal.show_warning_message("Duplicate was deleted")
                terminal.show_info_message(
                    "Already existing submodel id to use in simulation: {}",
                    submodel_ids_for_simulation[0])
                submodels.append(
                    Submodel(self._app_session,
                             submodel_ids_for_simulation[0]))
    return submodels
def add_target(self, name, value, condition, dimension, tolerance=None,
               description=None):
    """
    Adds new target to loadcase.
    If target with same name already exists, deletes old target and adds new.
    :param name: target name
    :param value: target value
    :param condition: target condition: 1 - >, 2 - <, 3 - +/-
    :param dimension: target dimension
    :param tolerance: tolerance for condition
    :param description: description for target
    :return: target object
    """
    # Map the numeric condition code to its display name
    if condition == 1:
        condition_name = ">"
    elif condition == 2:
        condition_name = "<"
    elif condition == 3:
        condition_name = "+/-"
    else:
        terminal.show_error_message(
            "Unsupported condition for new target: {}".format(condition))
        return None
    # Tolerance is only meaningful for the +/- (code 3) condition
    if condition != 3:
        tolerance = None
    if tolerance is None:
        has_tolerance = False
    else:
        has_tolerance = True
    # Request payload in the format expected by the add-target endpoint
    payload = {
        "conditionId": condition,
        "conditionName": condition_name,
        "description": description,
        "dimension": dimension,
        "hasTolerance": has_tolerance,
        "hierarchy": {
            "id": self.identifier,
            "objectType": {
                "displayName": "Loadcase",
                "iconSkin": "icon-loadcase",
                "isLeaf": False,
                "name": "loadcase",
                "subType": None,
                "tooltip": "Loadcase"
            },
            "parent": None
        },
        "name": name,
        "objectType": {
            "displayName": "Target value",
            "name": "targetValue",
            "subType": None,
            "tooltip": "Target value"
        },
        "tolerance": tolerance,
        "value": value
    }
    response = self._sender.send_add_loadcase_target_request(
        self.identifier, payload)
    Timeout.hold_your_horses()
    # if no target with that name exists, create new
    # else, need to find ID of target by name
    #       delete it
    #       and create new
    if response.status_code == 400:
        # 400 means a target with this name already exists: remove the old
        # one by name, then retry the creation once
        terminal.show_warning_message(response.json().get("message"))
        existing_targets = self.get_targets()
        for t in existing_targets:
            if t.name == name:
                tid = self.delete_target(t)
                if tid is not None:
                    terminal.show_info_message(
                        "Old target successfully removed")
                else:
                    terminal.show_error_message(
                        "Failed removing old target")
                    return None
                break
        response = self._sender.send_add_loadcase_target_request(
            self.identifier, payload)
    if response.status_code != 200:
        terminal.show_error_message("Failed adding new target")
        terminal.show_error_message(f"Response: {response.status_code}")
        return None
    terminal.show_info_message("Adding new target...")
    self._handler.set_response(response)
    target_data = self._handler.handle_response_to_add_loadcase_target_request(
    )
    if target_data:
        return Target(target_data)
    return None
def status_based_behaviour(vertex):
    """
    Define main loop behaviour while walking through vertex basing on vertex status
    :param vertex: vertex in workflow graph
    :return terminate_loop: magic integer value:
                            -1: error occurred and main loop shall be stopped
                            0: current simulation is not done yet, continue
                            1: current simulation is done
    """
    assert isinstance(vertex, Vertex)
    terminal.show_info_message("Processing vertex with ID: {}",
                               vertex.identifier)
    # if status is "New",
    # - clone base simulation
    # - upload submodels
    # - run cloned (current vertex) simulation
    # - update vertex status from simulation task status
    if vertex.status == "New":
        terminal.show_info_message("Vertex status: {}", vertex.status)
        terminal.show_info_message("Vertex base simulation ID: {}",
                                   vertex.base_simulation.identifier)
        base_simulation = vertex.base_simulation
        terminal.show_info_message(
            "Trying to clone base simulation...")
        current_simulation = base_simulation.clone()
        terminal.show_info_message(
            "Modify current simulation description...")
        # NOTE(review): set_description() runs before the clone-success check
        # below — if clone() can return None this line fails first; confirm.
        current_simulation.set_description(vertex.description)
        terminal.show_info_message(
            "Update vertex current simulation...")
        vertex.current_simulation = current_simulation
        if current_simulation:
            # if cloned successfully, upload submodels
            terminal.show_info_message(
                "Cloned simulation ID: {}",
                vertex.current_simulation.identifier)
            terminal.show_info_message(
                "Uploading submodels for current simulation...")
            stype = vertex.stype
            uploaded_submodels = stype.upload_submodel(
                *vertex.submodels)
            # uploaded_submodels_ids = [submodel.identifier for submodel in uploaded_submodels]
            terminal.show_info_message(
                "Erasing current (cloned) simulation submodels...")
            # Replace the clone's inherited submodels with the uploaded ones
            status = current_simulation.erase_submodels()
            if status:
                terminal.show_info_message("Done")
            else:
                terminal.show_error_message("Failed")
            _ = current_simulation.add_submodels(*uploaded_submodels)
            terminal.show_info_message(
                "{} submodels added for current simulations",
                len(uploaded_submodels))
            # start with default parameters
            terminal.show_info_message(
                "Trying to run current simulation...")
            # obtain default parameters to run tasks from base simulation
            current_task = current_simulation.run(
                bsi=base_simulation.identifier)
            vertex.current_task = current_task
            if current_task:
                # if task created successfully, get status
                terminal.show_info_message(
                    f"Created task ID: {vertex.current_task.identifier}"
                )
                vertex.status = current_task.get_status()
                return 0
            terminal.show_error_message("Task has not been created.")
            return -1
        terminal.show_error_message("Simulation has not been cloned.")
        return -1
    # if status is "Finished",
    # - download vertex results
    # - save status; when all vertices will have the same status, loop can be stopped
    elif vertex.status == "Finished":
        terminal.show_info_message("Vertex status: {}", vertex.status)
        if len(vertex.results) == 0:
            terminal.show_info_message(
                "No results selected for download")
        else:
            terminal.show_info_message("Downloading results...")
            current_simulation = vertex.current_simulation
            lst = current_simulation.download_files(*vertex.results)
            terminal.show_info_message(
                "Successfully downloaded {} files", len(lst))
        return 1
    # if status is "Failed",
    # - terminate main loop
    elif vertex.status in ["Failed", "failed", "Error", "error"]:
        terminal.show_warning_message("Vertex status: {}", vertex.status)
        return -1
    # if status is unknown,
    # - update vertex status from simulation task status
    else:
        terminal.show_info_message("Updating vertex status...")
        current_task = vertex.current_task
        if current_task:
            current_status = current_task.get_status()
            vertex.status = current_status
            task_end_waiting, task_end_solving = current_task.get_time_estimation(
            )
            terminal.show_info_message(
                "Current task estimated end waiting time: {}",
                task_end_waiting)
            terminal.show_info_message(
                "Current task estimated end solving time: {}",
                task_end_solving)
        terminal.show_info_message("Vertex status: {}", vertex.status)
        return 0
def main():
    """Application entry point.

    Resolves the root directory, loads configuration, parses command-line
    arguments, and runs an AppSession.

    :return: 0 on success; 1 if no JSON file was given; -1 if no arguments
             were passed; the config status code on configuration error;
             -666 on an unexpected exception
    """
    root_path = os.path.abspath(
        os.path.dirname(inspect.getsourcefile(lambda: 0)))
    terminal.show_info_message("Root directory : {}".format(root_path))
    config_path = os.path.abspath(os.path.join(root_path, "cfg", "config.cfg"))
    config_info = ConfigurationInformation(config_path)
    terminal.show_info_message(f"Backend address: {config_info.backend_address}")
    terminal.show_info_message(f"Local storage : {config_info.local_storage}")
    terminal.show_info_message(f"Server storage : {config_info.server_storage}")
    credentials_file = None
    json_file = None
    save_results = False
    arguments = argparser.get_arguments()
    if arguments:
        key = arguments.k
        if key:
            credentials_file = os.path.abspath(key)
            # credentials_file = os.path.abspath(os.path.join(root_path, "cfg", "credentials"))
        # Renamed local from `json` to `json_arg`: avoid shadowing the
        # stdlib `json` module name
        json_arg = arguments.j
        if json_arg:
            json_file = os.path.abspath(json_arg)
        else:
            terminal.show_error_message("No JSON file selected!")
            return 1
        values_dir = arguments.v
        if values_dir:
            save_results = os.path.abspath(values_dir)
        else:
            save_results = None
        add_dsp = arguments.d
        if add_dsp:
            terminal.Output.set_type(2)
    else:
        terminal.show_error_message("No arguments passed!")
        return -1
    # TODO: add -r key for restart
    #  after script run, write `lck` file with current time and host name
    #  after AppSession run, append AppSession UID to `lck` file
    #  if -r key is present, try to read `lck` file, get data from it, compare UIDs
    if config_info.status_code != 0:
        terminal.show_error_message(config_info.status_description)
        return config_info.status_code
    # Redundant `else` removed: the branch above always returns
    try:
        app_session = AppSession(root=root_path,
                                 cfg=config_info,
                                 credentials=credentials_file,
                                 json=json_file,
                                 res=save_results)
        app_session.execute()
    except Exception as e:
        handle_unexpected_exception(e)
        return -666
    return 0