def get_status(job_id, process_token):
    """Return the status of the given job id and process token.

    :param job_id: the job id
    :type job_id: int
    :param process_token: the token being queried
    :type process_token: str (uuid)
    :return: the status of the task as a Flask JSON response
    :rtype: flask.Response
    """
    ssh_client = None
    try:
        ssh_client = lws_connect.connect_with_sshconfig(
            config, ssh_config_file)
    except Exception as excpt:
        logging.critical("unable to log on %s, ABORTING",
                         config["clstrHostName"])
        raise excpt
    if ssh_client is None:
        logging.critical("unable to log on %s, ABORTING",
                         config["clstrHostName"])
        # BUG FIX: ValueError does not interpolate %s args like logging does;
        # format the message explicitly.
        raise ValueError("unable to log on %s, ABORTING"
                         % config["clstrHostName"])
    logging.info("get_status for token %s", process_token)
    try:
        status_json = lws_connect.get_job_status(ssh_client, process_token,
                                                 job_id)
    finally:
        # BUG FIX: always release the SSH connection, even when the status
        # query raises (the original leaked the client on that path).
        ssh_client.close()
    return jsonify(status_json)
def get_status(job_id, process_token):
    """Return the status of the given job id and process token.

    Queries the cluster for the raw job state, then lets the NSBAS layer
    build the status document for this (job_id, token) pair.

    :param job_id: the job id
    :type job_id: int
    :param process_token: the token being queried
    :type process_token: str (uuid)
    :return: the status of the task as a Flask JSON response
    :rtype: flask.Response
    """
    # FIX: removed the `process_ressources` dict that was built here but
    # never used (dead local).
    ssh_client = None
    try:
        ssh_client = lws_connect.connect_with_sshconfig(
            config, ssh_config_file)
    except Exception as excpt:
        logging.critical("unable to log on %s, ABORTING",
                         config["clstrHostName"])
        raise excpt
    if ssh_client is None:
        logging.critical("unable to log on %s, ABORTING",
                         config["clstrHostName"])
        # BUG FIX: ValueError does not interpolate %s args; format explicitly.
        raise ValueError("unable to log on %s, ABORTING"
                         % config["clstrHostName"])
    status = lws_connect.get_job_status(ssh_client, process_token,
                                        remote_prefix)
    ssh_client.close()
    status_json = lws_nsbas.getJobStatus(job_id, process_token, status)
    return jsonify(status_json)
def get_result(job_id, process_token):
    """Return the result of the given process on the subswath interferogram
    it transformed, identified by its job id and process token.

    :param job_id: the job id
    :type job_id: int
    :param process_token: the token being queried
    :type process_token: str (uuid)
    :return: the results of the task as a Flask JSON response
    :rtype: flask.Response
    """
    ssh_client = None
    try:
        ssh_client = lws_connect.connect_with_sshconfig(config,
                                                        ssh_config_file)
    except Exception as excpt:
        logging.critical("unable to log on %s, ABORTING",
                         config["clstrHostName"])
        raise excpt
    if ssh_client is None:
        logging.critical("unable to log on %s, ABORTING",
                         config["clstrHostName"])
        # BUG FIX: ValueError does not interpolate %s args; format explicitly.
        raise ValueError("unable to log on %s, ABORTING"
                         % config["clstrHostName"])
    logging.info("get_status for token %s", process_token)
    status_json = lws_connect.get_job_status(ssh_client, process_token,
                                             job_id)
    # When polled only for follow-up, the webservice needs the job id and,
    # as a safety check, the process token, and answers a simple waiting
    # message.
    resultJson = {"job_id": job_id, "processToken": process_token}
    # When the process has terminated, the webservice returns the URLs of
    # its products for the processed subswath (not for the whole set).
    statusTab = json.loads(status_json)
    if statusTab['Status'] == "Terminated":
        # Read the content of the publication directory.
        # BUG FIX: str.join takes a single iterable, not several positional
        # arguments — the original raised TypeError here.
        publishDir = "".join([config["clstrIrodsDir"], "/",
                              config["apiVersion"],
                              "/services/ws_dnldResult/", process_token])
        command = "ils " + publishDir
        logging.critical("list of results: command=%s", command)
        ret = run_on_frontal(ssh_client, command)
        # Build the JSON list of products.
        # NOTE(review): the URIs below are placeholders and `ret` is not yet
        # parsed into real URLs — confirm intended behavior.
        resultJson = {
            "job_id": job_id,
            "processToken": process_token,
            "resNames": [
                {"resName": "unw interferogram",
                 "resURI": "<resURI1>"},
                {"resName": "jpeg high resolution interferogram",
                 "resURI": "<resURI2>"},
                {"resName": "jpeg low resolution interferogram",
                 "resURI": "<resURI3>"},
            ],
        }
    ssh_client.close()
    # BUG FIX: the original `return` was split from its expression by a line
    # break, so the function returned None; return the response properly.
    return jsonify(resultJson), 200
def execute():
    """Geocode and publish the interferogram for one subswath.

    The synchronous execute returns the result with HTTP 200 (OK).
    The asynchronous execute must return the GetStatus response with
    HTTP 201, or the GetResult response with HTTP 200, accordingly.
    """
    if request.values['mode'] == "async":
        # TODO: estimate walltime dynamically
        process_token = request.json[0]['processToken']
        subswath = request.json[1]['subSwath']
        logging.critical("getting: token %s swath %s", str(process_token),
                         str(subswath))
        token_dir = remote_data_prefix + '/' + process_token
        working_dir = token_dir + '/iw' + subswath
        log_dir = token_dir + '/LOG'
        process_ressources = {"nodes": 1, "cores": 4,
                              "walltime": "00:30:00",
                              "workdir": working_dir,
                              "logdir": log_dir}
        # FIX: removed the unused `ret = "Error"` local.
        error = "OK"
        job_id = -1
        try:
            ssh_client = lws_connect.connect_with_sshconfig(config,
                                                            ssh_config_file)
        except Exception as excpt:
            logging.critical("unable to log on %s, ABORTING",
                             config["clstrHostName"])
            raise excpt
        if ssh_client is None:
            logging.critical("unable to log on %s, ABORTING",
                             config["clstrHostName"])
            # BUG FIX: ValueError does not interpolate %s args.
            raise ValueError("unable to log on %s, ABORTING"
                             % config["clstrHostName"])
        logging.info("connection OK")
        # command = " ".join(['nsb_make_geomaptrans.py', 'nsbas.proc', '4'])
        # BUG FIX: str.join takes a single iterable, not several positional
        # arguments — the original raised TypeError here.
        publishDir = "".join(["/", config["apiVersion"],
                              "/services/ws_dnldResult/", process_token])
        command = " ".join(['wsc_geocod-publishInter.sh',
                            config["thumbnailsWidth"], publishDir])
        try:
            logging.critical("launching command: %s", command)
            job_id = lws_connect.run_on_cluster_node(ssh_client, command,
                                                     str(process_token),
                                                     process_ressources)
            logging.critical("returned from submission %s", job_id)
        except Exception as excpt:
            error = error + "fail to run command on server: {}".format(excpt)
            logging.error(error)
        # Once launched, the webservice hands back its token through its
        # GetStatus answer, without waiting for completion.
        status_json = lws_connect.get_job_status(ssh_client, process_token,
                                                 job_id)
        logging.critical("response=%s", status_json)
        ssh_client.close()
        return jsonify(status_json), 201
    else:
        # In synchronous mode the webservice immediately returns its
        # GetResult answer.
        resultJson = {"job_id": "NaN",
                      "processToken": request.json[0]['processToken']}
        return jsonify(resultJson), 200
def execute():
    """Synchronously build the NSBAS working directory for one subswath.

    Runs `nsb_mkworkdir.py` on the cluster frontal (the command is cheap,
    so no batch job is needed) and immediately returns a GetResult-style
    JSON answer: HTTP 200 on success, HTTP 500 on failure.
    """
    logging.critical("getting: token %s", str(request.json[0]['processToken']))
    logging.critical("getting: swath %s", str(request.json[1]['subSwath']))
    process_token = request.json[0]['processToken']
    str_swath = str(request.json[1]['subSwath'])
    token_dir = config['clstrDataDir'] + '/' + process_token
    # In synchronous mode the webservice immediately returns its GetResult
    # answer.
    try:
        ssh_client = lws_connect.connect_with_sshconfig(
            config, ssh_config_file)
    except Exception as excpt:
        logging.critical("unable to log on %s, ABORTING",
                         config["clstrHostName"])
        raise excpt
    if ssh_client is None:
        logging.critical("unable to log on %s, ABORTING",
                         config["clstrHostName"])
        # BUG FIX: ValueError does not interpolate %s args.
        raise ValueError("unable to log on %s, ABORTING"
                         % config["clstrHostName"])
    logging.info("connection OK")
    # Command is not expensive -> we can run it on the frontal node.
    # FIX: removed the unused `slc_dir` local (the command uses the literal
    # "SLC" directly).
    dem_dir = token_dir + '/DEM'
    command = " ".join([
        "cd", token_dir, ";",
        "nsb_mkworkdir.py -s s1 -d", dem_dir, "SLC", "iw" + str_swath
    ])
    logging.critical("command = %s", command)
    ret = None
    try:
        ret = lws_connect.run_on_frontal(ssh_client, command)
    except Exception as excpt:
        logging.critical("ERROR: " + str(ret) + "--" + str(excpt))
        resultJson = {"job_id": "NaN", "processToken": process_token}
        ssh_client.close()
        return jsonify(resultJson), 500
    ssh_client.close()
    resultJson = {"job_id": "NaN", "processToken": process_token}
    return jsonify(resultJson), 200
def execute():
    """Download the DEM matching the token's SLC images onto the cluster.

    The synchronous execute returns the result with HTTP 200 (OK).
    The asynchronous execute must return the GetStatus response with
    HTTP 201, or the GetResult response with HTTP 200, accordingly.
    """
    logging.critical("getting: %s", str(request.json[0]['processToken']))
    process_token = request.json[0]['processToken']
    if request.values['mode'] == "async":
        # TODO: estimate walltime dynamically
        token_dir = config['clstrDataDir'] + '/' + process_token
        process_ressources = {
            "nodes": 1,
            "cores": 1,
            "walltime": "00:50:00",
            "workdir": token_dir
        }
        # FIX: removed the unused `ret = "Error"` local.
        error = "OK"
        job_id = -1
        try:
            ssh_client = lws_connect.connect_with_sshconfig(
                config, ssh_config_file)
        except Exception as excpt:
            logging.critical("unable to log on %s, ABORTING",
                             config["clstrHostName"])
            raise excpt
        if ssh_client is None:
            logging.critical("unable to log on %s, ABORTING",
                             config["clstrHostName"])
            # BUG FIX: ValueError does not interpolate %s args.
            raise ValueError("unable to log on %s, ABORTING"
                             % config["clstrHostName"])
        logging.info("connection OK")
        dem_dir = token_dir + '/DEM'
        slc_dir = token_dir + '/SLC'
        command = " ".join([
            "mkdir", dem_dir + '; ',
            "nsb_getDemFile.py", "fromRadarImage", slc_dir, dem_dir
        ])
        try:
            logging.critical("launching command: %s", command)
            job_id = lws_connect.run_on_cluster_node(ssh_client, command,
                                                     str(process_token),
                                                     process_ressources)
            logging.critical("returned from submission %s", job_id)
        except Exception as excpt:
            error = error + "fail to run command on server: {}".format(excpt)
            logging.error(error)
        # Once launched, the webservice hands back its token through its
        # GetStatus answer, without waiting for completion.
        status_json = lws_connect.get_job_status(ssh_client, process_token,
                                                 job_id)
        ssh_client.close()
        return jsonify(status_json), 201
    else:
        # In synchronous mode the webservice immediately returns its
        # GetResult answer.
        resultJson = {
            "job_id": "NaN",
            "processToken": request.json[0]['processToken']
        }
        return jsonify(resultJson), 200
def execute():
    """Launch the download of the requested PEPS images onto the cluster.

    The synchronous execute returns the result with HTTP 200 (OK).
    The asynchronous execute must return the GetStatus response with
    HTTP 201, or the GetResult response with HTTP 200, accordingly.

    The execute of the ws_dnldSar2Clstr webservice must:
    - take as argument, in the http request data, a json listing the ids of
      the Peps images to download, e.g.:
      {"pepsDataIds": [{"id": "56987456"}, {"id": "287946133"},
                       {"id": "4789654123"}, {"id": "852147963"}]}
      so that request.json yields an array usable as
      request.json['ids'][0]['id'];
    - output a ticket allowing the GetStatus to be polled to follow the
      download. This ticket could be a job id.
    """
    # Create the process token, in the style "d9dc5248-e741-4ef0-a54fee1a0".
    processToken = str(uuid.uuid4())
    ids = [numid['id'] for numid in request.json['pepsDataIds']]
    # BUG FIX: job_id was only assigned inside the async branch but is also
    # referenced by the synchronous answer below (NameError in sync mode).
    job_id = 0
    if request.values['mode'] == "async":
        # BUG FIX: replaced the Python-2-only `print` statements with the
        # logging calls used everywhere else in this file.
        logging.info("trying to connect to server for request dwnlod images")
        logging.info("ids: %s", ids)
        error = ""
        ssh_client = None
        process_ressources = {
            "nodes": 1,
            "cores": 1,
            "walltime": "00:10:00",
            "workdir": remote_prefix
        }
        ret = "Error"
        try:
            ssh_client = lws_connect.connect_with_sshconfig(
                config, ssh_config_file)
        except Exception as excpt:
            logging.critical("unable to log on %s, ABORTING",
                             config["clstrHostName"])
            raise excpt
        if ssh_client is None:
            logging.critical("unable to log on %s, ABORTING",
                             config["clstrHostName"])
            # BUG FIX: ValueError does not interpolate %s args.
            raise ValueError("unable to log on %s, ABORTING"
                             % config["clstrHostName"])
        logging.critical("connection OK, managing %d images", len(ids))
        command = " ".join([remote_prefix + "/bin/wsc_downloadPepsData.py",
                            "-v", "4",
                            "-token", str(processToken),
                            "-wd", remote_prefix + "/" + str(processToken)
                            + "/SLC"] + ids)
        try:
            logging.critical("launching command: %s", command)
            ret = lws_connect.run_on_cluster_node(ssh_client, command,
                                                  str(processToken),
                                                  process_ressources)
            logging.info("returned from submission %s", ret)
        except Exception as excpt:
            error = error + "fail to run command on server: {}".format(excpt)
            logging.error(error)
        ssh_client.close()
        # Once launched, the webservice hands back its token through its
        # GetStatus answer, without waiting for completion.
        statusJson = lws_nsbas.getJobStatus(job_id, processToken, error)
        return jsonify(statusJson), 201
    else:
        # In synchronous mode the webservice immediately returns its
        # GetResult answer.
        resultJson = {"job_id": job_id, "processToken": processToken}
        return jsonify(resultJson), 200