def push_mda(prob):
    """Push the given OpenMDAO problem as a WhatsOpt analysis.

    Closure over ``self`` and ``options`` from the enclosing scope: it is
    intended to be installed as a hook called with the problem instance.
    When ``--name`` is set and does not match the problem class name the
    problem is skipped (without exiting) so another problem may be pushed.
    """
    name = options["--name"]
    pbname = prob.model.__class__.__name__
    if name and pbname != name:
        # Do not exit: keep seeking for another problem (ie analysis)
        info("Analysis %s skipped" % pbname)
    else:
        self.push_mda(prob, options)
        # sys.exit() rather than the site-provided exit() builtin, which is
        # only available when Python starts with the site module loaded
        # (consistent with the newer push_mda implementation in this file).
        sys.exit()
def _ask_and_write_api_key(self):
    """Prompt the user for their WhatsOpt API key and persist it to disk.

    The key is read without echoing and written to API_KEY_FILENAME inside
    WHATSOPT_DIRNAME (created if needed).

    Returns:
        str: the API key entered by the user.
    """
    log("You have to set your API key.")
    log("You can get it in your profile page on WhatsOpt (%s)." % self.url)
    info(
        "Please, copy/paste your API key below then hit return (characters are hidden)."
    )
    # getpass hides the typed characters on the terminal
    api_key = getpass.getpass(prompt="Your API key: ")
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() sequence
    os.makedirs(WHATSOPT_DIRNAME, exist_ok=True)
    with open(API_KEY_FILENAME, "w") as f:
        f.write(api_key)
    return api_key
def list_analyses(self):
    """Fetch the analyses known to the server and display them as a table."""
    response = self.session.get(
        self._endpoint("/api/v1/analyses"), headers=self.headers
    )
    if not response.ok:
        # Surface the HTTP error to the caller
        response.raise_for_status()
    else:
        columns = ["id", "name", "created at"]
        rows = [
            [mda["id"], mda["name"], mda.get("created_at", None)]
            for mda in response.json()
        ]
        info("Server: {}".format(self._url))
        log(tabulate(rows, columns))
def push_mda(prob):
    """Push the given problem as an analysis, honoring the --name filter.

    Closure over ``self``, ``options`` and ``py_filename`` from the
    enclosing scope.
    """
    pbname = prob.model.__class__.__name__
    wanted = options["--name"]
    if wanted and pbname != wanted:
        # do not exit seeking for another problem (ie analysis)
        info("Analysis %s skipped" % pbname)
        return
    options["--pyfilename"] = py_filename
    xdsm = self.push_mda(prob, options)
    if options.get("--xdsm"):  # show command
        # raising is required to interrupt pb execution
        raise AnalysisPushedException(xdsm=xdsm)
    sys.exit()
def list_analyses(self, all=False, project_query=None):
    """List analyses and print them as a table.

    With ``all`` truthy every analysis is requested; otherwise
    ``project_query`` (if given) filters by design project.
    """
    if all:
        query = "?all=true"
    elif project_query:
        query = "?design_project_query={}".format(project_query)
    else:
        query = ""
    resp = self.session.get(
        self.endpoint("/api/v1/analyses" + query), headers=self.headers
    )
    if not resp.ok:
        # Surface the HTTP error to the caller
        resp.raise_for_status()
    else:
        columns = ["id", "name", "created at"]
        rows = [
            [mda["id"], mda["name"], mda.get("created_at", None)]
            for mda in resp.json()
        ]
        info("Server: {}".format(self._url))
        log(tabulate(rows, columns))
        log("")
def show_mda(self, analysis_id, pbfile, name, outfile, batch, depth):
    """Generate and display the XDSM diagram of an analysis.

    The XDSM data comes either from running a local problem file
    (``pbfile``) through the push machinery, or from the server for the
    analysis identified by ``analysis_id`` (falling back to the locally
    recorded analysis id).

    Args:
        analysis_id: server-side analysis id, or falsy to auto-detect.
        pbfile: path to a local problem file; takes precedence when set.
        name: analysis name filter forwarded as the --name option.
        outfile: path of the generated HTML file.
        batch: when truthy, do not open the result in a web view.
        depth: nesting depth forwarded as the --depth option.
    """
    options = {
        "--xdsm": True,
        "--name": name,
        "--dry-run": False,
        "--depth": depth,
    }
    xdsm = None
    if pbfile:
        start = time.time()
        try:
            info("XDSM info retrieval...")
            # push_mda_cmd is expected to interrupt execution by raising
            # AnalysisPushedException carrying the XDSM data (--xdsm is set)
            self.push_mda_cmd(pbfile, options)
        except AnalysisPushedException as pushed:
            xdsm = pushed.xdsm
        end = time.time()
        log("Retrieved in {:.2f}s".format(end - start))
        source = os.path.basename(pbfile)
    else:
        mda_id = analysis_id or get_analysis_id()
        if mda_id is None:
            error(
                "Unknown analysis with id={} (maybe use wop pull <analysis-id>)".format(
                    mda_id
                )
            )
            sys.exit(-1)
        # Fetch the XDSM JSON from the server
        url = self.endpoint("/api/v1/analyses/{}.xdsm".format(mda_id))
        resp = self.session.get(url, headers=self.headers)
        resp.raise_for_status()
        xdsm = resp.json()
        source = f"{mda_id}@{self._url}"
    info("XDSM building...")
    generate_xdsm_html(source, xdsm, outfile)
    if pbfile:
        log("XDSM of analysis from {} generated in {}".format(pbfile, outfile))
    else:
        log("XDSM of analysis {} generated in {}".format(mda_id, outfile))
    if not batch:
        # Open the generated HTML in the user's browser/viewer
        webview(outfile)
def pull_mda(self, mda_id, options=None, msg=None):
    """Download the generated OpenMDAO code of an analysis and install it locally.

    Fetches a zip export from the server, extracts it to a temp directory,
    then moves files into the current directory honoring --force, --update
    and --dry-run semantics.

    Args:
        mda_id: server-side analysis id.
        options: dict of command-line flags (--server, --run-ops,
            --test-units, --force, --update, --dry-run); defaults to {}.
        msg: final message logged on success; defaults to
            "Analysis <id> pulled".
    """
    # Avoid the mutable-default-argument pitfall of the previous
    # `options={}` signature (behavior-compatible for all callers).
    options = options or {}
    if not msg:
        msg = "Analysis %s pulled" % mda_id
    base = ""
    param = ""
    if options.get("--server"):
        param += "&with_server=true"
    if options.get("--run-ops"):
        param += "&with_runops=true"
    if options.get("--test-units"):
        param += "&with_unittests=true"
    # Was `if param is not "":` — identity comparison with a string literal
    # is implementation-dependent (and a SyntaxWarning on modern Python);
    # simple truthiness is the correct test here.
    if param:
        param = "?" + param[1:]
    url = self._endpoint(
        ("/api/v1/analyses/%s/exports/new.openmdao" + base + param) % mda_id
    )
    resp = self.session.get(url, headers=self.headers, stream=True)
    resp.raise_for_status()
    name = None
    with tempfile.NamedTemporaryFile(suffix=".zip", mode="wb", delete=False) as fd:
        for chunk in resp.iter_content(chunk_size=128):
            fd.write(chunk)
        name = fd.name
    # Renamed from `zip` so the builtin is not shadowed
    zipf = zipfile.ZipFile(name, "r")
    tempdir = tempfile.mkdtemp(suffix="wop", dir=tempfile.tempdir)
    zipf.extractall(tempdir)
    filenames = zipf.namelist()
    zipf.close()
    # Map each file name to whether it should be moved into place
    file_to_move = {}
    for f in filenames:
        file_to = f
        file_to_move[file_to] = True
        if os.path.exists(file_to):
            if options.get("--force"):
                log("Update %s" % file_to)
                if options.get("--dry-run"):
                    file_to_move[file_to] = False
                else:
                    os.remove(file_to)
            elif options.get("--update"):
                if re.match(r"^run_.*\.py$", f) and not options.get("--run-ops"):
                    # keep current run scripts if any
                    info(
                        "Keep existing %s (remove it or use --run-ops to override)"
                        % file_to
                    )
                    file_to_move[file_to] = False
                    continue
                if is_user_file(f):
                    file_to_move[file_to] = False
                    continue
                log("Update %s" % file_to)
                if not options.get("--dry-run"):
                    os.remove(file_to)
            else:
                warn(
                    "File %s in the way: remove it or use --force to override"
                    % file_to
                )
                file_to_move[file_to] = False
        else:
            log("Pull %s" % file_to)
    if not options.get("--dry-run"):
        for f in file_to_move.keys():
            file_from = os.path.join(tempdir, f)
            file_to = f
            dir_to = os.path.dirname(f)
            if dir_to == "":
                dir_to = "."
            elif not os.path.exists(dir_to):
                os.makedirs(dir_to)
            if file_to_move[file_to]:
                move(file_from, dir_to)
        log(msg)
def pull_mda(self, mda_id, options=None, msg=None):
    """Download the generated code of an analysis and install it locally.

    Supports both the OpenMDAO and GEMSEO frameworks (--gemseo), optional
    server/EGMDO/unit-test generation, plain or package layout (--package),
    and --force/--update/--dry-run file-handling semantics. Records the
    pull state (server url, id, framework, mode) on completion.

    Args:
        mda_id: server-side analysis id.
        options: dict of command-line flags; defaults to {}.
        msg: final message logged; defaults to "Analysis <id> pulled".
    """
    # Avoid the mutable-default-argument pitfall of the previous
    # `options={}` signature (behavior-compatible for all callers).
    options = options or {}
    if not msg:
        msg = "Analysis %s pulled" % mda_id
    framework = FRAMEWORK_OPENMDAO
    if options.get("--gemseo"):
        framework = FRAMEWORK_GEMSEO
    param = ""
    if options.get("--run-ops"):
        param += "&with_runops=true"
    # Server, EGMDO and unit-test generation are OpenMDAO-only features
    if options.get("--server"):
        if framework == FRAMEWORK_OPENMDAO:
            param += "&with_server=true"
        else:
            warn(
                "Can not generate server with GEMSEO framework. --server is ignored"
            )
    if options.get("--egmdo"):
        if framework == FRAMEWORK_OPENMDAO:
            param += "&with_egmdo=true"
        else:
            warn("Can not generate EGMDO with GEMSEO framework. --egmdo is ignored")
    if options.get("--test-units"):
        if framework == FRAMEWORK_OPENMDAO:
            param += "&with_unittests=true"
        else:
            warn(
                "Can not generate tests with GEMSEO framework. --test-units is ignored"
            )
    if param:
        param = "?" + param[1:]
    format_query = framework
    if options.get("--package"):
        format_query += "_pkg"
    url = self.endpoint(
        ("/api/v1/analyses/{}/exports/new.{}{}".format(mda_id, format_query, param))
    )
    resp = self.session.get(url, headers=self.headers, stream=True)
    resp.raise_for_status()
    name = None
    with tempfile.NamedTemporaryFile(suffix=".zip", mode="wb", delete=False) as fd:
        for chunk in resp.iter_content(chunk_size=128):
            fd.write(chunk)
        name = fd.name
    zipf = zipfile.ZipFile(name, "r")
    tempdir = tempfile.mkdtemp(suffix="wop", dir=tempfile.tempdir)
    zipf.extractall(tempdir)
    filenames = zipf.namelist()
    zipf.close()
    # Map each file name to whether it should be moved into place
    file_to_move = {}
    if options.get("--dry-run"):
        cmd = "Pull"
        if options.get("--update"):
            cmd = "Update"
        info(
            "*******************************************************************\n"
            f"* {cmd} is run in DRY RUN mode (actions are listed but not done) *\n"
            "*******************************************************************"
        )
    for f in filenames:
        file_to = f
        file_to_move[file_to] = True
        if os.path.exists(file_to):
            if options.get("--force"):
                log("Update %s" % file_to)
                if options.get("--dry-run"):
                    file_to_move[file_to] = False
                else:
                    os.remove(file_to)
            elif options.get("--update"):
                if is_run_script_file(f) and not options.get("--run-ops"):
                    info(
                        f"Keep existing {file_to} (remove it or use -r to override)"
                    )
                    file_to_move[file_to] = False
                    continue
                if is_test_file(f) and not options.get("--test-units"):
                    file_to_move[file_to] = False
                    continue
                if is_user_file(f):
                    file_to_move[file_to] = False
                    # Have to update user analysis main file when switching frameworks
                    url = self.endpoint(f"/api/v1/analyses/{mda_id}")
                    resp = self.session.get(url, headers=self.headers, stream=True)
                    resp.raise_for_status()
                    mda_name = snakize(resp.json()["name"])
                    if is_analysis_user_file(mda_name, f) and is_framework_switch(
                        framework
                    ):
                        file_to_move[file_to] = True
                    else:
                        continue
                log(f"Update {file_to}")
                if not options.get("--dry-run"):
                    os.remove(file_to)
            else:
                warn(
                    f"File {file_to} in the way: remove it or use --force to override"
                )
                file_to_move[file_to] = False
        else:
            log(f"Pull {file_to}")
    if not options.get("--dry-run"):
        for f in file_to_move.keys():
            file_from = os.path.join(tempdir, f)
            file_to = f
            dir_to = os.path.dirname(f)
            if dir_to == "":
                dir_to = "."
            elif not os.path.exists(dir_to):
                os.makedirs(dir_to)
            if file_to_move[file_to]:
                move(file_from, dir_to)
        # Remember where/what was pulled so later commands can detect
        # the server, framework and layout mode
        save_state(
            self._url,
            mda_id,
            framework,
            MODE_PACKAGE if options.get("--package") else MODE_PLAIN,
        )
    log(msg)
def get_status(self):
    """Print the connection status and the state of any local analysis code.

    Reports whether the user is logged in, whether local analysis code is
    present (and its id), whether it was pulled from the current server or
    another one, and suggests the relevant wop commands for each situation.
    """
    connected = self.is_connected()
    # Server the local analysis was pulled from, falling back to the
    # currently configured url
    whatsopt_url = get_whatsopt_url() or self.url
    if connected:
        info("You are logged in {}".format(self.url))
    else:
        info("You are not connected.")
    mda_id = None
    try:
        mda_id = get_analysis_id()
    except ValueError as err:
        # Local state unreadable/inconsistent: warn but keep reporting
        warn(str(err))
    if mda_id:
        if connected and whatsopt_url == self.url:
            info("Found local analysis code (id=#{})".format(mda_id))
            # connected to the right server from which the analysis was pulled
            url = self.endpoint("/api/v1/analyses/{}".format(mda_id))
            resp = self.session.get(url, headers=self.headers)
            if resp.ok:
                mda = resp.json()
                # Detect which framework the local generated code targets
                if is_based_on(FRAMEWORK_GEMSEO):
                    mda["framework"] = "GEMSEO"
                elif is_based_on(FRAMEWORK_OPENMDAO):
                    mda["framework"] = "OpenMDAO"
                else:
                    # should not happen
                    raise ValueError(
                        "No framework detected. Check your *_base.py files."
                    )
                headers = ["id", "name", "created_at", "owner_email", "framework"]
                data = [[mda[k] for k in headers]]
                log(tabulate(data, headers))
            else:
                error("Analysis not found on the server anymore (probably deleted)")
                log(
                    " (use 'wop push <analysis.py>' to push from an OpenMDAO code to the server)"
                )
        else:
            info(
                "Found local analysis code (id=#{}) "
                "pulled from {}".format(mda_id, whatsopt_url)
            )
            if connected:
                # connected to another server with a pulled analysis
                warn("You are connected to a different server")
                log(
                    " (use 'wop push <analysis.py>' to push the local "
                    "analysis in the current server {})".format(self.url)
                )
                log(
                    " (use 'wop logout' and 'wop login {}' "
                    "to log in to the right server)".format(whatsopt_url)
                )
            else:
                log(" (use 'wop login {}' command to log in)".format(whatsopt_url))
    else:
        info("No local analysis found")
        if connected:
            log(
                " (use 'wop list' and 'wop pull <id>' to retrieve an existing analysis)\n"
                " (use 'wop push <analysis.py>' to push from an OpenMDAO code to the server)"
            )
    log("")