Example #1
0
    def login(self, api_key=None, echo=None):
        """Authenticate against the WhatsOpt server.

        Resolves the API key from, in order: the api_key argument, the
        stored API key file, or an interactive prompt (which also persists
        the key). Sets self.api_key and self.headers, then probes the
        /api/v1/analyses endpoint to validate the key.

        api_key: optional API key string; when None, fall back to file/prompt.
        echo: when truthy, log a success message.
        Returns the requests Response of the probe request.
        Raises requests.HTTPError when the probe fails (after one silent
        logout/retry when a stale stored key was used).
        """
        already_logged = False
        if api_key:
            self.api_key = api_key
        elif os.path.exists(API_KEY_FILENAME):
            already_logged = True
            self.api_key = self._read_api_key()
        else:
            self.api_key = self._ask_and_write_api_key()
        self.headers = {
            "Authorization": "Token token=" + self.api_key,
            "User-Agent": "wop/{}".format(__version__),
        }

        # Probe the server to check that the resolved key is valid
        url = self._endpoint("/api/v1/analyses")
        resp = self.session.get(url, headers=self.headers)
        if not api_key and already_logged and not resp.ok:
            # Stored key was rejected: it may belong to another server
            self.logout(
                echo=False
            )  # log out silently, suppose one was logged on another server
            # retry once; the key file is gone so this prompts for a fresh key
            resp = self.login(api_key, echo)
        resp.raise_for_status()
        if echo:
            log("Successfully logged into WhatsOpt (%s)" % self.url)
        return resp
Example #2
0
def convert_sqlite_to_csv(sqlite_filename, basename):
    """Export driver cases of an OpenMDAO sqlite recording as <basename>.csv.

    The CSV uses ';' as delimiter; the first column is the case success
    status, followed by design variables then output variables (vector
    components expanded as name[index]).
    """
    reader = om.CaseReader(sqlite_filename)
    first_case = reader.list_cases("driver", out_stream=None)[0]
    design_vars = sorted(reader.get_case(first_case).get_design_vars())

    driver_name, cases, statuses = load_sqlite_file(sqlite_filename)
    out_vars = sorted({c["varname"] for c in cases} - set(design_vars))

    # Build column names (design vars first) and map each column to its values
    fieldnames = []
    values = {}
    for varname in design_vars + out_vars:
        for case in cases:
            if case["varname"] != varname:
                continue
            if case["coord_index"] == -1:
                colname = varname
            else:
                colname = f"{varname}[{case['coord_index']}]"
            fieldnames.append(colname)
            values[colname] = case["values"]

    nb_cases = len(statuses)
    outfile = f"{basename}.csv"
    with open(outfile, "w") as f:
        writer = csv.writer(f, delimiter=";", lineterminator="\n")
        writer.writerow(["success"] + fieldnames)
        for i in range(nb_cases):
            writer.writerow([statuses[i]] + [values[col][i] for col in fieldnames])
    log(f"Convert {nb_cases} cases ({driver_name}) to {outfile}")
Example #3
0
    def serve(port):
        """Start the local analysis server on the given port.

        Requires Apache Thrift and a generated run_server.py in the
        current directory; exits the process with -1 when either is
        missing, logging a hint on how to generate the server.
        """
        try:
            import thrift
        except ImportError:
            error(
                "Apache Thrift is not installed. You can install it with : 'pip install thrift'"
            )
            sys.exit(-1)
        try:
            import os, sys

            # insert current dir first as another run_server exists under whatsopt/services
            sys.path.insert(0, os.getcwd())
            from run_server import run_server
        except ImportError as err:
            print(str(err))
            error("Server not found!")
            # Suggest the wop command that would generate run_server.py
            try:
                mda_id = get_analysis_id()
                if mda_id:
                    log(
                        f"  (use 'wop update -s' to generate server for current analysis #{mda_id})"
                    )
                else:
                    warn("No local analysis found")
                    log(
                        "  (use 'wop pull -s <id>' to generate server for the analysis #id)"
                    )
            except ValueError as err:
                warn(str(err))
            sys.exit(-1)
        run_server(port)
Example #4
0
    def push_mda(self, problem, options):
        """Push the analysis extracted from an OpenMDAO problem to the server.

        Honors --scalar, --depth, --pyfilename, --xdsm and --dry-run options.
        Returns the server JSON response, or None in dry-run mode.
        """
        push_cmd = UniversalPushCommand(
            problem, options.get("--depth"), options.get("--scalar")
        )
        mda_attrs = push_cmd.get_mda_attributes(
            problem.model, push_cmd.tree, use_depth=True
        )

        # Anonymous top group: derive a name from the pushed python filename
        pyfilename = options.get("--pyfilename")
        if mda_attrs["name"] == "Group" and pyfilename:
            camel = to_camelcase(os.path.basename(pyfilename))
            mda_attrs["name"] = os.path.splitext(camel)[0]

        if options["--dry-run"]:
            log(json.dumps(mda_attrs, indent=2))
            return None

        suffix = ".xdsm" if options.get("--xdsm") else ""
        url = self.endpoint("/api/v1/analyses{}".format(suffix))
        resp = self.session.post(
            url, headers=self.headers, json={"analysis": mda_attrs}
        )
        resp.raise_for_status()
        log("Analysis %s pushed" % mda_attrs["name"])
        return resp.json()
Example #5
0
 def check_versions(self):
     """Query the server versioning endpoint and log the wop version it
     requires alongside the wop version currently in use."""
     resp = self.session.get(
         self.endpoint("/api/v1/versioning"), headers=self.headers
     )
     resp.raise_for_status()
     versions = resp.json()
     log("WhatsOpt {} requires wop {}".format(versions["whatsopt"], versions["wop"]))
     log(f"You are using wop {__version__}")
Example #6
0
 def logout(self, echo=True):
     """Log out from WhatsOpt: delete the stored API key and server URL files.

     echo: when True, log a confirmation message.
     """
     if os.path.exists(API_KEY_FILENAME):
         os.remove(API_KEY_FILENAME)
     if os.path.exists(URL_FILENAME):
         os.remove(URL_FILENAME)
     if echo:
         # Fixed message typo: "Sucessfully" -> "Successfully"
         # (now consistent with the login success message)
         log("Successfully logged out from WhatsOpt (%s)" % self.url)
Example #7
0
 def logout(echo=True):
     """Log out: delete the stored API key and server URL files.

     echo: when True, log a confirmation message.
     """
     if os.path.exists(API_KEY_FILENAME):
         os.remove(API_KEY_FILENAME)
     if os.path.exists(URL_FILENAME):
         os.remove(URL_FILENAME)
     if echo:
         # Fixed message typo: "Sucessfully" -> "Successfully"
         log("Successfully logged out from WhatsOpt")
         log("")
Example #8
0
 def check_versions(self):
     """Log the server's WhatsOpt version, its recommended wop version,
     and the wop version currently installed."""
     url = self._endpoint("/api/v1/versioning")
     resp = self.session.get(url, headers=self.headers)
     resp.raise_for_status()
     versions = resp.json()
     log(
         "WhatsOpt:{} recommended wop:{}".format(versions["whatsopt"], versions["wop"])
     )
     log("current wop:{}".format(__version__))
Example #9
0
    def serve(self):
        """Run the generated analysis server (run_server.py) in a subprocess.

        Exits the process with -1 when the Apache Thrift library is missing.
        """
        import subprocess

        try:
            import thrift  # the server protocol relies on Apache Thrift
        except ImportError:
            log(
                "Apache Thrift is not installed. You can install it with : 'pip install thrift'"
            )
            exit(-1)
        subprocess.call(["python", "run_server.py"])
Example #10
0
 def _ask_and_write_api_key(self):
     """Prompt the user for their WhatsOpt API key (hidden input), persist
     it in the API key file and return it."""
     log("You have to set your API key.")
     log("You can get it in your profile page on WhatsOpt (%s)." % self.url)
     info(
         "Please, copy/paste your API key below then hit return (characters are hidden)."
     )
     api_key = getpass.getpass(prompt="Your API key: ")
     # ensure the configuration directory exists before writing the key
     os.makedirs(WHATSOPT_DIRNAME, exist_ok=True)
     with open(API_KEY_FILENAME, "w") as f:
         f.write(api_key)
     return api_key
Example #11
0
def print_cases(cases, statuses):
    """Log a table of case values: one row per evaluation, first column is
    the success status, then one column per variable (vector components
    get a [index] suffix)."""
    headers = ["success"]
    for case in cases:
        label = case["varname"]
        idx = case["coord_index"]
        if idx > -1:
            label += "[{}]".format(idx)
        headers.append(label)
    nb_rows = len(cases[0]["values"]) if cases else 0
    rows = [
        [statuses[i]] + [case["values"][i] for case in cases]
        for i in range(nb_rows)
    ]
    log(tabulate(rows, headers))
Example #12
0
 def list_analyses(self):
     """Fetch the analyses visible to the current user and log them as a
     table (id, name, creation date)."""
     resp = self.session.get(
         self._endpoint("/api/v1/analyses"), headers=self.headers
     )
     # raises on 4xx/5xx, no-op otherwise
     resp.raise_for_status()
     rows = [
         [mda["id"], mda["name"], mda.get("created_at", None)]
         for mda in resp.json()
     ]
     info("Server: {}".format(self._url))
     log(tabulate(rows, ["id", "name", "created at"]))
Example #13
0
 def push_json(self, filename):
     """Push a previously exported JSON file to the server, detecting
     whether it holds a design project or a single analysis."""
     with open(filename, "rb") as f:
         attrs = json.load(f)
     # a project export carries its analyses under "analyses_attributes"
     is_project = "analyses_attributes" in attrs
     key = "Project" if is_project else "Analysis"
     route = "/api/v1/design_projects" if is_project else "/api/v1/analyses"
     resp = self.session.post(
         self.endpoint(route), headers=self.headers, json={key.lower(): attrs}
     )
     resp.raise_for_status()
     log("{} {} pushed".format(key, attrs["name"]))
Example #14
0
    def push_mda(self, problem, options):
        """Push the analysis built from an OpenMDAO problem to the server,
        or log its JSON attributes when --dry-run is set."""
        name = problem.model.__class__.__name__
        push_cmd = PushCommand(problem, options["--scalar-format"])
        mda_attrs = push_cmd.get_mda_attributes(problem.model, push_cmd.tree)

        if options["--dry-run"]:
            log(json.dumps(mda_attrs, indent=2))
            return

        resp = self.session.post(
            self._endpoint("/api/v1/analyses"),
            headers=self.headers,
            json={"analysis": mda_attrs},
        )
        resp.raise_for_status()
        log("Analysis %s pushed" % name)
Example #15
0
def load_sqlite_file(filename):
    """Load an OpenMDAO sqlite recording.

    Returns (name, cases, statuses) where name is the driver name (or the
    recording file basename when no driver name can be extracted), cases
    is [{"varname": ..., "coord_index": ..., "values": [...]}*] and
    statuses is the per-case success list.
    Raises Exception when the recording contains no driver case.
    """
    log("Load {}...".format(filename))
    reader = CaseReader(filename)
    cases = reader.list_cases("driver", out_stream=None)
    if len(cases) == 0:
        raise Exception("No case found in {}".format(filename))

    # find driver name from the first case coordinate ("rank0:<driver>|...")
    driver_first_coord = cases[0]
    m = re.match(r"\w+:(\w+)|.*", driver_first_coord)
    # default to the recording file basename
    name = os.path.splitext(os.path.basename(filename))[0]
    # Bug fix: the regex ends with "|.*" so it matches ANY string and m is
    # always truthy; with a bare "if m:" the filename fallback was always
    # clobbered, setting name to None when no driver prefix was present.
    # Only use the capture when it actually captured a driver name.
    if m and m.group(1):
        name = m.group(1)

    # format cases and statuses
    # cases : [{"varname": varname, "coord_index": idx, "values": [...]}*]
    cases, statuses = _format_upload_cases(reader)
    return name, cases, statuses
Example #16
0
 def list_analyses(self, all=False, project_query=None):
     """Log analyses as a table (id, name, creation date).

     all: when True, request every analysis (not only the user's own).
     project_query: filter analyses by a design project query string.
     """
     if all:
         param = "?all=true"
     elif project_query:
         param = "?design_project_query={}".format(project_query)
     else:
         param = ""
     url = self.endpoint("/api/v1/analyses" + param)
     resp = self.session.get(url, headers=self.headers)
     # raises on 4xx/5xx, no-op otherwise
     resp.raise_for_status()
     rows = [
         [mda["id"], mda["name"], mda.get("created_at", None)]
         for mda in resp.json()
     ]
     info("Server: {}".format(self._url))
     log(tabulate(rows, ["id", "name", "created at"]))
     log("")
Example #17
0
    def login(self, api_key=None, echo=None):
        """Authenticate against the WhatsOpt server.

        Resolves the API key from, in order: the api_key argument, the
        stored API key file, or an interactive prompt (which persists the
        key). Tests the connection; when a stale stored key fails, logs
        out silently, re-saves the server url and retries once (prompting
        for a fresh key). Exits the process with -1 on failure when echo
        is set.

        Returns the boolean connection status.
        """
        debug("login()")
        already_logged = False
        if api_key:
            self.api_key = api_key
        elif os.path.exists(API_KEY_FILENAME):
            already_logged = True
            self.api_key = self._read_api_key()
        else:
            debug("Ask for API key")
            self.api_key = self._ask_and_write_api_key()
        ok = self._test_connection(api_key)

        if not api_key and already_logged and not ok:
            # try to propose re-login
            self.logout(
                echo=False
            )  # log out silently, one may be logged on another server
            # save url again
            with open(URL_FILENAME, "w") as f:
                f.write(self._url)
            # retry once; the key file is gone so this prompts for a fresh key
            ok = self.login(api_key, echo=False)

        if not ok and echo:
            error("Login to WhatsOpt ({}) failed.".format(self.url))
            log("")
            sys.exit(-1)

        if echo:
            log("Successfully logged into WhatsOpt (%s)" % self.url)
            log("")
        return ok
Example #18
0
 def upload_vars_init(self, problem, options):
     """Upload initial values of the problem's independent variables as
     the analysis parameterization.

     The analysis id is taken from the locally pulled analysis when
     available, falling back to the --analysis-id option; exits with -1
     when neither is set. Honors --dry-run (log the table, no upload).
     """
     # Bug fix: get_analysis_id() was called twice; call it once and fall
     # back with `or` (equivalent: option is used when the id is falsy)
     mda_id = get_analysis_id() or options["--analysis-id"]
     if mda_id is None:
         error("Unknown analysis with id={}".format(mda_id))
         sys.exit(-1)
     parameters = []
     headers = ["variable", "init value"]
     for s in problem.model._subsystems_myproc:
         if isinstance(s, IndepVarComp):
             for absname in s._var_abs2meta["output"]:
                 name = find_indep_var_name(problem, absname)
                 value = s._outputs._views[absname][:]
                 if isinstance(value, np.ndarray):
                     # json-friendly representation of array values
                     value = str(value.tolist())
                 parameters.append({"varname": name, "value": value})
     data = [[p["varname"], p["value"]] for p in parameters]
     params = {"parameterization": {"parameters": parameters}}
     log(tabulate(data, headers))
     if not options["--dry-run"]:
         url = self.endpoint(f"/api/v1/analyses/{mda_id}/parameterization")
         resp = self.session.put(url, headers=self.headers, json=params)
         resp.raise_for_status()
         log("Variables init values uploaded")
Example #19
0
 def upload_parameters(self, problem, options):
     """Upload the problem's independent variable values as the analysis
     parameterization, logging them as a table. Honors --dry-run."""
     mda_id = self.get_analysis_id()
     if mda_id is None:
         error("Unknown analysis with id={}".format(mda_id))
         exit(-1)
     headers = ["parameter", "value"]
     parameters = []
     for subsys in problem.model._subsystems_myproc:
         if not isinstance(subsys, IndepVarComp):
             continue
         for absname in subsys._var_allprocs_abs_names["output"]:
             promoted = subsys._var_allprocs_abs2prom["output"][absname]
             value = subsys._outputs._views[absname][:]
             if isinstance(value, np.ndarray):
                 # json-friendly representation of array values
                 value = str(value.tolist())
             parameters.append({"varname": promoted, "value": value})
     table = [[p["varname"], p["value"]] for p in parameters]
     log(tabulate(table, headers))
     if not options["--dry-run"]:
         url = self._endpoint(("/api/v1/analyses/%s/parameterization") % mda_id)
         body = {"parameterization": {"parameters": parameters}}
         resp = self.session.put(url, headers=self.headers, json=body)
         resp.raise_for_status()
         log("Parameters uploaded")
Example #20
0
    def show_mda(self, analysis_id, pbfile, name, outfile, batch, depth):
        """Generate an XDSM html view of an analysis and open it in a browser.

        The XDSM data comes either from pushing a local problem file
        (pbfile) in xdsm mode, or from the server for the analysis given
        by analysis_id (falling back to the locally pulled one). Writes
        the html to outfile; skips the webview when batch is set. Exits
        with -1 when no analysis can be identified.
        """
        options = {
            "--xdsm": True,
            "--name": name,
            "--dry-run": False,
            "--depth": depth,
        }
        xdsm = None
        if pbfile:
            start = time.time()
            try:
                info("XDSM info retrieval...")
                # push in xdsm mode; the xdsm data is carried back by exception
                self.push_mda_cmd(pbfile, options)
            except AnalysisPushedException as pushed:
                xdsm = pushed.xdsm
            end = time.time()
            log("Retrieved in {:.2f}s".format(end - start))
            source = os.path.basename(pbfile)
        else:
            mda_id = analysis_id or get_analysis_id()
            if mda_id is None:
                error(
                    "Unknown analysis with id={} (maybe use wop pull <analysis-id>)".format(
                        mda_id
                    )
                )
                sys.exit(-1)
            url = self.endpoint("/api/v1/analyses/{}.xdsm".format(mda_id))
            resp = self.session.get(url, headers=self.headers)
            resp.raise_for_status()
            xdsm = resp.json()
            source = f"{mda_id}@{self._url}"

        info("XDSM building...")
        generate_xdsm_html(source, xdsm, outfile)
        if pbfile:
            log("XDSM of analysis from {} generated in {}".format(pbfile, outfile))
        else:
            log("XDSM of analysis {} generated in {}".format(mda_id, outfile))
        if not batch:
            webview(outfile)
Example #21
0
    def pull_mda(self, mda_id, options={}, msg=None):
        """Download generated code for the given analysis and install the
        files in the current directory.

        Supports OpenMDAO (default) and GEMSEO (--gemseo) frameworks;
        extra generation flags: --run-ops, --server, --egmdo,
        --test-units, --package. Existing files are only overwritten with
        --force, or with --update (which keeps run scripts, tests and
        user files unless the matching flag is set, except the analysis
        main user file on a framework switch). --dry-run lists actions
        without performing them. Saves the pull state (url, id,
        framework, mode) on success.

        NOTE(review): mutable default `options={}` — not mutated here
        (only read via .get), but `options=None` would be safer.
        """
        if not msg:
            msg = "Analysis %s pulled" % mda_id

        framework = FRAMEWORK_OPENMDAO
        if options.get("--gemseo"):
            framework = FRAMEWORK_GEMSEO

        # build the export query string from the generation flags
        param = ""
        if options.get("--run-ops"):
            param += "&with_runops=true"
        if options.get("--server"):
            if framework == FRAMEWORK_OPENMDAO:
                param += "&with_server=true"
            else:
                warn(
                    "Can not generate server with GEMSEO framework. --server is ignored"
                )
        if options.get("--egmdo"):
            if framework == FRAMEWORK_OPENMDAO:
                param += "&with_egmdo=true"
            else:
                warn("Can not generate EGMDO with GEMSEO framework. --egmdo is ignored")
        if options.get("--test-units"):
            if framework == FRAMEWORK_OPENMDAO:
                param += "&with_unittests=true"
            else:
                warn(
                    "Can not generate tests with GEMSEO framework. --test-units is ignored"
                )
        if param:
            # turn the leading '&' into '?'
            param = "?" + param[1:]

        format_query = framework
        if options.get("--package"):
            format_query += "_pkg"

        url = self.endpoint(
            ("/api/v1/analyses/{}/exports/new.{}{}".format(mda_id, format_query, param))
        )
        # download the generated code as a zip into a temporary file
        resp = self.session.get(url, headers=self.headers, stream=True)
        resp.raise_for_status()
        name = None
        with tempfile.NamedTemporaryFile(suffix=".zip", mode="wb", delete=False) as fd:
            for chunk in resp.iter_content(chunk_size=128):
                fd.write(chunk)
            name = fd.name
        zipf = zipfile.ZipFile(name, "r")
        tempdir = tempfile.mkdtemp(suffix="wop", dir=tempfile.tempdir)
        zipf.extractall(tempdir)
        filenames = zipf.namelist()
        zipf.close()
        # file_to_move[f] tells whether f is installed from the extracted tree
        file_to_move = {}
        if options.get("--dry-run"):
            cmd = "Pull"
            if options.get("--update"):
                cmd = "Update"
            info(
                "*******************************************************************\n"
                f"* {cmd} is run in DRY RUN mode (actions are listed but not done) *\n"
                "*******************************************************************"
            )
        for f in filenames:
            file_to = f
            file_to_move[file_to] = True
            if os.path.exists(file_to):
                if options.get("--force"):
                    log("Update %s" % file_to)
                    if options.get("--dry-run"):
                        file_to_move[file_to] = False
                    else:
                        os.remove(file_to)
                elif options.get("--update"):
                    if is_run_script_file(f) and not options.get("--run-ops"):
                        info(
                            f"Keep existing {file_to} (remove it or use -r to override)"
                        )
                        file_to_move[file_to] = False
                        continue
                    if is_test_file(f) and not options.get("--test-units"):
                        file_to_move[file_to] = False
                        continue
                    if is_user_file(f):
                        file_to_move[file_to] = False

                        # Have to update user analysis main file when switching frameworks
                        url = self.endpoint(f"/api/v1/analyses/{mda_id}")
                        resp = self.session.get(url, headers=self.headers, stream=True)
                        resp.raise_for_status()
                        mda_name = snakize(resp.json()["name"])
                        if is_analysis_user_file(mda_name, f) and is_framework_switch(
                            framework
                        ):
                            file_to_move[file_to] = True
                        else:
                            continue
                    log(f"Update {file_to}")
                    if not options.get("--dry-run"):
                        os.remove(file_to)
                else:
                    warn(
                        f"File {file_to} in the way: remove it or use --force to override"
                    )
                    file_to_move[file_to] = False
            else:
                log(f"Pull {file_to}")
        if not options.get("--dry-run"):
            # move selected files from the temp tree into the working dir
            for f in file_to_move.keys():
                file_from = os.path.join(tempdir, f)
                file_to = f
                dir_to = os.path.dirname(f)
                if dir_to == "":
                    dir_to = "."
                elif not os.path.exists(dir_to):
                    os.makedirs(dir_to)
                if file_to_move[file_to]:
                    move(file_from, dir_to)
            save_state(
                self._url,
                mda_id,
                framework,
                MODE_PACKAGE if options.get("--package") else MODE_PLAIN,
            )
            log(msg)
Example #22
0
    def upload(
        self,
        filename,
        driver_kind=None,
        analysis_id=None,
        operation_id=None,
        dry_run=False,
        outvar_count=1,
        only_success=False,
        parallel=False,
    ):
        """Upload operation results data to the server.

        filename dispatch: run_parameters_init.py / mda_init.py delegate
        to the variables-init upload command; .csv, .sqlite and .hdf5 are
        loaded as cases; anything else logs an error and exits with -1.
        driver_kind: forces driver name "user_<kind>_algo".
        analysis_id: target analysis (default: locally pulled one).
        operation_id: when set, patch this existing operation's cases
        instead of creating a new operation.
        dry_run: print the cases table and exit without uploading.
        outvar_count: hint for the server to split in/out variables.
        only_success: drop failed cases before upload.
        parallel: passed to the sqlite loader.
        """
        from socket import gethostname

        mda_id = get_analysis_id() if not analysis_id else analysis_id

        name = cases = statuses = None
        if (
            os.path.basename(filename) == "run_parameters_init.py"
            or os.path.basename(filename) == "mda_init.py"
        ):
            # NOTE(review): no return/exit after this call; cases stays None
            # and the unconditional loop below would raise unless
            # upload_vars_init_cmd exits the process — verify.
            self.upload_vars_init_cmd(
                filename, {"--dry-run": dry_run, "--analysis-id": mda_id}
            )
        elif filename.endswith(".csv"):
            name, cases, statuses = load_from_csv(filename)
        elif filename.endswith(".sqlite"):
            name, cases, statuses = load_from_sqlite(filename, parallel)
        elif filename.endswith(".hdf5"):
            name, cases, statuses = load_from_hdf5(filename)
        else:
            error(
                f"Can not upload file (unknown): extension not recognized"
                " (should be either .csv, .sqlite or .hdf5)"
            )
            exit(-1)

        if only_success:
            # keep only values of cases whose status marks success (> 0)
            for c in cases:
                c["values"] = [
                    val for i, val in enumerate(c["values"]) if statuses[i] > 0
                ]
            statuses = [1 for s in statuses if s > 0]

        # sanitize values: replace NaN/inf so the payload serializes cleanly
        for c in cases:
            c["values"] = np.nan_to_num(np.array(c["values"])).tolist()

        if dry_run:
            print_cases(cases, statuses)
            sys.exit()

        resp = None
        if operation_id:
            # update cases of an existing operation
            url = self.endpoint(("/api/v1/operations/%s") % operation_id)
            operation_params = {"cases": cases}
            resp = self.session.patch(
                url, headers=self.headers, json={"operation": operation_params}
            )
        else:
            if mda_id:
                url = self.endpoint(("/api/v1/analyses/%s/operations") % mda_id)
            else:
                url = self.endpoint("/api/v1/operations")
            if driver_kind:
                driver = "user_{}_algo".format(driver_kind)
            else:
                # suppose name well-formed <lib>-<doe|optimizer|screening>-<algoname>
                # otherwise it will default to doe
                m = re.match(r"(\w+)_(doe|optimizer|screening)_(\w+)", name.lower())
                if m:
                    driver = name.lower()
                else:
                    driver = "user_data_uploading"
            operation_params = {
                "name": name,
                "driver": driver,
                "host": gethostname(),
                "cases": cases,
                "success": statuses,
            }
            params = {"operation": operation_params}
            if outvar_count > 0 and outvar_count < len(cases):
                params["outvar_count_hint"] = outvar_count
            resp = self.session.post(url, headers=self.headers, json=params)
        resp.raise_for_status()
        log("Results data from {} uploaded with driver {}".format(filename, driver))
Example #23
0
    def get_status(self):
        """Log the current wop status: connection state, local pulled
        analysis (if any) and whether it matches the connected server.

        When connected to the server the analysis was pulled from, fetches
        and logs the analysis details (id, name, dates, owner, framework);
        otherwise logs hints on how to log in / push / pull.
        """
        connected = self.is_connected()
        # server the local analysis was pulled from (fallback: current url)
        whatsopt_url = get_whatsopt_url() or self.url
        if connected:
            info("You are logged in {}".format(self.url))
        else:
            info("You are not connected.")
        mda_id = None
        try:
            mda_id = get_analysis_id()
        except ValueError as err:
            warn(str(err))
        if mda_id:
            if connected and whatsopt_url == self.url:
                info("Found local analysis code (id=#{})".format(mda_id))
                # connected to the right server from which the analysis was pulled
                url = self.endpoint("/api/v1/analyses/{}".format(mda_id))
                resp = self.session.get(url, headers=self.headers)

                if resp.ok:
                    mda = resp.json()
                    # framework is detected from the local generated code
                    if is_based_on(FRAMEWORK_GEMSEO):
                        mda["framework"] = "GEMSEO"
                    elif is_based_on(FRAMEWORK_OPENMDAO):
                        mda["framework"] = "OpenMDAO"
                    else:  # should not happen
                        raise ValueError(
                            "No framework detected. Check your *_base.py files."
                        )
                    headers = ["id", "name", "created_at", "owner_email", "framework"]
                    data = [[mda[k] for k in headers]]
                    log(tabulate(data, headers))
                else:
                    error("Analysis not found on the server anymore (probably deleted)")
                    log(
                        "  (use 'wop push <analysis.py>' to push from an OpenMDAO code to the server)"
                    )
            else:
                info(
                    "Found local analysis code (id=#{}) "
                    "pulled from {}".format(mda_id, whatsopt_url)
                )
                if connected:
                    # connected to another server with a pulled analysis
                    warn("You are connected to a different server")
                    log(
                        "  (use 'wop push <analysis.py>' to push the local "
                        "analysis in the current server {})".format(self.url)
                    )
                    log(
                        "  (use 'wop logout' and 'wop login {}' "
                        "to log in to the right server)".format(whatsopt_url)
                    )
                else:
                    log("  (use 'wop login {}' command to log in)".format(whatsopt_url))
        else:
            info("No local analysis found")
            if connected:
                log(
                    "  (use 'wop list' and 'wop pull <id>' to retrieve an existing analysis)\n"
                    "  (use 'wop push <analysis.py>' to push from an OpenMDAO code to the server)"
                )
        log("")
Example #24
0
    def upload(
        self,
        filename,
        driver_kind=None,
        analysis_id=None,
        operation_id=None,
        dry_run=False,
        outvar_count=1,
        only_success=False,
    ):
        """Upload operation results data to the server.

        filename dispatch: run_parameters_init.py triggers the parameters
        upload command and exits; .csv files go through the csv loader;
        anything else is assumed to be an sqlite recording.
        driver_kind: forces driver name "user_<kind>_algo".
        analysis_id: target analysis (default: locally pulled one).
        operation_id: when set, patch this existing operation's cases
        instead of creating a new operation.
        dry_run: print the cases table and exit without uploading.
        outvar_count: hint for the server to split in/out variables.
        only_success: drop failed cases before upload.
        """
        from socket import gethostname

        mda_id = self.get_analysis_id() if not analysis_id else analysis_id

        name = cases = statuses = None
        if filename == "run_parameters_init.py":
            if mda_id is None:
                error("Unknown analysis with id={}".format(mda_id))
                exit(-1)
            self.execute(
                "run_analysis.py", self.upload_parameters_cmd, {"--dry-run": dry_run}
            )
            exit()
        elif filename.endswith(".csv"):
            name, cases, statuses = load_from_csv(filename)
        else:
            name, cases, statuses = load_from_sqlite(filename)

        if only_success:
            # keep only values of cases whose status marks success (> 0)
            for c in cases:
                c["values"] = [
                    val for i, val in enumerate(c["values"]) if statuses[i] > 0
                ]
            statuses = [1 for s in statuses if s > 0]

        # sanitize values: replace NaN/inf so the payload serializes cleanly
        for c in cases:
            c["values"] = np.nan_to_num(np.array(c["values"])).tolist()

        if dry_run:
            print_cases(cases, statuses)
            exit()

        resp = None
        if operation_id:
            # update cases of an existing operation
            url = self._endpoint(("/api/v1/operations/%s") % operation_id)
            operation_params = {"cases": cases}
            resp = self.session.patch(
                url, headers=self.headers, json={"operation": operation_params}
            )
        else:
            if mda_id:
                url = self._endpoint(("/api/v1/analyses/%s/operations") % mda_id)
            else:
                url = self._endpoint("/api/v1/operations")
            if driver_kind:
                driver = "user_{}_algo".format(driver_kind)
            else:
                # map well-known recorder names to their driver identifier
                if name == "LHS":
                    driver = "smt_doe_lhs"
                elif name == "Morris":
                    driver = "salib_doe_morris"
                elif name == "SLSQP":
                    driver = "scipy_optimizer_slsqp"
                else:
                    # suppose name well-formed <lib>-<doe|optimizer|screening>-<algoname>
                    # otherwise it will default to doe
                    m = re.match(r"(\w+)_(doe|optimizer|screening)_(\w+)", name.lower())
                    if m:
                        driver = name.lower()
                    else:
                        driver = "user_defined_algo"
            operation_params = {
                "name": name,
                "driver": driver,
                "host": gethostname(),
                "cases": cases,
                "success": statuses,
            }
            params = {"operation": operation_params}
            if outvar_count > 0 and outvar_count < len(cases):
                params["outvar_count_hint"] = outvar_count
            resp = self.session.post(url, headers=self.headers, json=params)
        resp.raise_for_status()
        log("Results data from {} uploaded with driver {}".format(filename, driver))
Example #25
0
 def pull_mda(self, mda_id, options=None, msg=None):
     """Download generated OpenMDAO code for the given analysis and
     install the files in the current directory.

     options: command line flags dict (--server, --run-ops, --test-units,
     --force, --update, --dry-run). msg: final log message override.
     Existing files are only overwritten with --force, or with --update
     (which preserves user files and run scripts unless --run-ops).
     """
     # avoid the mutable default argument pitfall (was: options={});
     # behavior is unchanged for all callers
     options = {} if options is None else options
     if not msg:
         msg = "Analysis %s pulled" % mda_id
     base = ""
     param = ""
     if options.get("--server"):
         param += "&with_server=true"
     if options.get("--run-ops"):
         param += "&with_runops=true"
     if options.get("--test-units"):
         param += "&with_unittests=true"
     # Bug fix: 'param is not ""' compared identity, not equality
     # (SyntaxWarning on recent Pythons, relies on string interning);
     # use truthiness instead. Turns the leading '&' into '?'.
     if param:
         param = "?" + param[1:]
     url = self._endpoint(
         ("/api/v1/analyses/%s/exports/new.openmdao" + base + param) % mda_id
     )
     # download the generated code as a zip into a temporary file
     resp = self.session.get(url, headers=self.headers, stream=True)
     resp.raise_for_status()
     name = None
     with tempfile.NamedTemporaryFile(suffix=".zip", mode="wb", delete=False) as fd:
         for chunk in resp.iter_content(chunk_size=128):
             fd.write(chunk)
         name = fd.name
     # renamed local: do not shadow the zip() builtin
     zipf = zipfile.ZipFile(name, "r")
     tempdir = tempfile.mkdtemp(suffix="wop", dir=tempfile.tempdir)
     zipf.extractall(tempdir)
     filenames = zipf.namelist()
     zipf.close()
     # file_to_move[f] tells whether f is installed from the extracted tree
     file_to_move = {}
     for f in filenames:
         file_to = f
         file_to_move[file_to] = True
         if os.path.exists(file_to):
             if options.get("--force"):
                 log("Update %s" % file_to)
                 if options.get("--dry-run"):
                     file_to_move[file_to] = False
                 else:
                     os.remove(file_to)
             elif options.get("--update"):
                 if re.match(r"^run_.*\.py$", f) and not options.get("--run-ops"):
                     # keep current run scripts if any
                     info(
                         "Keep existing %s (remove it or use --run-ops to override)"
                         % file_to
                     )
                     file_to_move[file_to] = False
                     continue
                 if is_user_file(f):
                     file_to_move[file_to] = False
                     continue
                 log("Update %s" % file_to)
                 if not options.get("--dry-run"):
                     os.remove(file_to)
             else:
                 warn(
                     "File %s in the way: remove it or use --force to override"
                     % file_to
                 )
                 file_to_move[file_to] = False
         else:
             log("Pull %s" % file_to)
     if not options.get("--dry-run"):
         # move selected files from the temp tree into the working dir
         for f in file_to_move.keys():
             file_from = os.path.join(tempdir, f)
             file_to = f
             dir_to = os.path.dirname(f)
             if dir_to == "":
                 dir_to = "."
             elif not os.path.exists(dir_to):
                 os.makedirs(dir_to)
             if file_to_move[file_to]:
                 move(file_from, dir_to)
         log(msg)