def test_read_json_cannot_read(test_json, mocker):
    """read_json wraps a JSON parse failure in a MoulinetteError carrying
    the translated 'corrupted_json' message."""
    error = "foobar"
    # Force json.loads to fail so read_json's error path is exercised
    mocker.patch("json.loads", side_effect=ValueError(error))
    with pytest.raises(MoulinetteError) as exception:
        read_json(str(test_json))
    translation = m18n.g("corrupted_json", ressource=str(test_json), error=error)
    expected_msg = translation.format(ressource=str(test_json), error=error)
    # Fix: assert against the exception's own message via exception.value —
    # str() on the ExceptionInfo wrapper is ambiguous/deprecated in pytest.
    assert expected_msg in str(exception.value)
def test_write_list_to_json(tmp_path):
    """A list written with write_to_json round-trips through read_json."""
    # Fix: original was named "text_write_list_to_json" — the "text_" typo
    # meant pytest never collected (i.e. never ran) this test.
    new_file = tmp_path / "newfile.json"
    dummy_list = ["foo", "bar", "baz"]
    write_to_json(str(new_file), dummy_list)
    _json = read_json(str(new_file))
    assert _json == ["foo", "bar", "baz"]
def text_write_dict_to_json():
    """A dict written with write_to_json round-trips through read_json."""
    # NOTE(review): the "text_" prefix looks like a typo for "test_" (pytest
    # won't collect it), but a properly named tmp_path-based duplicate exists
    # in this file, so renaming here would create a colliding definition.
    dummy_dict = {"foo": 42, "bar": ["a", "b", "c"]}
    write_to_json(TMP_TEST_FILE, dummy_dict)
    j = read_json(TMP_TEST_FILE)
    assert "foo" in j.keys()
    assert "bar" in j.keys()
    assert j["foo"] == 42
    assert j["bar"] == ["a", "b", "c"]
    # Fix: dropped the stray trailing assertion
    #   assert read_file(TMP_TEST_FILE) == "foo\nbar\nyolo\nswag"
    # which was copy-paste residue from an unrelated plain-text write test
    # and could never hold after writing a JSON dict to the same file.
def test_apps_catalog_load_with_oudated_api_version(mocker):
    """Loading the apps catalog transparently re-fetches it when the cached
    from_api_version no longer matches the current API version."""
    # Set up the catalog system and perform an initial update
    _initialize_apps_catalog_system()
    with requests_mock.Mocker() as mock:
        mocker.spy(m18n, "n")
        mock.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
        _update_apps_catalog()

    # The update must have populated the cache
    assert glob.glob(APPS_CATALOG_CACHE + "/*")

    # Pretend each cache entry was written by an older API version
    for path in glob.glob(APPS_CATALOG_CACHE + "/*"):
        cached = read_json(path)
        assert cached["from_api_version"] == APPS_CATALOG_API_VERSION
        cached["from_api_version"] = 0
        write_to_json(path, cached)

    # Loading should detect the stale version and trigger a fresh update
    with requests_mock.Mocker() as mock:
        # Mock the server response with a dummy apps catalog
        mock.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
        mocker.spy(m18n, "n")
        apps = _load_apps_catalog()["apps"]
        m18n.n.assert_any_call("apps_catalog_update_success")
        assert "foo" in apps.keys()
        assert "bar" in apps.keys()

    # The cache must now carry the current API version again
    for path in glob.glob(APPS_CATALOG_CACHE + "/*"):
        refreshed = read_json(path)
        assert refreshed["from_api_version"] == APPS_CATALOG_API_VERSION
def test_write_dict_to_json(tmp_path):
    """write_to_json followed by read_json preserves a dict's keys and values."""
    target = tmp_path / "newfile.json"
    payload = {"foo": 42, "bar": ["a", "b", "c"]}
    write_to_json(str(target), payload)
    loaded = read_json(str(target))
    assert "foo" in loaded
    assert "bar" in loaded
    assert loaded["foo"] == 42
    assert loaded["bar"] == ["a", "b", "c"]
def get_cached_report(id_, item=None, warn_if_no_cache=True):
    """Return the cached diagnosis report for category `id_`.

    When no cache file exists, a placeholder report is returned (and a
    warning is logged unless `warn_if_no_cache` is False). When `item`
    is given, only the report item whose "meta" matches it is returned
    ({} if none matches) instead of the whole report.
    """
    cache_file = Diagnoser.cache_file(id_)
    if os.path.exists(cache_file):
        report = read_json(cache_file)
        # Expose the cache file's mtime as the report's timestamp
        report["timestamp"] = int(os.path.getmtime(cache_file))
    else:
        if warn_if_no_cache:
            logger.warning(m18n.n("diagnosis_no_cache", category=id_))
        report = {"id": id_, "cached_for": -1, "timestamp": -1, "items": []}

    if not item:
        return report

    for report_item in report["items"]:
        if report_item.get("meta") == item:
            return report_item
    return {}
def is_vulnerable_to_meltdown(self):
    """Return True if this CPU is vulnerable to Meltdown (CVE-2017-5754).

    Runs the vendored spectre-meltdown-checker script and caches the result
    in /tmp, because the check is expensive (up to ~5 seconds on ARM) and
    would make the admin appear slow (c.f. the calls to diagnosis from the
    webadmin).

    Raises an Exception if the checker script fails or produces output
    that cannot be parsed.
    """
    # meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754
    #
    # The cache lives in /tmp and shall disappear upon reboot; it is also
    # invalidated whenever dpkg.log is newer than it (i.e. packages were
    # upgraded, e.g. from yunohost), by comparing modification times.
    cache_file = "/tmp/yunohost-meltdown-diagnosis"
    dpkg_log = "/var/log/dpkg.log"
    if os.path.exists(cache_file):
        if not os.path.exists(dpkg_log) or os.path.getmtime(
                cache_file) > os.path.getmtime(dpkg_log):
            self.logger_debug(
                "Using cached results for meltdown checker, from %s"
                % cache_file)
            return read_json(cache_file)[0]["VULNERABLE"]

    # script taken from https://github.com/speed47/spectre-meltdown-checker
    # script commit id is stored directly in the script
    SCRIPT_PATH = "/usr/lib/moulinette/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh"

    # '--variant 3' corresponds to Meltdown
    # example output from the script:
    # [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]
    try:
        self.logger_debug("Running meltdown vulnerability checker")
        call = subprocess.Popen(
            "bash %s --batch json --variant 3" % SCRIPT_PATH,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        # TODO / FIXME : here we are ignoring error messages ...
        # in particular on RPi2 and other hardware, the script complains about
        # "missing some kernel info (see -v), accuracy might be reduced"
        # Dunno what to do about that but we probably don't want to harass
        # users with this warning ...
        output, _ = call.communicate()
        output = output.decode()
        assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode

        # If there are multiple lines, sounds like there were some messages
        # in stdout that are not json >.> ... Try to get the actual json
        # stuff, which should be the last line.
        output = output.strip()
        if "\n" in output:
            self.logger_debug("Original meltdown checker output : %s" % output)
            output = output.split("\n")[-1]

        CVEs = json.loads(output)
        assert len(CVEs) == 1
        assert CVEs[0]["NAME"] == "MELTDOWN"
    except Exception as e:
        import traceback
        traceback.print_exc()
        self.logger_warning(
            "Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s"
            % e)
        raise Exception(
            "Command output for failed meltdown check: '%s'" % output)

    self.logger_debug(
        "Writing results from meltdown checker to cache file, %s" % cache_file)
    write_to_json(cache_file, CVEs)
    return CVEs[0]["VULNERABLE"]
def test_read_json_badjson():
    """read_json raises MoulinetteError on a file containing invalid JSON."""
    # Fix: write the fixture directly instead of shelling out through
    # os.system — no shell dependency, and a write failure raises instead
    # of being silently ignored (os.system's return code was discarded).
    with open(TMP_TEST_JSON, "w") as f:
        f.write("{ not valid json lol }\n")
    with pytest.raises(MoulinetteError):
        read_json(TMP_TEST_JSON)
def test_read_json():
    """read_json parses the pre-populated test JSON file."""
    data = read_json(TMP_TEST_JSON)
    assert "foo" in data
    assert data["foo"] == "bar"
def text_write_list_to_json():
    """A list written with write_to_json round-trips through read_json."""
    # NOTE(review): the "text_" prefix looks like a typo for "test_", so
    # pytest does not collect this function; a tmp_path-based variant of
    # this test exists elsewhere in this file — confirm before renaming.
    items = ["foo", "bar", "baz"]
    write_to_json(TMP_TEST_FILE, items)
    loaded = read_json(TMP_TEST_FILE)
    assert loaded == ["foo", "bar", "baz"]
def _hook_exec_bash(path, args, chdir, env, return_format, loggers): from moulinette.utils.process import call_async_output # Construct command variables cmd_args = "" if args and isinstance(args, list): # Concatenate escaped arguments cmd_args = " ".join(shell_quote(s) for s in args) if not chdir: # use the script directory as current one chdir, cmd_script = os.path.split(path) cmd_script = "./{0}".format(cmd_script) else: cmd_script = path # Add Execution dir to environment var if env is None: env = {} env["YNH_CWD"] = chdir env["YNH_INTERFACE"] = msettings.get("interface") stdreturn = os.path.join(tempfile.mkdtemp(), "stdreturn") with open(stdreturn, "w") as f: f.write("") env["YNH_STDRETURN"] = stdreturn # use xtrace on fd 7 which is redirected to stdout env["BASH_XTRACEFD"] = "7" cmd = '/bin/bash -x "{script}" {args} 7>&1' cmd = cmd.format(script=cmd_script, args=cmd_args) logger.debug("Executing command '%s'" % cmd) _env = os.environ.copy() _env.update(env) returncode = call_async_output(cmd, loggers, shell=True, cwd=chdir, env=_env) raw_content = None try: with open(stdreturn, "r") as f: raw_content = f.read() returncontent = {} if return_format == "json": if raw_content != "": try: returncontent = read_json(stdreturn) except Exception as e: raise YunohostError( "hook_json_return_error", path=path, msg=str(e), raw_content=raw_content, ) elif return_format == "plain_dict": for line in raw_content.split("\n"): if "=" in line: key, value = line.strip().split("=", 1) returncontent[key] = value else: raise YunohostError( "Expected value for return_format is either 'json' or 'plain_dict', got '%s'" % return_format) finally: stdreturndir = os.path.split(stdreturn)[0] os.remove(stdreturn) os.rmdir(stdreturndir) return returncode, returncontent
def test_read_json(test_json):
    """read_json parses the fixture file and exposes its keys."""
    # NOTE(review): this shares its name with another test_read_json in
    # this file; whichever is defined last shadows the other at pytest
    # collection time — confirm which variant is intended to survive.
    data = read_json(str(test_json))
    assert "foo" in data
    assert data["foo"] == "bar"