def test_write_list_to_json(tmp_path):
    """Round-trip a list through write_to_json / read_json.

    NOTE(review): was named ``text_write_list_to_json`` — pytest only
    collects functions starting with ``test_``, so this test silently
    never ran. Renamed to fix collection.
    """
    new_file = tmp_path / "newfile.json"
    dummy_list = ["foo", "bar", "baz"]

    write_to_json(str(new_file), dummy_list)

    _json = read_json(str(new_file))
    assert _json == ["foo", "bar", "baz"]
def test_write_dict_to_json():
    """Round-trip a dict through write_to_json / read_json.

    NOTE(review): two fixes here — (1) was named ``text_...`` so pytest
    never collected it; (2) the final
    ``assert read_file(TMP_TEST_FILE) == "foo\\nbar\\nyolo\\nswag"`` was a
    copy-paste leftover from an unrelated plain-text test and could never
    hold for a JSON-serialized dict, so it was dropped.
    """
    dummy_dict = {"foo": 42, "bar": ["a", "b", "c"]}
    write_to_json(TMP_TEST_FILE, dummy_dict)

    j = read_json(TMP_TEST_FILE)
    assert "foo" in j.keys()
    assert "bar" in j.keys()
    assert j["foo"] == 42
    assert j["bar"] == ["a", "b", "c"]
def test_write_to_json_bad_perms(test_json, mocker):
    """An IOError raised by open() must be wrapped into a MoulinetteError
    carrying the translated 'cannot_write_file' message."""
    err_text = "foobar"
    mocker.patch("builtins.open", side_effect=IOError(err_text))

    with pytest.raises(MoulinetteError) as exc_info:
        write_to_json(str(test_json), {"a": 1})

    template = m18n.g("cannot_write_file", file=str(test_json), error=err_text)
    expected = template.format(file=str(test_json), error=err_text)
    assert expected in str(exc_info)
def test_write_json_to_file_exception(test_file, mocker):
    """Any unexpected exception from open() must surface as a MoulinetteError
    with the translated 'error_writing_file' message."""
    err_text = "foobar"
    payload = {"foo": 42, "bar": ["a", "b", "c"]}
    mocker.patch("builtins.open", side_effect=Exception(err_text))

    with pytest.raises(MoulinetteError) as exc_info:
        write_to_json(str(test_file), payload)

    template = m18n.g("error_writing_file", file=str(test_file), error=err_text)
    expected = template.format(file=str(test_file), error=err_text)
    assert expected in str(exc_info)
def test_write_dict_to_json(tmp_path):
    """Round-trip a dict through write_to_json / read_json."""
    target = tmp_path / "newfile.json"
    payload = {"foo": 42, "bar": ["a", "b", "c"]}

    write_to_json(str(target), payload)

    reloaded = read_json(str(target))
    for key, value in payload.items():
        assert key in reloaded.keys()
        assert reloaded[key] == value
def test_apps_catalog_load_with_oudated_api_version(mocker):
    """A cache entry written with an older from_api_version must be refreshed
    (re-fetched from the catalog server) by _load_apps_catalog()."""
    # Initialize ...
    _initialize_apps_catalog_system()

    # Update
    with requests_mock.Mocker() as m:
        mocker.spy(m18n, "n")
        m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
        _update_apps_catalog()

    # Cache shouldn't be empty anymore
    assert glob.glob(APPS_CATALOG_CACHE + "/*")

    # Tweak the cache to replace the from_api_version with a different one
    for cache_file in glob.glob(APPS_CATALOG_CACHE + "/*"):
        cache_json = read_json(cache_file)
        assert cache_json["from_api_version"] == APPS_CATALOG_API_VERSION
        cache_json["from_api_version"] = 0  # forcibly outdated version
        write_to_json(cache_file, cache_json)

    # Update
    with requests_mock.Mocker() as m:
        # Mock the server response with a dummy apps catalog
        m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)

        mocker.spy(m18n, "n")
        app_dict = _load_apps_catalog()["apps"]
        # The outdated cache must have triggered a fresh fetch + success message
        m18n.n.assert_any_call("apps_catalog_update_success")

    assert "foo" in app_dict.keys()
    assert "bar" in app_dict.keys()

    # Check that we indeed have the new api number in cache
    for cache_file in glob.glob(APPS_CATALOG_CACHE + "/*"):
        cache_json = read_json(cache_file)
        assert cache_json["from_api_version"] == APPS_CATALOG_API_VERSION
def is_vulnerable_to_meltdown(self):
    """Return True if the CPU is reported vulnerable to Meltdown.

    Runs the vendored spectre-meltdown-checker script (variant 3 only) and
    caches the parsed result in /tmp, invalidated by reboot or by any dpkg
    activity newer than the cache.
    """
    # meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754

    # We use a cache file to avoid re-running the script so many times,
    # which can be expensive (up to around 5 seconds on ARM)
    # and make the admin appear to be slow (c.f. the calls to diagnosis
    # from the webadmin)
    #
    # The cache is in /tmp and shall disappear upon reboot
    # *or* we compare it to dpkg.log modification time
    # such that it's re-ran if there was package upgrades
    # (e.g. from yunohost)
    cache_file = "/tmp/yunohost-meltdown-diagnosis"
    dpkg_log = "/var/log/dpkg.log"
    if os.path.exists(cache_file):
        # Cache is valid only if no package was installed/upgraded since it
        # was written (a kernel/microcode update can change the verdict)
        if not os.path.exists(dpkg_log) or os.path.getmtime(
                cache_file) > os.path.getmtime(dpkg_log):
            self.logger_debug(
                "Using cached results for meltdown checker, from %s"
                % cache_file)
            return read_json(cache_file)[0]["VULNERABLE"]

    # script taken from https://github.com/speed47/spectre-meltdown-checker
    # script commit id is store directly in the script
    SCRIPT_PATH = "/usr/lib/moulinette/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh"

    # '--variant 3' corresponds to Meltdown
    # example output from the script:
    # [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]
    try:
        self.logger_debug("Running meltdown vulnerability checker")
        call = subprocess.Popen(
            "bash %s --batch json --variant 3" % SCRIPT_PATH,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        # TODO / FIXME : here we are ignoring error messages ...
        # in particular on RPi2 and other hardware, the script complains about
        # "missing some kernel info (see -v), accuracy might be reduced"
        # Dunno what to do about that but we probably don't want to harass
        # users with this warning ...
        output, _ = call.communicate()
        output = output.decode()
        # Return codes 2/3 are accepted alongside 0 — presumably the script's
        # "vulnerable"/"status unknown" exit codes; TODO confirm against the
        # spectre-meltdown-checker docs
        assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode

        # If there are multiple lines, sounds like there was some messages
        # in stdout that are not json >.> ... Try to get the actual json
        # stuff which should be the last line
        output = output.strip()
        if "\n" in output:
            self.logger_debug("Original meltdown checker output : %s" % output)
            output = output.split("\n")[-1]

        CVEs = json.loads(output)
        assert len(CVEs) == 1
        assert CVEs[0]["NAME"] == "MELTDOWN"
    except Exception as e:
        import traceback
        traceback.print_exc()
        self.logger_warning(
            "Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s"
            % e)
        # NOTE(review): 'output' may be unbound here if Popen/communicate
        # itself raised, which would turn this into a NameError — confirm
        raise Exception("Command output for failed meltdown check: '%s'"
                        % output)

    self.logger_debug(
        "Writing results from meltdown checker to cache file, %s"
        % cache_file)
    write_to_json(cache_file, CVEs)

    return CVEs[0]["VULNERABLE"]
def write_cache(self, report):
    """Persist this diagnoser's report to its JSON cache file.

    Creates the cache directory on first use. Uses
    ``os.makedirs(..., exist_ok=True)`` instead of the previous
    exists()-then-makedirs pair, which had a check-then-act race
    (another process creating the directory in between raised OSError).
    """
    os.makedirs(DIAGNOSIS_CACHE, exist_ok=True)
    return write_to_json(self.cache_file, report)
def test_write_to_json_badpermissions():
    """Writing as a non-root user to a root-owned file must raise."""
    switch_to_non_root_user()
    payload = {"foo": 42, "bar": ["a", "b", "c"]}
    with pytest.raises(MoulinetteError):
        write_to_json(TMP_TEST_FILE, payload)
def test_write_list_to_json():
    """Round-trip a list through write_to_json / read_json (TMP_TEST_FILE variant).

    NOTE(review): was named ``text_write_list_to_json`` — pytest only
    collects ``test_``-prefixed functions, so this never ran. Renamed.
    """
    dummy_list = ["foo", "bar", "baz"]
    write_to_json(TMP_TEST_FILE, dummy_list)

    j = read_json(TMP_TEST_FILE)
    assert j == ["foo", "bar", "baz"]
def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False,
                      force_password=False):
    """
    YunoHost post-install

    Keyword argument:
        domain -- YunoHost main domain
        ignore_dyndns -- Do not subscribe domain to a DynDNS service (only
        needed for nohost.me, noho.st domains)
        password -- YunoHost admin password
        force_password -- Skip the admin password strength check

    """
    from yunohost.utils.password import assert_password_is_strong_enough
    from yunohost.domain import domain_main_domain

    dyndns_provider = "dyndns.yunohost.org"

    # Do some checks at first
    if os.path.isfile('/etc/yunohost/installed'):
        raise YunohostError('yunohost_already_installed')

    if os.path.isdir(
            "/etc/yunohost/apps") and os.listdir("/etc/yunohost/apps") != []:
        raise YunohostError(
            "It looks like you're trying to re-postinstall a system that was already working previously ... If you recently had some bug or issues with your installation, please first discuss with the team on how to fix the situation instead of savagely re-running the postinstall ...",
            raw_msg=True)

    # Check password
    if not force_password:
        assert_password_is_strong_enough("admin", password)

    if not ignore_dyndns:
        # Check if yunohost dyndns can handle the given domain
        # (i.e. is it a .nohost.me ? a .noho.st ?)
        try:
            is_nohostme_or_nohost = _dyndns_provides(dyndns_provider, domain)
        # If an exception is thrown, most likely we don't have internet
        # connectivity or something. Assume that this domain isn't manageable
        # and inform the user that we could not contact the dyndns host server.
        # NOTE(review): bare 'except:' also swallows KeyboardInterrupt/SystemExit
        except:
            logger.warning(
                m18n.n('dyndns_provider_unreachable', provider=dyndns_provider))
            is_nohostme_or_nohost = False

        # If this is a nohost.me/noho.st, actually check for availability
        if is_nohostme_or_nohost:
            # (Except if the user explicitly said he/she doesn't care about dyndns)
            # NOTE(review): dead branch — we are inside 'if not ignore_dyndns:',
            # so ignore_dyndns is always falsy here
            if ignore_dyndns:
                dyndns = False
            # Check if the domain is available...
            elif _dyndns_available(dyndns_provider, domain):
                dyndns = True
            # If not, abort the postinstall
            else:
                raise YunohostError('dyndns_unavailable', domain=domain)
        else:
            dyndns = False
    else:
        dyndns = False

    # Sanity-check that the firewall tooling works before going further
    if os.system("iptables -V >/dev/null 2>/dev/null") != 0:
        raise YunohostError(
            "iptables/nftables does not seems to be working on your setup. You may be in a container or your kernel does have the proper modules loaded. Sometimes, rebooting the machine may solve the issue.",
            raw_msg=True)

    operation_logger.start()
    logger.info(m18n.n('yunohost_installing'))

    regen_conf(['nslcd', 'nsswitch'], force=True)

    # Initialize LDAP for YunoHost
    # TODO: Improve this part by integrate ldapinit into conf_regen hook
    tools_ldapinit()

    # Create required folders
    folders_to_create = [
        '/etc/yunohost/apps',
        '/etc/yunohost/certs',
        '/var/cache/yunohost/repo',
        '/home/yunohost.backup',
        '/home/yunohost.app'
    ]

    for folder in [x for x in folders_to_create if not os.path.exists(x)]:
        os.makedirs(folder)

    # Change folders permissions
    os.system('chmod 755 /home/yunohost.app')

    # Init ssowat's conf.json.persistent
    if not os.path.exists('/etc/ssowat/conf.json.persistent'):
        write_to_json('/etc/ssowat/conf.json.persistent', {})

    os.system('chmod 644 /etc/ssowat/conf.json.persistent')

    # Create SSL CA
    regen_conf(['ssl'], force=True)
    ssl_dir = '/usr/share/yunohost/yunohost-config/ssl/yunoCA'
    # (Update the serial so that it's specific to this very instance)
    os.system("openssl rand -hex 19 > %s/serial" % ssl_dir)
    commands = [
        'rm %s/index.txt' % ssl_dir,
        'touch %s/index.txt' % ssl_dir,
        'cp %s/openssl.cnf %s/openssl.ca.cnf' % (ssl_dir, ssl_dir),
        'sed -i s/yunohost.org/%s/g %s/openssl.ca.cnf ' % (domain, ssl_dir),
        'openssl req -x509 -new -config %s/openssl.ca.cnf -days 3650 -out %s/ca/cacert.pem -keyout %s/ca/cakey.pem -nodes -batch -subj /CN=%s/O=%s' % (ssl_dir, ssl_dir, ssl_dir, domain, os.path.splitext(domain)[0]),
        'cp %s/ca/cacert.pem /etc/ssl/certs/ca-yunohost_crt.pem' % ssl_dir,
        'update-ca-certificates'
    ]

    for command in commands:
        p = subprocess.Popen(
            command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

        out, _ = p.communicate()

        if p.returncode != 0:
            logger.warning(out)
            raise YunohostError('yunohost_ca_creation_failed')
        else:
            logger.debug(out)

    logger.success(m18n.n('yunohost_ca_creation_success'))

    # New domain config
    regen_conf(['nsswitch'], force=True)
    domain_add(domain, dyndns)
    domain_main_domain(domain)

    # Change LDAP admin password
    tools_adminpw(password, check_strength=not force_password)

    # Enable UPnP silently and reload firewall
    firewall_upnp('enable', no_refresh=True)

    # Initialize the apps catalog system
    _initialize_apps_catalog_system()

    # Try to update the apps catalog ...
    # we don't fail miserably if this fails,
    # because that could be for example an offline installation...
    try:
        _update_apps_catalog()
    except Exception as e:
        logger.warning(str(e))

    # Create the archive directory (makes it easier for people to upload backup
    # archives, otherwise it's only created after running `yunohost backup
    # create` once.
    from yunohost.backup import _create_archive_dir
    _create_archive_dir()

    # Init migrations (skip them, no need to run them on a fresh system)
    _skip_all_migrations()

    os.system('touch /etc/yunohost/installed')

    # Enable and start YunoHost firewall at boot time
    service_enable("yunohost-firewall")
    service_start("yunohost-firewall")

    regen_conf(names=["ssh"], force=True)

    # Restore original ssh conf, as chosen by the
    # admin during the initial install
    #
    # c.f. the install script and in particular
    # https://github.com/YunoHost/install_script/pull/50
    # The user can now choose during the install to keep
    # the initial, existing sshd configuration
    # instead of YunoHost's recommended conf
    #
    original_sshd_conf = '/etc/ssh/sshd_config.before_yunohost'
    if os.path.exists(original_sshd_conf):
        os.rename(original_sshd_conf, '/etc/ssh/sshd_config')

    regen_conf(force=True)

    logger.success(m18n.n('yunohost_configured'))

    logger.warning(m18n.n('yunohost_postinstall_end_tip'))
def translate_legacy_rules_in_ssowant_conf_json_persistent():
    """Migrate legacy skipped/(un)protected URL/regex rules from
    /etc/ssowat/conf.json.persistent into the new 'permissions' structure,
    then rewrite the file. No-op when the file is absent or holds no
    legacy keys."""
    persistent_file_name = "/etc/ssowat/conf.json.persistent"
    if not os.path.exists(persistent_file_name):
        return

    # Ugly hack to try not to miserably fail migration
    # NOTE(review): read_yaml on a .json file — presumably relied on here
    # because YAML is (mostly) a superset of JSON and tolerates sloppier
    # input; confirm before changing to read_json
    persistent = read_yaml(persistent_file_name)

    legacy_rules = [
        "skipped_urls",
        "unprotected_urls",
        "protected_urls",
        "skipped_regex",
        "unprotected_regex",
        "protected_regex"
    ]

    if not any(legacy_rule in persistent for legacy_rule in legacy_rules):
        return

    if not isinstance(persistent.get("permissions"), dict):
        persistent["permissions"] = {}

    # Fold the *_regex variants into the URL lists with the "re:" prefix
    # understood by the new permission system
    skipped_urls = persistent.get("skipped_urls", []) + [
        "re:" + r for r in persistent.get("skipped_regex", [])
    ]
    protected_urls = persistent.get("protected_urls", []) + [
        "re:" + r for r in persistent.get("protected_regex", [])
    ]
    unprotected_urls = persistent.get("unprotected_urls", []) + [
        "re:" + r for r in persistent.get("unprotected_regex", [])
    ]

    known_users = list(user_list()["users"].keys())

    # Drop the legacy keys now that their content has been captured above
    for legacy_rule in legacy_rules:
        if legacy_rule in persistent:
            del persistent[legacy_rule]

    if skipped_urls:
        persistent["permissions"]['custom_skipped'] = {
            "users": [],
            "label": "Custom permissions - skipped",
            "show_tile": False,
            "auth_header": False,
            "public": True,
            # Merge with any pre-existing custom_skipped URIs
            "uris": skipped_urls + persistent["permissions"].get("custom_skipped", {}).get("uris", []),
        }

    if unprotected_urls:
        persistent["permissions"]['custom_unprotected'] = {
            "users": [],
            "label": "Custom permissions - unprotected",
            "show_tile": False,
            "auth_header": True,
            "public": True,
            "uris": unprotected_urls + persistent["permissions"].get(
                "custom_unprotected", {}).get("uris", []),
        }

    if protected_urls:
        persistent["permissions"]['custom_protected'] = {
            # Protected resources stay restricted to currently known users
            "users": known_users,
            "label": "Custom permissions - protected",
            "show_tile": False,
            "auth_header": True,
            "public": False,
            "uris": protected_urls + persistent["permissions"].get(
                "custom_protected", {}).get("uris", []),
        }

    write_to_json(persistent_file_name, persistent, sort_keys=True, indent=4)

    logger.warning(
        "Yunohost automatically translated some legacy rules in /etc/ssowat/conf.json.persistent to match the new permission system"
    )
def test_write_json_cannot_write_to_non_existant_folder():
    # write_to_json is expected to assert that the parent folder exists,
    # hence the AssertionError for a path under a missing directory.
    bogus_path = "/toto/test.json"
    payload = ["a", "b"]
    with pytest.raises(AssertionError):
        write_to_json(bogus_path, payload)