Example #1
def do_bundle_onprem(extra_files, gen_out, output_dir):
    # We are only being called via dcos_generate_config.sh with an output_dir
    assert output_dir is not None
    assert output_dir
    assert output_dir[-1] != '/'
    output_dir = output_dir + '/'

    # Copy the extra_files
    for filename in extra_files:
        shutil.copy(filename, output_dir + filename)

    # Copy the cluster packages
    for name, info in gen_out.cluster_packages.items():
        copy_makedirs(info['filename'], output_dir + info['filename'])

    # Write an index of the cluster packages
    write_json(output_dir + 'cluster-package-info.json',
               gen_out.cluster_packages)

    # Write the bootstrap id
    write_string(output_dir + 'bootstrap.latest',
                 gen_out.arguments['bootstrap_id'])

    # Make a package fetch script
    package_fetches = "\n".join(
        fetch_pkg_template.format(
            package_path='packages/{name}/{id}.tar.xz'.format(
                name=pkgpanda.PackageId(package).name, id=package),
            bootstrap_url='https://downloads.dcos.io/dcos/stable')
        for package in load_json("/artifacts/{}.active.json".format(
            gen_out.arguments['bootstrap_id'])))
    write_string(output_dir + 'fetch_packages.sh',
                 fetch_all_pkgs.format(package_fetches=package_fetches))
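Note: copy_makedirs is not defined in this listing. A minimal sketch, assuming it only needs to create the destination's parent directory before copying (the real pkgpanda helper may differ in details):

import os
import shutil


def copy_makedirs(src, dest):
    # Hypothetical helper: the callers above always pass a dest that
    # includes a directory component, so dirname() is non-empty here.
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    shutil.copy(src, dest)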
Example #2
def test_cosmos_package_add(cluster):
    r = cluster.post('/package/add',
                     headers={
                         'Accept':
                         ('application/vnd.dcos.package.add-response+json;'
                          'charset=utf-8;version=v1'),
                         'Content-Type':
                         ('application/vnd.dcos.package.add-request+json;'
                          'charset=utf-8;version=v1')
                     },
                     json={
                         'packageName': 'cassandra',
                         'packageVersion': '1.0.20-3.0.10'
                     })

    user_config = load_json("/opt/mesosphere/etc/expanded.config.json")
    if (user_config['cosmos_staged_package_storage_uri_flag']
            and user_config['cosmos_package_storage_uri_flag']):
        # if the config is enabled then Cosmos should accept the request and
        # return 202
        assert r.status_code == 202, 'status = {}, content = {}'.format(
            r.status_code, r.content)
    else:
        # if the config is disabled then Cosmos should reject the request and
        # return 501 Not Implemented
        assert r.status_code == 501, 'status = {}, content = {}'.format(
            r.status_code, r.content)
Example #3
def do_bundle_onprem(extra_files, gen_out, output_dir):
    # We are only being called via dcos_generate_config.sh with an output_dir
    assert output_dir is not None
    assert output_dir
    assert output_dir[-1] != '/'
    output_dir = output_dir + '/'

    # Copy the extra_files
    for filename in extra_files:
        shutil.copy(filename, output_dir + filename)

    # Copy the cluster packages
    for name, info in gen_out.cluster_packages.items():
        copy_makedirs(info['filename'], output_dir + info['filename'])

    # Write an index of the cluster packages
    write_json(output_dir + 'cluster-package-info.json', gen_out.cluster_packages)

    # Write the bootstrap id
    write_string(output_dir + 'bootstrap.latest', gen_out.arguments['bootstrap_id'])

    # Make a package fetch script
    package_fetches = "\n".join(
        fetch_pkg_template.format(
            package_path='packages/{name}/{id}.tar.xz'.format(name=pkgpanda.PackageId(package).name, id=package),
            bootstrap_url='https://downloads.dcos.io/dcos/stable'
            ) for package in load_json("/artifacts/{}.active.json".format(gen_out.arguments['bootstrap_id'])))
    write_string(output_dir + 'fetch_packages.sh', fetch_all_pkgs.format(package_fetches=package_fetches))
Example #4
def test_cosmos_package_add(dcos_api_session):
    r = dcos_api_session.post(
        '/package/add',
        headers={
            'Accept': (
                'application/vnd.dcos.package.add-response+json;'
                'charset=utf-8;version=v1'
            ),
            'Content-Type': (
                'application/vnd.dcos.package.add-request+json;'
                'charset=utf-8;version=v1'
            )
        },
        json={
            'packageName': 'cassandra',
            'packageVersion': '1.0.20-3.0.10'
        }
    )

    user_config = load_json("/opt/mesosphere/etc/expanded.config.json")
    if (user_config['cosmos_staged_package_storage_uri_flag'] and
            user_config['cosmos_package_storage_uri_flag']):
        # if the config is enabled then Cosmos should accept the request and
        # return 202
        assert r.status_code == 202, 'status = {}, content = {}'.format(
            r.status_code,
            r.content
        )
    else:
        # if the config is disabled then Cosmos should reject the request and
        # return 501 Not Implemented
        assert r.status_code == 501, 'status = {}, content = {}'.format(
            r.status_code,
            r.content
        )
Example #5
def apply_service_configuration(service):
    if not os.path.exists(DCOS_SERVICE_CONFIGURATION_PATH):
        return

    dcos_service_properties = load_json(DCOS_SERVICE_CONFIGURATION_PATH)
    if SYSCTL_SETTING_KEY in dcos_service_properties:
        _apply_sysctl_settings(dcos_service_properties[SYSCTL_SETTING_KEY], service)
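Note: the shape of the file at DCOS_SERVICE_CONFIGURATION_PATH is not shown here, but Example #32 below asserts the same structure in a test. An illustrative payload, with SYSCTL_SETTING_KEY assumed to be "sysctl":

# Illustrative dcos-service-configuration.json contents: sysctl settings
# are grouped per service name, matching the lookup in the code above.
example_dcos_service_properties = {
    "sysctl": {
        "dcos-mesos-master": {
            "kernel.watchdog_thresh": "11"
        }
    }
}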
Example #6
    def get_last_complete(variant):
        complete_latest = (
            self.get_complete_cache_dir() + '/' + pkgpanda.util.variant_prefix(variant) + 'complete.latest.json')
        if not os.path.exists(complete_latest):
            raise BuildError("No last complete found for variant {}. Expected to find {} to match "
                             "{}".format(pkgpanda.util.variant_name(variant), complete_latest,
                                         pkgpanda.util.variant_prefix(variant) + 'treeinfo.json'))
        return load_json(complete_latest)
Example #8
def load_optional_json(filename):
    # Load the package build info.
    try:
        return load_json(filename)
    except FileNotFoundError:
        # not existing -> empty dictionary / no specified values.
        return {}
    except ValueError as ex:
        raise BuildError("Unable to parse json: {}".format(ex))
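Note: load_json comes from pkgpanda.util and is not shown in this listing. A minimal sketch of the behavior load_optional_json relies on (FileNotFoundError from open(), ValueError from the JSON parser):

import json


def load_json(filename):
    # Sketch only: json.JSONDecodeError subclasses ValueError, so a parse
    # failure is caught by the except ValueError clause above.
    with open(filename) as f:
        return json.load(f)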
Example #9
def check_success(capsys, tmpdir, config_str):
    """
    Runs through the required functions of a launcher and then
    runs through the default usage of the script for a
    given config path and info path, ensuring each step passes
    if all steps finished successfully, this parses and returns the generated
    info JSON and stdout description JSON for more specific checks
    """
    # Test launcher directly first
    config = yaml.safe_load(config_str)
    launcher = get_launcher(config['type'], config['provider_info'])
    info = launcher.create(config)
    launcher.wait(info)
    launcher.describe(info)
    launcher.test(info, 'py.test')
    launcher.delete(info)

    # add config to disk and make info path for CLI testing
    config_path = tmpdir.join(
        'my_specific_config.yaml')  # test non-default name
    config_path.write(config_str)
    config_path = str(config_path)
    info_path = str(
        tmpdir.join('my_specific_info.json'))  # test non-default name

    # Now check launcher via CLI
    check_cli([
        'create', '--config-path={}'.format(config_path),
        '--info-path={}'.format(info_path)
    ])
    # use the info written to disk to ensure it is JSON-parsable
    info = load_json(info_path)
    # General assertions about info
    assert 'type' in info
    assert 'provider' in info
    assert 'ssh' in info
    assert 'user' in info['ssh']
    assert 'private_key' in info['ssh']

    check_cli(['wait', '--info-path={}'.format(info_path)])

    # clear stdout capture
    capsys.readouterr()
    check_cli(['describe', '--info-path={}'.format(info_path)])
    # capture stdout from describe and ensure it is JSON-parsable
    description = json.loads(capsys.readouterr()[0])

    # general assertions about description
    assert 'masters' in description
    assert 'private_agents' in description
    assert 'public_agents' in description

    check_cli(['pytest', '--info-path={}'.format(info_path)])

    check_cli(['delete', '--info-path={}'.format(info_path)])

    return info, description
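Note: check_cli is not defined in this listing. A hypothetical sketch, assuming a main() entry point that takes an argv-style list and returns a process exit code:

def check_cli(args):
    # Hypothetical helper: run the launcher CLI in-process and require
    # success, so every workflow step exercised above must pass.
    exit_code = main(args)
    assert exit_code == 0, 'command {} failed with exit code {}'.format(
        args, exit_code)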
Example #11
def do_main(args):
    if args['create']:
        info_path = args['--info-path']
        if os.path.exists(info_path):
            raise LauncherError('InputConflict',
                                'Target info path already exists!')
        config = load_yaml(args['--config-path'])
        check_keys(config, [
            'type', 'provider_info',
            'this_is_a_temporary_config_format_do_not_put_in_production'
        ])
        write_json(
            info_path,
            get_launcher(config['type'],
                         config['provider_info']).create(config))
        return 0

    info = load_json(args['--info-path'])
    check_keys(info, ['type', 'provider'])
    launcher = get_launcher(info['type'], info['provider'])

    if args['wait']:
        launcher.wait(info)
        print('Cluster is ready!')
        return 0

    if args['describe']:
        print(json_prettyprint(launcher.describe(info)))
        return 0

    if args['pytest']:
        test_cmd = 'py.test'
        if args['--env'] is not None:
            if '=' in args['--env']:
                # User is attempting to do an assignment with the option
                raise LauncherError(
                    'OptionError',
                    "The '--env' option can only pass through environment variables "
                    "from the current environment. Set variables according to the shell being used."
                )
            var_list = args['--env'].split(',')
            check_keys(os.environ, var_list)
            test_cmd = ' '.join(
                ['{}={}'.format(e, os.environ[e])
                 for e in var_list]) + ' ' + test_cmd
        if len(args['<pytest_extras>']) > 0:
            test_cmd += ' ' + ' '.join(args['<pytest_extras>'])
        launcher.test(info, test_cmd)
        return 0

    if args['delete']:
        launcher.delete(info)
        return 0
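do_main expects a docopt-style argument dictionary. A hedged usage sketch for the create path (file names are illustrative; option keys that this path never reads are included for completeness):

# Hypothetical invocation, roughly equivalent to:
#   create --config-path=my_config.yaml --info-path=my_info.json
exit_code = do_main({
    'create': True,
    'wait': False,
    'describe': False,
    'pytest': False,
    'delete': False,
    '--config-path': 'my_config.yaml',
    '--info-path': 'my_info.json',
    '--env': None,
    '<pytest_extras>': [],
})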
Example #12
def load_optional_json(filename):
    try:
        with open(filename) as f:
            text = f.read().strip()
            if text:
                return json.loads(text)
            return {}
    except FileNotFoundError:
        raise BuildError("Didn't find expected JSON file: {}".format(filename))
    except ValueError as ex:
        raise BuildError("Unable to parse json in {}: {}".format(filename, ex))
Example #13
def _get_package_list(package_list_id: str, repository_url: str) -> List[str]:
    package_list_url = repository_url + '/package_lists/{}.package_list.json'.format(package_list_id)
    with tempfile.NamedTemporaryFile() as f:
        download(f.name, package_list_url, os.getcwd(), rm_on_error=False)
        package_list = load_json(f.name)

    if not isinstance(package_list, list):
        raise ValidationError('{} should contain a JSON list of packages. Got a {}'.format(
            package_list_url, type(package_list)
        ))

    return package_list
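A usage sketch with illustrative arguments; the helper downloads <repository_url>/package_lists/<id>.package_list.json and validates that it parses to a JSON list:

# Hypothetical values: both the list ID and the repository URL are
# placeholders for illustration only.
packages = _get_package_list(
    'd0ce1a8b2f25e5a3e6e6e3a84a9f5c4f9f3f2a1b',
    'https://example.com/my-dcos-repo')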
Example #15
def do_main(args):
    _handle_logging(args['--log-level'].upper())

    config_path = args['--config-path']
    if args['create']:
        config = launch.config.get_validated_config(config_path)
        info_path = args['--info-path']
        if os.path.exists(info_path):
            raise launch.util.LauncherError(
                'InputConflict', 'Target info path already exists!')
        write_json(info_path, launch.get_launcher(config).create())
        return 0

    try:
        info = load_json(args['--info-path'])
    except FileNotFoundError as ex:
        raise launch.util.LauncherError('MissingInfoJSON', None) from ex

    launcher = launch.get_launcher(info)

    if args['wait']:
        launcher.wait()
        print('Cluster is ready!')
        return 0

    if args['describe']:
        print(json_prettyprint(launcher.describe()))
        return 0

    if args['pytest']:
        var_list = list()
        if args['--env'] is not None:
            if '=' in args['--env']:
                # User is attempting to do an assignment with the option
                raise launch.util.LauncherError(
                    'OptionError',
                    "The '--env' option can only pass through environment variables "
                    "from the current environment. Set variables according to the shell being used."
                )
            var_list = args['--env'].split(',')
            missing = [v for v in var_list if v not in os.environ]
            if len(missing) > 0:
                raise launch.util.LauncherError(
                    'MissingInput',
                    'Environment variable arguments have been indicated '
                    'but not set: {}'.format(repr(missing)))
        env_dict = {e: os.environ[e] for e in var_list}
        return launcher.test(args['<pytest_extras>'], env_dict)

    if args['delete']:
        launcher.delete()
        return 0
Example #16
def check_success(capsys, tmpdir, config_str):
    """
    Runs through the required functions of a launcher and then
    runs through the default usage of the script for a
    given config path and info path, ensuring each step passes
    if all steps finished successfully, this parses and returns the generated
    info JSON and stdout description JSON for more specific checks
    """
    # Test launcher directly first
    config = yaml.safe_load(config_str)
    launcher = get_launcher(config['type'], config['provider_info'])
    info = launcher.create(config)
    launcher.wait(info)
    launcher.describe(info)
    launcher.test(info, 'py.test')
    launcher.delete(info)

    # add config to disk and make info path for CLI testing
    config_path = tmpdir.join('my_specific_config.yaml')  # test non-default name
    config_path.write(config_str)
    config_path = str(config_path)
    info_path = str(tmpdir.join('my_specific_info.json'))  # test non-default name

    # Now check launcher via CLI
    check_cli(['create', '--config-path={}'.format(config_path), '--info-path={}'.format(info_path)])
    # use the info written to disk to ensure it is JSON-parsable
    info = load_json(info_path)
    # General assertions about info
    assert 'type' in info
    assert 'provider' in info
    assert 'ssh' in info
    assert 'user' in info['ssh']
    assert 'private_key' in info['ssh']

    check_cli(['wait', '--info-path={}'.format(info_path)])

    # clear stdout capture
    capsys.readouterr()
    check_cli(['describe', '--info-path={}'.format(info_path)])
    # capture stdout from describe and ensure it is JSON-parsable
    description = json.loads(capsys.readouterr()[0])

    # general assertions about description
    assert 'masters' in description
    assert 'private_agents' in description
    assert 'public_agents' in description

    check_cli(['pytest', '--info-path={}'.format(info_path)])

    check_cli(['delete', '--info-path={}'.format(info_path)])

    return info, description
Example #17
    def recover_swap_active(self):
        state_filename = self._make_abs("install_progress")
        if not os.path.exists(state_filename):
            return False, "Path does not exist: {}".format(state_filename)
        state = load_json(state_filename)
        extension = state['extension']
        stage = state['stage']
        if stage == 'archive':
            self.swap_active(extension, True)
        elif stage == 'move_new':
            self.swap_active(extension, False)
        else:
            raise ValueError("Unexpected state to recover from {}".format(state))

        return True, ""
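For reference, recover_swap_active expects install_progress to be a JSON object with the two keys read above. An illustrative state, assuming it is written with write_json by the code performing the swap:

# Illustrative install_progress contents; 'stage' must be 'archive' or
# 'move_new', anything else raises ValueError above.
example_install_progress = {
    'extension': '.new',
    'stage': 'archive',
}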
Example #19
File: cli.py Project: tamarrow/dcos
def do_main(args):
    _handle_logging(args['--log-level'].upper())

    config_path = args['--config-path']
    if args['create']:
        config = launch.config.get_validated_config(config_path)
        info_path = args['--info-path']
        if os.path.exists(info_path):
            raise launch.util.LauncherError('InputConflict', 'Target info path already exists!')
        write_json(info_path, launch.get_launcher(config).create())
        return 0

    try:
        info = load_json(args['--info-path'])
    except FileNotFoundError as ex:
        raise launch.util.LauncherError('MissingInfoJSON', None) from ex

    launcher = launch.get_launcher(info)

    if args['wait']:
        launcher.wait()
        print('Cluster is ready!')
        return 0

    if args['describe']:
        print(json_prettyprint(launcher.describe()))
        return 0

    if args['pytest']:
        var_list = list()
        if args['--env'] is not None:
            if '=' in args['--env']:
                # User is attempting to do an assignment with the option
                raise launch.util.LauncherError(
                    'OptionError', "The '--env' option can only pass through environment variables "
                    "from the current environment. Set variables according to the shell being used.")
            var_list = args['--env'].split(',')
            missing = [v for v in var_list if v not in os.environ]
            if len(missing) > 0:
                raise launch.util.LauncherError(
                    'MissingInput', 'Environment variable arguments have been indicated '
                    'but not set: {}'.format(repr(missing)))
        env_dict = {e: os.environ[e] for e in var_list}
        return launcher.test(args['<pytest_extras>'], env_dict)

    if args['delete']:
        launcher.delete()
        return 0
Example #20
def do_main(args):
    _handle_logging(args['--log-level'].upper())

    config_path = args['--config-path']
    if args['create']:
        config = launch.config.get_validated_config(config_path)
        info_path = args['--info-path']
        if os.path.exists(info_path):
            raise launch.util.LauncherError(
                'InputConflict', 'Target info path already exists!')
        write_json(info_path, launch.get_launcher(config).create(config))
        return 0

    info = load_json(args['--info-path'])
    launcher = launch.get_launcher(info)

    if args['wait']:
        launcher.wait(info)
        print('Cluster is ready!')
        return 0

    if args['describe']:
        print(json_prettyprint(launcher.describe(info)))
        return 0

    if args['pytest']:
        test_cmd = 'py.test'
        if args['--env'] is not None:
            if '=' in args['--env']:
                # User is attempting to do an assignment with the option
                raise launch.util.LauncherError(
                    'OptionError',
                    "The '--env' option can only pass through environment variables "
                    "from the current environment. Set variables according to the shell being used."
                )
            var_list = args['--env'].split(',')
            launch.util.check_keys(os.environ, var_list)
            test_cmd = ' '.join(
                ['{}={}'.format(e, os.environ[e])
                 for e in var_list]) + ' ' + test_cmd
        if len(args['<pytest_extras>']) > 0:
            test_cmd += ' ' + ' '.join(args['<pytest_extras>'])
        launcher.test(info, test_cmd)
        return 0

    if args['delete']:
        launcher.delete(info)
        return 0
Example #21
    def load(self, id):

        # Validate the package id.
        PackageId(id)

        path = self.package_path(id)
        filename = os.path.join(path, "pkginfo.json")
        try:
            pkginfo = load_json(filename)
        except FileNotFoundError as ex:
            raise PackageError("No / unreadable pkginfo.json in {0}: {1}".format(id, ex.strerror)) from ex

        if not isinstance(pkginfo, dict):
            raise PackageError("Usage should be a dictionary, not a {0}".format(type(pkginfo).__name__))

        return Package(path, id, pkginfo)
Example #22
def make_cluster_fixture():
    # token valid until 2036 for user [email protected]
    # {
    #   "email": "*****@*****.**",
    #   "email_verified": true,
    #   "iss": "https://dcos.auth0.com/",
    #   "sub": "google-oauth2|109964499011108905050",
    #   "aud": "3yF5TOSzdlI45Q1xspxzeoGBe9fNxm9m",
    #   "exp": 2090884974,
    #   "iat": 1460164974
    # }
    auth_json = {'token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6Ik9UQkVOakZFTWtWQ09VRTRPRVpGTlRNMFJrWXlRa015Tnprd1JrSkVRemRCTWpBM1FqYzVOZyJ9.eyJlbWFpbCI6ImFsYmVydEBiZWtzdGlsLm5ldCIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJpc3MiOiJodHRwczovL2Rjb3MuYXV0aDAuY29tLyIsInN1YiI6Imdvb2dsZS1vYXV0aDJ8MTA5OTY0NDk5MDExMTA4OTA1MDUwIiwiYXVkIjoiM3lGNVRPU3pkbEk0NVExeHNweHplb0dCZTlmTnhtOW0iLCJleHAiOjIwOTA4ODQ5NzQsImlhdCI6MTQ2MDE2NDk3NH0.OxcoJJp06L1z2_41_p65FriEGkPzwFB_0pA9ULCvwvzJ8pJXw9hLbmsx-23aY2f-ydwJ7LSibL9i5NbQSR2riJWTcW4N7tLLCCMeFXKEK4hErN2hyxz71Fl765EjQSO5KD1A-HsOPr3ZZPoGTBjE0-EFtmXkSlHb1T2zd0Z8T5Z2-q96WkFoT6PiEdbrDA-e47LKtRmqsddnPZnp0xmMQdTr2MjpVgvqG7TlRvxDcYc-62rkwQXDNSWsW61FcKfQ-TRIZSf2GS9F9esDF4b5tRtrXcBNaorYa9ql0XAWH5W_ct4ylRNl3vwkYKWa4cmPvOqT5Wlj9Tf0af4lNO40PQ'}  # noqa
    if 'DCOS_AUTH_JSON_PATH' in os.environ:
        auth_json = load_json(os.environ['DCOS_AUTH_JSON_PATH'])
    args = get_args_from_env()
    args['web_auth_default_user'] = DcosUser(auth_json)
    cluster_api = ClusterApi(**args)
    cluster_api.wait_for_dcos()
    return cluster_api
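Setting DCOS_AUTH_JSON_PATH swaps in a different login token. The file is assumed to have the same single-key shape as the hard-coded fallback:

# Illustrative contents for the file named by DCOS_AUTH_JSON_PATH;
# only the 'token' key is consumed above.
example_auth_json = {
    'token': '<JWT login token for the cluster under test>'
}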
Example #23
File: cli.py Project: malnick/dcos
def do_main(args):
    _handle_logging(args['--log-level'].upper())

    config_path = args['--config-path']
    if args['create']:
        config = launch.config.get_validated_config(config_path)
        info_path = args['--info-path']
        if os.path.exists(info_path):
            raise launch.util.LauncherError('InputConflict', 'Target info path already exists!')
        write_json(info_path, launch.get_launcher(config).create(config))
        return 0

    info = load_json(args['--info-path'])
    launcher = launch.get_launcher(info)

    if args['wait']:
        launcher.wait(info)
        print('Cluster is ready!')
        return 0

    if args['describe']:
        print(json_prettyprint(launcher.describe(info)))
        return 0

    if args['pytest']:
        test_cmd = 'py.test'
        if args['--env'] is not None:
            if '=' in args['--env']:
                # User is attempting to do an assignment with the option
                raise launch.util.LauncherError(
                    'OptionError', "The '--env' option can only pass through environment variables "
                    "from the current environment. Set variables according to the shell being used.")
            var_list = args['--env'].split(',')
            launch.util.check_keys(os.environ, var_list)
            test_cmd = ' '.join(['{}={}'.format(e, os.environ[e]) for e in var_list]) + ' ' + test_cmd
        if len(args['<pytest_extras>']) > 0:
            test_cmd += ' ' + ' '.join(args['<pytest_extras>'])
        launcher.test(info, test_cmd)
        return 0

    if args['delete']:
        launcher.delete(info)
        return 0
Example #24
    def load(self, id: str):

        # Validate the package id.
        PackageId(id)

        path = self.package_path(id)
        if not os.path.exists(path):
            raise PackageNotFound(id)

        filename = os.path.join(path, "pkginfo.json")
        try:
            pkginfo = load_json(filename)
        except OSError as ex:
            raise PackageError("No / unreadable pkginfo.json in {0}: {1}".format(id, ex.strerror)) from ex

        if not isinstance(pkginfo, dict):
            raise PackageError("Usage should be a dictionary, not a {0}".format(type(pkginfo).__name__))

        return Package(path, id, pkginfo)
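load() only requires that pkginfo.json parse to a dictionary. A hedged illustration; the 'environment' key is an assumption based on how package.environment is consumed in the activate() examples elsewhere in this listing:

# Hypothetical pkginfo.json contents. An empty object is the smallest
# input that passes the isinstance(pkginfo, dict) check above.
minimal_pkginfo = {}
assumed_pkginfo = {'environment': {'MESOS_WORK_DIR': '/var/lib/mesos'}}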
Example #25
def _do_bootstrap(install, repository):
    # These files should be set by the environment which initially builds
    # the host (cloud-init).
    repository_url = if_exists(load_string, install.get_config_filename("setup-flags/repository-url"))

    # TODO(cmaloney): If there is 1+ master, grab the active config from a master.
    # If the config can't be grabbed from any of them, fail.
    def fetcher(id, target):
        if repository_url is None:
            raise ValidationError("ERROR: Non-local package {} but no repository url given.".format(repository_url))
        return requests_fetcher(repository_url, id, target, os.getcwd())

    # Copy host/cluster-specific packages written to the filesystem manually
    # from the setup-packages folder into the repository. Do not overwrite or
    # merge existing packages, hard fail instead.
    setup_packages_to_activate = []
    setup_pkg_dir = install.get_config_filename("setup-packages")
    copy_fetcher = partial(_copy_fetcher, setup_pkg_dir)
    if os.path.exists(setup_pkg_dir):
        for pkg_id_str in os.listdir(setup_pkg_dir):
            print("Installing setup package: {}".format(pkg_id_str))
            if not PackageId.is_id(pkg_id_str):
                raise ValidationError("Invalid package id in setup package: {}".format(pkg_id_str))
            pkg_id = PackageId(pkg_id_str)
            if pkg_id.version != "setup":
                raise ValidationError(
                    "Setup packages (those in `{0}`) must have the version setup. "
                    "Bad package: {1}".format(setup_pkg_dir, pkg_id_str))

            # Make sure there is no existing package
            if repository.has_package(pkg_id_str):
                print("WARNING: Ignoring already installed package {}".format(pkg_id_str))

            repository.add(copy_fetcher, pkg_id_str)
            setup_packages_to_activate.append(pkg_id_str)

    # If active.json is set on the host, use that as the set of packages to
    # activate. Otherwise just use the set of currently active packages (those
    # active in the bootstrap tarball)
    to_activate = None
    active_path = install.get_config_filename("setup-flags/active.json")
    if os.path.exists(active_path):
        print("Loaded active packages from", active_path)
        to_activate = load_json(active_path)

        # Ensure all packages are local
        print("Ensuring all packages in active set {} are local".format(",".join(to_activate)))
        for package in to_activate:
            repository.add(fetcher, package)
    else:
        print("Calculated active packages from bootstrap tarball")
        to_activate = list(install.get_active())

        # Fetch and activate all requested additional packages to accompany the bootstrap packages.
        cluster_packages_filename = install.get_config_filename("setup-flags/cluster-packages.json")
        cluster_packages = if_exists(load_json, cluster_packages_filename)
        print("Checking for cluster packages in:", cluster_packages_filename)
        if cluster_packages:
            if not isinstance(cluster_packages, list):
                print('ERROR: {} should contain a JSON list of packages. Got a {}'.format(cluster_packages_filename,
                                                                                          type(cluster_packages)))
            print("Loading cluster-packages: {}".format(cluster_packages))

            for package_id_str in cluster_packages:
                # Validate the package ids
                pkg_id = PackageId(package_id_str)

                # Fetch the packages if not local
                if not repository.has_package(package_id_str):
                    repository.add(fetcher, package_id_str)

                # Add the package to the set to activate
                setup_packages_to_activate.append(package_id_str)
        else:
            print("No cluster-packages specified")

    # Calculate the full set of final packages (Explicit activations + setup packages).
    # De-duplicate using a set.
    to_activate = list(set(to_activate + setup_packages_to_activate))

    print("Activating packages")
    install.activate(repository.load_packages(to_activate))
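Note: if_exists is not shown in this listing. A minimal sketch, assuming it applies the loader and treats only a missing file as "no value":

def if_exists(fn, filename):
    # Sketch only: swallow a missing file and return None, but let parse
    # errors from load_string/load_json propagate to the caller.
    try:
        return fn(filename)
    except FileNotFoundError:
        return None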
Example #26
    def activate(self, packages):
        # Ensure the new set is reasonable.
        validate_compatible(packages, self.__roles)

        # Build the absolute paths for the running config, new config location,
        # and where to archive the config.
        active_names = self.get_active_names()
        active_dirs = list(
            map(self._make_abs, self.__well_known_dirs + ["active"]))

        new_names = [name + ".new" for name in active_names]
        new_dirs = [name + ".new" for name in active_dirs]

        old_names = [name + ".old" for name in active_names]

        # Remove all pre-existing new and old directories
        for name in chain(new_names, old_names):
            if os.path.exists(name):
                if os.path.isdir(name):
                    shutil.rmtree(name)
                else:
                    os.remove(name)

        # Make the directories for the new config
        for name in new_dirs:
            os.makedirs(name)

        # Fill in all the new contents
        def symlink_all(src, dest):
            if not os.path.isdir(src):
                return

            symlink_tree(src, dest)

        # Set the new LD_LIBRARY_PATH, PATH.
        env_contents = env_header.format(
            "/opt/mesosphere" if self.__fake_path else self.__root)
        env_export_contents = env_export_header.format(
            "/opt/mesosphere" if self.__fake_path else self.__root)

        active_buildinfo_full = {}

        # Add the folders, config in each package.
        for package in packages:
            # Package folders
            # NOTE: Since active is at the end of the folder list it will be
            # removed by the zip. This is the desired behavior, since it will be
            # populated later.
            # Do the basename since some well known dirs are full paths (dcos.target.wants)
            # while inside the packages they are always top level directories.
            for new, dir_name in zip(new_dirs, self.__well_known_dirs):
                dir_name = os.path.basename(dir_name)
                pkg_dir = os.path.join(package.path, dir_name)
                assert os.path.isabs(new)
                assert os.path.isabs(pkg_dir)

                try:
                    symlink_all(pkg_dir, new)

                    # Symlink all applicable role-based config
                    for role in self.__roles:
                        role_dir = os.path.join(
                            package.path, "{0}_{1}".format(dir_name, role))
                        symlink_all(role_dir, new)
                except ConflictingFile as ex:
                    raise ValidationError(
                        "Two packages are trying to install the same file {0} or "
                        "two roles in the set of roles {1} are causing a package "
                        "to try activating multiple versions of the same file. "
                        "One of the package files is {2}.".format(
                            ex.dest, self.__roles, ex.src))

            # Add to the active folder
            os.symlink(
                package.path,
                os.path.join(self._make_abs("active.new"), package.name))

            # Add to the environment contents
            env_contents += "# package: {0}\n".format(package.id)
            for k, v in package.environment.items():
                env_contents += "{0}={1}\n".format(k, v)
            env_contents += "\n"

            # Add to the environment.export contents
            env_export_contents += "# package: {0}\n".format(package.id)
            for k, v in package.environment.items():
                env_export_contents += "export {0}={1}\n".format(k, v)
            env_export_contents += "\n"

            # Add to the buildinfo
            try:
                active_buildinfo_full[package.name] = load_json(
                    os.path.join(package.path, "buildinfo.full.json"))
            except FileNotFoundError:
                # TODO(cmaloney): These only come from setup-packages. Should update
                # setup-packages to add a buildinfo.full for those packages
                active_buildinfo_full[package.name] = None

        # Write out the new environment file.
        new_env = self._make_abs("environment.new")
        write_string(new_env, env_contents)

        # Write out the new environment.export file
        new_env_export = self._make_abs("environment.export.new")
        write_string(new_env_export, env_export_contents)

        # Write out the buildinfo of every active package
        new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
        write_json(new_buildinfo_meta, active_buildinfo_full)

        self.swap_active(".new")
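For reference, the per-package loop above emits files of the following shape; the package id and variable are illustrative:

# Illustrative excerpt of the generated environment file (plain KEY=value
# lines) and environment.export (the same lines prefixed with 'export').
environment_excerpt = (
    '# package: mesos--0.28.1\n'
    'MESOS_WORK_DIR=/var/lib/mesos\n'
    '\n'
)
environment_export_excerpt = (
    '# package: mesos--0.28.1\n'
    'export MESOS_WORK_DIR=/var/lib/mesos\n'
    '\n'
)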
Example #27
def _do_bootstrap(install, repository):
    # These files should be set by the environment which initially builds
    # the host (cloud-init).
    repository_url = if_exists(
        load_string, install.get_config_filename("setup-flags/repository-url"))

    # TODO(cmaloney): If there is 1+ master, grab the active config from a master.
    # If the config can't be grabbed from any of them, fail.
    def fetcher(id, target):
        if repository_url is None:
            raise ValidationError(
                "ERROR: Non-local package {} but no repository url given.".
                format(id))
        return requests_fetcher(repository_url, id, target, os.getcwd())

    # Copy host/cluster-specific packages written to the filesystem manually
    # from the setup-packages folder into the repository. Do not overwrite or
    # merge existing packages, hard fail instead.
    setup_packages_to_activate = []
    setup_pkg_dir = install.get_config_filename("setup-packages")
    copy_fetcher = partial(_copy_fetcher, setup_pkg_dir)
    if os.path.exists(setup_pkg_dir):
        for pkg_id_str in os.listdir(setup_pkg_dir):
            print("Installing setup package: {}".format(pkg_id_str))
            if not PackageId.is_id(pkg_id_str):
                raise ValidationError(
                    "Invalid package id in setup package: {}".format(
                        pkg_id_str))
            pkg_id = PackageId(pkg_id_str)
            if pkg_id.version != "setup":
                raise ValidationError(
                    "Setup packages (those in `{0}`) must have the version setup. "
                    "Bad package: {1}".format(setup_pkg_dir, pkg_id_str))

            # Make sure there is no existing package
            if repository.has_package(pkg_id_str):
                print("WARNING: Ignoring already installed package {}".format(
                    pkg_id_str))

            repository.add(copy_fetcher, pkg_id_str)
            setup_packages_to_activate.append(pkg_id_str)

    # If active.json is set on the host, use that as the set of packages to
    # activate. Otherwise just use the set of currently active packages (those
    # active in the bootstrap tarball)
    to_activate = None
    active_path = install.get_config_filename("setup-flags/active.json")
    if os.path.exists(active_path):
        print("Loaded active packages from", active_path)
        to_activate = load_json(active_path)

        # Ensure all packages are local
        print("Ensuring all packages in active set {} are local".format(
            ",".join(to_activate)))
        for package in to_activate:
            repository.add(fetcher, package)
    else:
        print("Calculated active packages from bootstrap tarball")
        to_activate = list(install.get_active())

        # Fetch and activate all requested additional packages to accompany the bootstrap packages.
        cluster_packages_filename = install.get_config_filename(
            "setup-flags/cluster-packages.json")
        cluster_packages = if_exists(load_json, cluster_packages_filename)
        print("Checking for cluster packages in:", cluster_packages_filename)
        if cluster_packages:
            if not isinstance(cluster_packages, list):
                print(
                    'ERROR: {} should contain a JSON list of packages. Got a {}'
                    .format(cluster_packages_filename, type(cluster_packages)))
            print("Loading cluster-packages: {}".format(cluster_packages))

            for package_id_str in cluster_packages:
                # Validate the package ids
                pkg_id = PackageId(package_id_str)

                # Fetch the packages if not local
                if not repository.has_package(package_id_str):
                    repository.add(fetcher, package_id_str)

                # Add the package to the set to activate
                setup_packages_to_activate.append(package_id_str)
        else:
            print("No cluster-packages specified")

    # Calculate the full set of final packages (Explicit activations + setup packages).
    # De-duplicate using a set.
    to_activate = list(set(to_activate + setup_packages_to_activate))

    print("Activating packages")
    install.activate(repository.load_packages(to_activate))
Example #28
def test_signal_service(dcos_api_session):
    """
    signal-service runs on an hourly timer; this test runs it as a one-off
    and pushes the results to the test_server app for easy retrieval.
    """
    # This is due to caching done by 3DT / Signal service
    # We're going to remove this soon: https://mesosphere.atlassian.net/browse/DCOS-9050
    dcos_version = os.environ["DCOS_VERSION"]
    signal_config_data = load_json('/opt/mesosphere/etc/dcos-signal-config.json')
    customer_key = signal_config_data.get('customer_key', '')
    enabled = signal_config_data.get('enabled', 'false')
    cluster_id = load_string('/var/lib/dcos/cluster-id').strip()

    if enabled == 'false':
        pytest.skip('Telemetry disabled in /opt/mesosphere/etc/dcos-signal-config.json... skipping test')

    logging.info("Version: " + dcos_version)
    logging.info("Customer Key: " + customer_key)
    logging.info("Cluster ID: " + cluster_id)

    direct_report = dcos_api_session.get('/system/health/v1/report?cache=0')
    signal_results = subprocess.check_output(["/opt/mesosphere/bin/dcos-signal", "-test"], universal_newlines=True)
    r_data = json.loads(signal_results)

    exp_data = {
        'diagnostics': {
            'event': 'health',
            'anonymousId': cluster_id,
            'properties': {}
        },
        'cosmos': {
            'event': 'package_list',
            'anonymousId': cluster_id,
            'properties': {}
        },
        'mesos': {
            'event': 'mesos_track',
            'anonymousId': cluster_id,
            'properties': {}
        }
    }

    # Generic properties which are the same between all tracks
    generic_properties = {
        'platform': expanded_config['platform'],
        'provider': expanded_config['provider'],
        'source': 'cluster',
        'clusterId': cluster_id,
        'customerKey': customer_key,
        'environmentVersion': dcos_version,
        'variant': 'open'
    }

    # Insert the generic property data which is the same between all signal tracks
    exp_data['diagnostics']['properties'].update(generic_properties)
    exp_data['cosmos']['properties'].update(generic_properties)
    exp_data['mesos']['properties'].update(generic_properties)

    # Insert all the diagnostics data programmatically
    master_units = [
        'adminrouter-service',
        'adminrouter-reload-service',
        'adminrouter-reload-timer',
        'cosmos-service',
        'metrics-master-service',
        'metrics-master-socket',
        'exhibitor-service',
        'history-service',
        'log-master-service',
        'log-master-socket',
        'logrotate-master-service',
        'logrotate-master-timer',
        'marathon-service',
        'mesos-dns-service',
        'mesos-master-service',
        'metronome-service',
        'signal-service']
    all_node_units = [
        '3dt-service',
        '3dt-socket',
        'epmd-service',
        'gen-resolvconf-service',
        'gen-resolvconf-timer',
        'navstar-service',
        'pkgpanda-api-service',
        'signal-timer',
        'spartan-service',
        'spartan-watchdog-service',
        'spartan-watchdog-timer']
    slave_units = [
        'mesos-slave-service']
    public_slave_units = [
        'mesos-slave-public-service']
    all_slave_units = [
        'docker-gc-service',
        'docker-gc-timer',
        'metrics-agent-service',
        'metrics-agent-socket',
        'adminrouter-agent-service',
        'adminrouter-agent-reload-service',
        'adminrouter-agent-reload-timer',
        'log-agent-service',
        'log-agent-socket',
        'logrotate-agent-service',
        'logrotate-agent-timer',
        'rexray-service']

    master_units.append('oauth-service')

    for unit in master_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(dcos_api_session.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_node_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(
            dcos_api_session.all_slaves + dcos_api_session.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(dcos_api_session.slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in public_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] \
            = len(dcos_api_session.public_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] \
            = len(dcos_api_session.all_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0

    def check_signal_data():
        # Check the entire hash of diagnostics data
        assert r_data['diagnostics'] == exp_data['diagnostics']
        # Check a subset of things regarding Mesos that we can logically check for
        framework_names = [x['name'] for x in r_data['mesos']['properties']['frameworks']]
        assert 'marathon' in framework_names
        assert 'metronome' in framework_names
        # There are no packages installed by default on the integration test, ensure the key exists
        assert len(r_data['cosmos']['properties']['package_list']) == 0

    try:
        check_signal_data()
    except AssertionError as err:
        logging.info('System report: {}'.format(direct_report.json()))
        raise err
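The signal config file is assumed to contain at least the two keys read with .get() above. An illustrative example:

# Illustrative /opt/mesosphere/etc/dcos-signal-config.json contents;
# 'enabled' is compared against the string 'false', and 'customer_key'
# defaults to '' when absent.
example_signal_config = {
    'enabled': 'true',
    'customer_key': '<customer uuid>'
}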
Example #29
def test_signal_service(dcos_api_session):
    """
    signal-service runs on an hourly timer; this test runs it as a one-off
    and pushes the results to the test_server app for easy retrieval.
    """
    # This is due to caching done by 3DT / Signal service
    # We're going to remove this soon: https://mesosphere.atlassian.net/browse/DCOS-9050
    dcos_version = os.environ["DCOS_VERSION"]
    signal_config_data = load_json('/opt/mesosphere/etc/dcos-signal-config.json')
    customer_key = signal_config_data.get('customer_key', '')
    enabled = signal_config_data.get('enabled', 'false')
    cluster_id = load_string('/var/lib/dcos/cluster-id').strip()

    if enabled == 'false':
        pytest.skip('Telemetry disabled in /opt/mesosphere/etc/dcos-signal-config.json... skipping test')

    logging.info("Version: " + dcos_version)
    logging.info("Customer Key: " + customer_key)
    logging.info("Cluster ID: " + cluster_id)

    direct_report = dcos_api_session.get('/system/health/v1/report?cache=0')
    signal_results = subprocess.check_output(["/opt/mesosphere/bin/dcos-signal", "-test"], universal_newlines=True)
    r_data = json.loads(signal_results)

    exp_data = {
        'diagnostics': {
            'event': 'health',
            'anonymousId': cluster_id,
            'properties': {}
        },
        'cosmos': {
            'event': 'package_list',
            'anonymousId': cluster_id,
            'properties': {}
        },
        'mesos': {
            'event': 'mesos_track',
            'anonymousId': cluster_id,
            'properties': {}
        }
    }

    # Generic properties which are the same between all tracks
    generic_properties = {
        'platform': expanded_config['platform'],
        'provider': expanded_config['provider'],
        'source': 'cluster',
        'clusterId': cluster_id,
        'customerKey': customer_key,
        'environmentVersion': dcos_version,
        'variant': 'open'
    }

    # Insert the generic property data which is the same between all signal tracks
    exp_data['diagnostics']['properties'].update(generic_properties)
    exp_data['cosmos']['properties'].update(generic_properties)
    exp_data['mesos']['properties'].update(generic_properties)

    # Insert all the diagnostics data programmatically
    master_units = [
        'adminrouter-service',
        'adminrouter-reload-service',
        'adminrouter-reload-timer',
        'cosmos-service',
        'metrics-master-service',
        'metrics-master-socket',
        'exhibitor-service',
        'history-service',
        'log-master-service',
        'log-master-socket',
        'logrotate-master-service',
        'logrotate-master-timer',
        'marathon-service',
        'mesos-dns-service',
        'mesos-master-service',
        'metronome-service',
        'signal-service']
    all_node_units = [
        '3dt-service',
        '3dt-socket',
        'epmd-service',
        'gen-resolvconf-service',
        'gen-resolvconf-timer',
        'navstar-service',
        'pkgpanda-api-service',
        'pkgpanda-api-socket',
        'signal-timer',
        'spartan-service',
        'spartan-watchdog-service',
        'spartan-watchdog-timer']
    slave_units = [
        'mesos-slave-service']
    public_slave_units = [
        'mesos-slave-public-service']
    all_slave_units = [
        'docker-gc-service',
        'docker-gc-timer',
        'metrics-agent-service',
        'metrics-agent-socket',
        'adminrouter-agent-service',
        'adminrouter-agent-reload-service',
        'adminrouter-agent-reload-timer',
        'log-agent-service',
        'log-agent-socket',
        'logrotate-agent-service',
        'logrotate-agent-timer',
        'rexray-service']

    master_units.append('oauth-service')

    for unit in master_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(dcos_api_session.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_node_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(
            dcos_api_session.all_slaves + dcos_api_session.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(dcos_api_session.slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in public_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] \
            = len(dcos_api_session.public_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] \
            = len(dcos_api_session.all_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0

    def check_signal_data():
        # Check the entire hash of diagnostics data
        assert r_data['diagnostics'] == exp_data['diagnostics']
        # Check a subset of things regarding Mesos that we can logically check for
        framework_names = [x['name'] for x in r_data['mesos']['properties']['frameworks']]
        assert 'marathon' in framework_names
        assert 'metronome' in framework_names
        # There are no packages installed by default on the integration test, ensure the key exists
        assert len(r_data['cosmos']['properties']['package_list']) == 0

    try:
        check_signal_data()
    except AssertionError as err:
        logging.info('System report: {}'.format(direct_report.json()))
        raise err
Example #30
    def activate(self, packages):
        # Ensure the new set is reasonable.
        validate_compatible(packages, self.__roles)

        # Build the absolute paths for the running config, new config location,
        # and where to archive the config.
        active_names = self.get_active_names()
        active_dirs = list(
            map(self._make_abs, self.__well_known_dirs + ["active"]))

        new_names = [name + ".new" for name in active_names]
        new_dirs = [name + ".new" for name in active_dirs]

        old_names = [name + ".old" for name in active_names]

        # Remove all pre-existing new and old directories
        for name in chain(new_names, old_names):
            if os.path.exists(name):
                if os.path.isdir(name):
                    remove_directory(name)
                else:
                    os.remove(name)

        # Remove unit files staged for an activation that didn't occur.
        if not self.__skip_systemd_dirs:
            self.systemd.remove_staged_unit_files()

        # Make the directories for the new config
        for name in new_dirs:
            os.makedirs(name)

        def symlink_all(src, dest):
            if not os.path.isdir(src):
                return

            symlink_tree(src, dest)

        # Set the new LD_LIBRARY_PATH, PATH.
        env_contents = env_header.format(
            "/opt/mesosphere" if self.__fake_path else self.__root)
        env_export_contents = env_export_header.format(
            "/opt/mesosphere" if self.__fake_path else self.__root)

        active_buildinfo_full = {}

        dcos_service_configuration = self._get_dcos_configuration_template()

        # Building up the set of users
        sysusers = UserManagement(self.__manage_users, self.__add_users)

        def _get_service_files(_dir):
            service_files = []
            for root, directories, filenames in os.walk(_dir):
                for filename in filter(lambda name: name.endswith(".service"),
                                       filenames):
                    service_files.append(os.path.join(root, filename))
            return service_files

        def _get_service_names(_dir):
            service_files = list(
                map(os.path.basename, _get_service_files(_dir)))

            if not service_files:
                return []

            return list(
                map(lambda name: os.path.splitext(name)[0], service_files))

        # Add the folders, config in each package.
        for package in packages:
            # Package folders
            # NOTE: Since active is at the end of the folder list it will be
            # removed by the zip. This is the desired behavior, since it will be
            # populated later.
            # Do the basename since some well known dirs are full paths (dcos.target.wants)
            # while inside the packages they are always top level directories.
            for new, dir_name in zip(new_dirs, self.__well_known_dirs):
                dir_name = os.path.basename(dir_name)
                pkg_dir = os.path.join(package.path, dir_name)

                assert os.path.isabs(new)
                assert os.path.isabs(pkg_dir)

                try:
                    symlink_all(pkg_dir, new)

                    # Symlink all applicable role-based config
                    for role in self.__roles:
                        role_dir = os.path.join(
                            package.path, "{0}_{1}".format(dir_name, role))
                        symlink_all(role_dir, new)

                except ConflictingFile as ex:
                    raise ValidationError(
                        "Two packages are trying to install the same file {0} or "
                        "two roles in the set of roles {1} are causing a package "
                        "to try activating multiple versions of the same file. "
                        "One of the package files is {2}.".format(
                            ex.dest, self.__roles, ex.src))

            # Add to the active folder
            os.symlink(
                package.path,
                os.path.join(self._make_abs("active.new"), package.name))

            # Add to the environment and environment.export contents

            env_contents += "# package: {0}\n".format(package.id)
            env_export_contents += "# package: {0}\n".format(package.id)

            for k, v in package.environment.items():
                env_contents += "{0}={1}\n".format(k, v)
                env_export_contents += "export {0}={1}\n".format(k, v)

            env_contents += "\n"
            env_export_contents += "\n"

            # Add to the buildinfo
            try:
                active_buildinfo_full[package.name] = load_json(
                    os.path.join(package.path, "buildinfo.full.json"))
            except FileNotFoundError:
                # TODO(cmaloney): These only come from setup-packages. Should update
                # setup-packages to add a buildinfo.full for those packages
                active_buildinfo_full[package.name] = None

            # NOTE: It is critical the state dir, the package name and the user name are all the
            # same. Otherwise on upgrades we might remove access to a files by changing their chown
            # to something incompatible. We survive the first upgrade because everything goes from
            # root to specific users, and root can access all user files.
            if package.username is not None:
                sysusers.add_user(package.username, package.group)

            # Ensure the state directory exists
            # TODO(cmaloney): On upgrade take a snapshot?
            if self.__manage_state_dir:
                state_dir_path = self.__state_dir_root + '/' + package.name
                if package.state_directory:
                    make_directory(state_dir_path)
                    if package.username and not is_windows:
                        uid = sysusers.get_uid(package.username)
                        check_call(['chown', '-R', str(uid), state_dir_path])

            if package.sysctl:
                service_names = _get_service_names(package.path)

                if not service_names:
                    raise ValueError(
                        "service name required for sysctl could not be determined for {package}"
                        .format(package=package.id))

                for service in service_names:
                    if service in package.sysctl:
                        dcos_service_configuration["sysctl"][
                            service] = package.sysctl[service]

        # Prepare new systemd units for activation.
        if not self.__skip_systemd_dirs:
            new_wants_dir = self._make_abs(self.__systemd_dir + ".new")
            if os.path.exists(new_wants_dir):
                self.systemd.stage_new_units(new_wants_dir)

        dcos_service_configuration_file = os.path.join(
            self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
        write_json(dcos_service_configuration_file, dcos_service_configuration)

        # Write out the new environment file.
        new_env = self._make_abs("environment.new")
        write_string(new_env, env_contents)

        # Write out the new environment.export file
        new_env_export = self._make_abs("environment.export.new")
        write_string(new_env_export, env_export_contents)

        # Write out the buildinfo of every active package
        new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
        write_json(new_buildinfo_meta, active_buildinfo_full)

        self.swap_active(".new")
from pkgpanda.util import load_json

dcos_config = load_json('/opt/mesosphere/etc/expanded.config.json')
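
The snippet above loads the expanded DC/OS configuration via pkgpanda's load_json helper, which the examples below also lean on for buildinfo files and test assertions. As a point of reference, here is a minimal sketch of load_json and write_json assuming only the standard library (the real write_json may also write atomically via a temp file, which this sketch omits):

import json


def load_json(filename):
    # Parse a JSON document from disk and return the resulting object.
    with open(filename) as f:
        return json.load(f)


def write_json(filename, data):
    # Serialize `data` to disk as JSON (non-atomic sketch).
    with open(filename, 'w') as f:
        json.dump(data, f, indent=2, sort_keys=True)
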
Example #32
def test_setup(tmpdir):
    repo_path = tmp_repository(tmpdir)
    tmpdir.join("root", "bootstrap").write("", ensure=True)

    check_call([
        "pkgpanda", "setup", "--root={0}/root".format(tmpdir),
        "--rooted-systemd", "--repository={}".format(repo_path),
        "--config-dir=../resources/etc-active", "--no-systemd"
    ])

    expect_fs("{0}".format(tmpdir), ["repository", "root"])

    # TODO(cmaloney): Validate things got placed correctly.
    expect_fs(
        "{0}/root".format(tmpdir),
        {
            "active": ["env", "mesos", "mesos-config"],
            "active.buildinfo.full.json": None,
            "bin": ["mesos", "mesos-dir", "mesos-master", "mesos-slave"],
            "lib": ["libmesos.so"],
            "etc": ["dcos-service-configuration.json", "foobar", "some.json"],
            "include": [],
            "dcos.target.wants": ["dcos-mesos-master.service"],
            "dcos.target": None,
            "environment": None,
            "environment.export": None,
            "dcos-mesos-master.service": None  # rooted_systemd
        })

    expected_dcos_service_configuration = {
        "sysctl": {
            "dcos-mesos-master": {
                "kernel.watchdog_thresh": "11",
                "net.netfilter.nf_conntrack_udp_timeout": "30"
            },
            "dcos-mesos-slave": {
                "kperf.debug_level": "1"
            }
        }
    }

    assert expected_dcos_service_configuration == load_json(
        "{tmpdir}/root/etc/dcos-service-configuration.json".format(
            tmpdir=tmpdir))

    # Introspection should work right
    active = set(
        check_output([
            "pkgpanda", "active", "--root={0}/root".format(tmpdir),
            "--rooted-systemd", "--repository={}".format(repo_path),
            "--config-dir=../resources/etc-active"
        ]).decode("utf-8").split())

    assert active == {
        "env--setup", "mesos--0.22.0",
        "mesos-config--ffddcfb53168d42f92e4771c6f8a8a9a818fd6b8"
    }
    tmpdir.join("root", "bootstrap").write("", ensure=True)
    # If we set up the same directory again, we should get .old files.
    check_call([
        "pkgpanda", "setup", "--root={0}/root".format(tmpdir),
        "--rooted-systemd", "--repository={}".format(repo_path),
        "--config-dir=../resources/etc-active", "--no-systemd"
    ])
    # TODO(cmaloney): Validate things got placed correctly.

    expect_fs(
        "{0}/root".format(tmpdir),
        {
            "active": ["env", "mesos", "mesos-config"],
            "active.buildinfo.full.json.old": None,
            "active.buildinfo.full.json": None,
            "bin": ["mesos", "mesos-dir", "mesos-master", "mesos-slave"],
            "lib": ["libmesos.so"],
            "etc": ["dcos-service-configuration.json", "foobar", "some.json"],
            "include": [],
            "dcos.target": None,
            "dcos.target.wants": ["dcos-mesos-master.service"],
            "environment": None,
            "environment.export": None,
            "active.old": ["env", "mesos", "mesos-config"],
            "bin.old": ["mesos", "mesos-dir", "mesos-master", "mesos-slave"],
            "lib.old": ["libmesos.so"],
            "etc.old":
            ["dcos-service-configuration.json", "foobar", "some.json"],
            "include.old": [],
            "dcos.target.wants.old": ["dcos-mesos-master.service"],
            "environment.old": None,
            "environment.export.old": None,
            "dcos-mesos-master.service": None  # rooted systemd
        })

    # Should only pick up the packages once / one active set.
    active = set(
        check_output([
            "pkgpanda", "active", "--root={0}/root".format(tmpdir),
            "--rooted-systemd", "--repository={}".format(repo_path),
            "--config-dir=../resources/etc-active"
        ]).decode('utf-8').split())

    assert active == {
        "env--setup", "mesos--0.22.0",
        "mesos-config--ffddcfb53168d42f92e4771c6f8a8a9a818fd6b8"
    }

    # Touch some .new files so we can be sure that deactivate cleans those up as well.
    tmpdir.mkdir("root/bin.new")
    tmpdir.mkdir("root/lib.new")
    tmpdir.mkdir("root/etc.new")
    tmpdir.mkdir("root/foo.new")
    tmpdir.mkdir("root/baz")
    tmpdir.mkdir("root/foobar.old")
    tmpdir.mkdir("root/packages")

    # Uninstall / deactivate everything.
    check_call([
        "pkgpanda", "uninstall", "--root={0}/root".format(tmpdir),
        "--rooted-systemd", "--repository={}".format(repo_path),
        "--config-dir=../resources/etc-active", "--no-systemd"
    ])

    expect_fs("{0}".format(tmpdir), {"repository": None})
Example #33
    def activate(self, packages):
        # Ensure the new set is reasonable.
        validate_compatible(packages, self.__roles)

        # Build the absolute paths for the running config, new config location,
        # and where to archive the config.
        active_names = self.get_active_names()
        active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))

        new_names = [name + ".new" for name in active_names]
        new_dirs = [name + ".new" for name in active_dirs]

        old_names = [name + ".old" for name in active_names]

        # Remove all pre-existing new and old directories
        for name in chain(new_names, old_names):
            if os.path.exists(name):
                if os.path.isdir(name):
                    shutil.rmtree(name)
                else:
                    os.remove(name)

        # Make the directories for the new config
        for name in new_dirs:
            os.makedirs(name)

        # Fill in all the new contents
        def symlink_all(src, dest):
            if not os.path.isdir(src):
                return

            symlink_tree(src, dest)

        # Set the new LD_LIBRARY_PATH, PATH.
        env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
        env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)

        active_buildinfo_full = {}

        # Add the folders and config from each package.
        for package in packages:
            # Package folders
            # NOTE: Since "active" is at the end of the folder list, zip()
            # drops it here (zip truncates to the shorter of new_dirs and
            # self.__well_known_dirs). This is the desired behavior, since
            # "active.new" is populated later.
            # Take the basename because some well-known dirs are full paths
            # (dcos.target.wants), while inside packages they are always
            # top-level directories.
            for new, dir_name in zip(new_dirs, self.__well_known_dirs):
                dir_name = os.path.basename(dir_name)
                pkg_dir = os.path.join(package.path, dir_name)
                assert os.path.isabs(new)
                assert os.path.isabs(pkg_dir)

                try:
                    symlink_all(pkg_dir, new)

                    # Symlink all applicable role-based config
                    for role in self.__roles:
                        role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
                        symlink_all(role_dir, new)
                except ConflictingFile as ex:
                    raise ValidationError("Two packages are trying to install the same file {0} or "
                                          "two roles in the set of roles {1} are causing a package "
                                          "to try activating multiple versions of the same file. "
                                          "One of the package files is {2}.".format(
                                            ex.dest,
                                            self.__roles,
                                            ex.src))

            # Add to the active folder
            os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))

            # Add to the environment contents
            env_contents += "# package: {0}\n".format(package.id)
            for k, v in package.environment.items():
                env_contents += "{0}={1}\n".format(k, v)
            env_contents += "\n"

            # Add to the environment.export contents
            env_export_contents += "# package: {0}\n".format(package.id)
            for k, v in package.environment.items():
                env_export_contents += "export {0}={1}\n".format(k, v)
            env_export_contents += "\n"

            # Add to the buildinfo
            try:
                active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
            except FileNotFoundError:
                # TODO(cmaloney): These only come from setup-packages. Should update
                # setup-packages to add a buildinfo.full for those packages
                active_buildinfo_full[package.name] = None

        # Write out the new environment file.
        new_env = self._make_abs("environment.new")
        write_string(new_env, env_contents)

        # Write out the new environment.export file
        new_env_export = self._make_abs("environment.export.new")
        write_string(new_env_export, env_export_contents)

        # Write out the buildinfo of every active package
        new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
        write_json(new_buildinfo_meta, active_buildinfo_full)

        self.swap_active(".new")
Example #34
def _do_bootstrap(install, repository):
    # These files should be set by the environment which initially builds
    # the host (cloud-init).
    repository_url = if_exists(load_string, install.get_config_filename("setup-flags/repository-url"))

    def fetcher(id, target):
        if repository_url is None:
            raise ValidationError("ERROR: Non-local package {} but no repository url given.".format(id))
        return requests_fetcher(repository_url, id, target, os.getcwd())

    setup_pkg_dir = install.get_config_filename("setup-packages")
    if os.path.exists(setup_pkg_dir):
        raise ValidationError(
            "setup-packages is no longer supported. It's functionality has been replaced with late "
            "binding packages. Found setup packages dir: {}".format(setup_pkg_dir))

    setup_packages_to_activate = []

    # If the host has late config values, build the late config package from them.
    late_config = if_exists(load_yaml, install.get_config_filename("setup-flags/late-config.yaml"))
    if late_config:
        pkg_id_str = late_config['late_bound_package_id']
        late_values = late_config['bound_values']
        print("Binding late config to late package {}".format(pkg_id_str))
        print("Bound values: {}".format(late_values))

        if not PackageId.is_id(pkg_id_str):
            raise ValidationError("Invalid late package id: {}".format(pkg_id_str))
        pkg_id = PackageId(pkg_id_str)
        if pkg_id.version != "setup":
            raise ValidationError("Late package must have the version setup. Bad package: {}".format(pkg_id_str))

        # Collect the late config package.
        with tempfile.NamedTemporaryFile() as f:
            download(
                f.name,
                repository_url + '/packages/{0}/{1}.dcos_config'.format(pkg_id.name, pkg_id_str),
                os.getcwd(),
                rm_on_error=False,
            )
            late_package = load_yaml(f.name)

        # Resolve the late package using the bound late config values.
        final_late_package = resolve_late_package(late_package, late_values)

        # Render the package onto the filesystem and add it to the package
        # repository.
        with tempfile.NamedTemporaryFile() as f:
            do_gen_package(final_late_package, f.name)
            repository.add(lambda _, target: extract_tarball(f.name, target), pkg_id_str)
        setup_packages_to_activate.append(pkg_id_str)

    # If active.json is set on the host, use that as the set of packages to
    # activate. Otherwise just use the set of currently active packages (those
    # active in the bootstrap tarball)
    to_activate = None
    active_path = install.get_config_filename("setup-flags/active.json")
    if os.path.exists(active_path):
        print("Loaded active packages from", active_path)
        to_activate = load_json(active_path)

        # Ensure all packages are local
        print("Ensuring all packages in active set {} are local".format(",".join(to_activate)))
        for package in to_activate:
            repository.add(fetcher, package)
    else:
        print("Calculated active packages from bootstrap tarball")
        to_activate = list(install.get_active())

        package_list_filename = install.get_config_filename("setup-flags/cluster-package-list")
        print("Checking for cluster packages in:", package_list_filename)
        package_list_id = if_exists(load_string, package_list_filename)
        if package_list_id:
            print("Cluster package list:", package_list_id)
            cluster_packages = _get_package_list(package_list_id, repository_url)
            print("Loading cluster-packages: {}".format(cluster_packages))

            for package_id_str in cluster_packages:
                # Validate the package ids
                pkg_id = PackageId(package_id_str)

                # Fetch the packages if not local
                if not repository.has_package(package_id_str):
                    repository.add(fetcher, package_id_str)

                # Add the package to the set to activate
                setup_packages_to_activate.append(package_id_str)
        else:
            print("No cluster-packages specified")

    # Calculate the full set of final packages (Explicit activations + setup packages).
    # De-duplicate using a set.
    to_activate = list(set(to_activate + setup_packages_to_activate))

    print("Activating packages")
    install.activate(repository.load_packages(to_activate))
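
Two helpers carry much of the bootstrap flow above: if_exists applies a loader only when the file is present, and PackageId parses ids of the form name--version (for example mesos--0.22.0). Below are minimal sketches consistent with how they are called here; the real is_id presumably also constrains the allowed characters:

import os


def if_exists(fn, path):
    # Apply `fn` to `path` when the file exists; otherwise return None.
    if os.path.exists(path):
        return fn(path)
    return None


class PackageId:
    # A package id is a name and a version joined by '--'.
    def __init__(self, id_str):
        self.name, self.version = id_str.split('--', 1)

    @staticmethod
    def is_id(id_str):
        # Sketch of the validity check: exactly one '--' with non-empty halves.
        parts = id_str.split('--')
        return len(parts) == 2 and all(parts)
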
Example #35
def test_setup(tmpdir):
    repo_path = tmp_repository(tmpdir)
    tmpdir.join("root", "bootstrap").write("", ensure=True)

    check_call(["pkgpanda",
                "setup",
                "--root={0}/root".format(tmpdir),
                "--rooted-systemd",
                "--repository={}".format(repo_path),
                "--config-dir={}".format(resources_test_dir("etc-active")),
                "--no-systemd"
                ])

    expect_fs("{0}".format(tmpdir), ["repository", "root"])

    # TODO(cmaloney): Validate things got placed correctly.
    expect_fs(
        "{0}/root".format(tmpdir),
        {
            "active": ["dcos-provider-abcdef-test", "mesos", "mesos-config"],
            "active.buildinfo.full.json": None,
            "bin": [
                "mesos",
                "mesos-dir",
                "mesos-master",
                "mesos-slave"],
            "lib": ["libmesos.so"],
            "etc": ["dcos-service-configuration.json", "foobar", "some.json"],
            "include": [],
            "dcos.target.wants": ["dcos-mesos-master.service"],
            "dcos.target": None,
            "environment": None,
            "environment.export": None,
            "dcos-mesos-master.service": None           # rooted_systemd
        })

    expected_dcos_service_configuration = {
        "sysctl": {
            "dcos-mesos-master": {
                "kernel.watchdog_thresh": "11",
                "net.netfilter.nf_conntrack_udp_timeout": "30"
            },
            "dcos-mesos-slave": {
                "kperf.debug_level": "1"
            }
        }
    }

    assert expected_dcos_service_configuration == load_json(
        "{tmpdir}/root/etc/dcos-service-configuration.json".format(tmpdir=tmpdir))

    assert load_json('{0}/root/etc/some.json'.format(tmpdir)) == {
        'cluster-specific-stuff': 'magic',
        'foo': 'bar',
        'baz': 'qux',
    }

    # Introspection should work right
    active = set(check_output([
        "pkgpanda",
        "active",
        "--root={0}/root".format(tmpdir),
        "--rooted-systemd",
        "--repository={}".format(repo_path),
        "--config-dir={}".format(resources_test_dir("etc-active"))]).decode().split())

    assert active == {
        "dcos-provider-abcdef-test--setup",
        "mesos--0.22.0",
        "mesos-config--ffddcfb53168d42f92e4771c6f8a8a9a818fd6b8",
    }
    tmpdir.join("root", "bootstrap").write("", ensure=True)
    # If we set up the same directory again, we should get .old files.
    check_call(["pkgpanda",
                "setup",
                "--root={0}/root".format(tmpdir),
                "--rooted-systemd",
                "--repository={}".format(repo_path),
                "--config-dir={}".format(resources_test_dir("etc-active")),
                "--no-systemd"
                ])
    # TODO(cmaloney): Validate things got placed correctly.

    expect_fs(
        "{0}/root".format(tmpdir),
        {
            "active": ["dcos-provider-abcdef-test", "mesos", "mesos-config"],
            "active.buildinfo.full.json.old": None,
            "active.buildinfo.full.json": None,
            "bin": [
                "mesos",
                "mesos-dir",
                "mesos-master",
                "mesos-slave"],
            "lib": ["libmesos.so"],
            "etc": ["dcos-service-configuration.json", "foobar", "some.json"],
            "include": [],
            "dcos.target": None,
            "dcos.target.wants": ["dcos-mesos-master.service"],
            "environment": None,
            "environment.export": None,
            "active.old": ["dcos-provider-abcdef-test", "mesos", "mesos-config"],
            "bin.old": [
                "mesos",
                "mesos-dir",
                "mesos-master",
                "mesos-slave"],
            "lib.old": ["libmesos.so"],
            "etc.old": ["dcos-service-configuration.json", "foobar", "some.json"],
            "include.old": [],
            "dcos.target.wants.old": ["dcos-mesos-master.service"],
            "environment.old": None,
            "environment.export.old": None,
            "dcos-mesos-master.service": None       # rooted systemd
        })

    # Should only pick up the packages once / one active set.
    active = set(check_output([
        "pkgpanda",
        "active",
        "--root={0}/root".format(tmpdir),
        "--rooted-systemd",
        "--repository={}".format(repo_path),
        "--config-dir={}".format(resources_test_dir("etc-active"))]).decode().split())

    assert active == {
        "dcos-provider-abcdef-test--setup",
        "mesos--0.22.0",
        "mesos-config--ffddcfb53168d42f92e4771c6f8a8a9a818fd6b8",
    }

    # Touch some .new files so we can be sure that deactivate cleans those up as well.
    tmpdir.mkdir("root/bin.new")
    tmpdir.mkdir("root/lib.new")
    tmpdir.mkdir("root/etc.new")
    tmpdir.mkdir("root/foo.new")
    tmpdir.mkdir("root/baz")
    tmpdir.mkdir("root/foobar.old")
    tmpdir.mkdir("root/packages")

    # Uninstall / deactivate everything.
    check_call(["pkgpanda",
                "uninstall",
                "--root={0}/root".format(tmpdir),
                "--rooted-systemd",
                "--repository={}".format(repo_path),
                "--config-dir={}".format(resources_test_dir("etc-active")),
                "--no-systemd"
                ])

    expect_fs("{0}".format(tmpdir), {"repository": None})
Example #36
    def activate(self, packages):
        # Ensure the new set is reasonable.
        validate_compatible(packages, self.__roles)

        # Build the absolute paths for the running config, new config location,
        # and where to archive the config.
        active_names = self.get_active_names()
        active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))

        new_names = [name + ".new" for name in active_names]
        new_dirs = [name + ".new" for name in active_dirs]

        old_names = [name + ".old" for name in active_names]

        # Remove all pre-existing new and old directories
        for name in chain(new_names, old_names):
            if os.path.exists(name):
                if os.path.isdir(name):
                    shutil.rmtree(name)
                else:
                    os.remove(name)

        # Make the directories for the new config
        for name in new_dirs:
            os.makedirs(name)

        def symlink_all(src, dest):
            if not os.path.isdir(src):
                return

            symlink_tree(src, dest)

        # Set the new LD_LIBRARY_PATH, PATH.
        env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
        env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)

        active_buildinfo_full = {}

        dcos_service_configuration = self._get_dcos_configuration_template()

        # Building up the set of users
        sysusers = UserManagement(self.__manage_users, self.__add_users)

        def _get_service_files(_dir):
            service_files = []
            for root, directories, filenames in os.walk(_dir):
                for filename in filter(lambda name: name.endswith(".service"), filenames):
                    service_files.append(os.path.join(root, filename))
            return service_files

        def _get_service_names(_dir):
            service_files = list(map(os.path.basename, _get_service_files(_dir)))

            if not service_files:
                return []

            return list(map(lambda name: os.path.splitext(name)[0], service_files))

        # Add the folders and config from each package.
        for package in packages:
            # Package folders
            # NOTE: Since "active" is at the end of the folder list, zip()
            # drops it here (zip truncates to the shorter of new_dirs and
            # self.__well_known_dirs). This is the desired behavior, since
            # "active.new" is populated later.
            # Take the basename because some well-known dirs are full paths
            # (dcos.target.wants), while inside packages they are always
            # top-level directories.
            for new, dir_name in zip(new_dirs, self.__well_known_dirs):
                dir_name = os.path.basename(dir_name)
                pkg_dir = os.path.join(package.path, dir_name)

                assert os.path.isabs(new)
                assert os.path.isabs(pkg_dir)

                try:
                    symlink_all(pkg_dir, new)

                    # Symlink all applicable role-based config
                    for role in self.__roles:
                        role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
                        symlink_all(role_dir, new)

                except ConflictingFile as ex:
                    raise ValidationError("Two packages are trying to install the same file {0} or "
                                          "two roles in the set of roles {1} are causing a package "
                                          "to try activating multiple versions of the same file. "
                                          "One of the package files is {2}.".format(ex.dest,
                                                                                    self.__roles,
                                                                                    ex.src))

            # Add to the active folder
            os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))

            # Add to the environment and environment.export contents

            env_contents += "# package: {0}\n".format(package.id)
            env_export_contents += "# package: {0}\n".format(package.id)

            for k, v in package.environment.items():
                env_contents += "{0}={1}\n".format(k, v)
                env_export_contents += "export {0}={1}\n".format(k, v)

            env_contents += "\n"
            env_export_contents += "\n"

            # Add to the buildinfo
            try:
                active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
            except FileNotFoundError:
                # TODO(cmaloney): These only come from setup-packages. Should update
                # setup-packages to add a buildinfo.full for those packages
                active_buildinfo_full[package.name] = None

            # NOTE: It is critical that the state dir, the package name, and
            # the user name are all the same. Otherwise on upgrades we might
            # remove access to files by changing their ownership to something
            # incompatible. We survive the first upgrade because everything
            # goes from root to specific users, and root can access all user
            # files.
            if package.username is not None:
                sysusers.add_user(package.username, package.group)

            # Ensure the state directory exists
            # TODO(cmaloney): On upgrade take a snapshot?
            if self.__manage_state_dir:
                state_dir_path = self.__state_dir_root + '/' + package.name
                if package.state_directory:
                    check_call(['mkdir', '-p', state_dir_path])

                    if package.username:
                        uid = sysusers.get_uid(package.username)
                        check_call(['chown', '-R', str(uid), state_dir_path])

            if package.sysctl:
                service_names = _get_service_names(package.path)

                if not service_names:
                    raise ValueError("service name required for sysctl could not be determined for {package}".format(
                        package=package.id))

                for service in service_names:
                    if service in package.sysctl:
                        dcos_service_configuration["sysctl"][service] = package.sysctl[service]

        dcos_service_configuration_file = os.path.join(self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
        write_json(dcos_service_configuration_file, dcos_service_configuration)

        # Write out the new environment file.
        new_env = self._make_abs("environment.new")
        write_string(new_env, env_contents)

        # Write out the new environment.export file
        new_env_export = self._make_abs("environment.export.new")
        write_string(new_env_export, env_export_contents)

        # Write out the buildinfo of every active package
        new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
        write_json(new_buildinfo_meta, active_buildinfo_full)

        self.swap_active(".new")