def build_dcos_file_from_universe_definition(
    package: Dict, dcos_files_path: str, tmpdir_factory: TempdirFactory,
) -> Tuple[str, str, str]:
    """
    Build the .dcos file if its not already present in the given directory.
    Returns a Tuple containing (path of .dcos file, name, and version)
    """
    # TODO Ideally we should `migrate` and then `build`.
    name = package["name"]
    version = package["version"]
    target = os.path.join(dcos_files_path, "{}-{}.dcos".format(name, version))
    if os.path.isfile(target):
        log.info("Skipping build, using cached file : {}".format(target))
    else:
        del package["releaseVersion"]
        del package["selected"]
        package_json_file = tmpdir_factory.mktemp(sdk_utils.random_string()).join(
            sdk_utils.random_string()
        )
        package_json_file.write(json.dumps(package))
        rc, _, _ = sdk_cmd.run_cli(
            " ".join(
                [
                    "registry",
                    "build",
                    "--build-definition-file={}".format(str(package_json_file)),
                    "--output-directory={}".format(dcos_files_path),
                    "--json",
                ]
            )
        )
        assert rc == 0
    assert os.path.isfile(target), "No valid .dcos file was built"
    return target, name, version
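# Hedged usage sketch (not from the original source): how a caller might feed a
# single package entry from a stub-universe JSON into the function above. The
# URL, the requests dependency, and the output directory are illustrative
# assumptions; entries are assumed to carry the releaseVersion/selected fields
# that the builder deletes.
def example_build_one_package(tmpdir_factory: TempdirFactory) -> None:
    import requests

    stub_url = "https://example.com/stub-universe.json"  # hypothetical URL
    universe = requests.get(stub_url).json()
    for package in universe["packages"]:
        path, name, version = build_dcos_file_from_universe_definition(
            package, "/tmp/dcos-files", tmpdir_factory
        )
        log.info("Built {} ({}-{})".format(path, name, version))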
def add_stub_universe_urls(stub_universe_urls: list) -> dict:
    stub_urls = {}

    if not stub_universe_urls:
        return stub_urls

    log.info('Adding stub URLs: {}'.format(stub_universe_urls))
    for idx, url in enumerate(stub_universe_urls):
        log.info('URL {}: {}'.format(idx, repr(url)))
        package_name = 'testpkg-{}'.format(sdk_utils.random_string())
        stub_urls[package_name] = url

    # clean up any duplicate repositories
    current_universes = sdk_cmd.run_cli('package repo list --json')
    for repo in json.loads(current_universes)['repositories']:
        if repo['uri'] in stub_urls.values():
            log.info('Removing duplicate stub URL: {}'.format(repo['uri']))
            sdk_cmd.run_cli('package repo remove {}'.format(repo['name']))

    # add the needed universe repositories
    for name, url in stub_urls.items():
        log.info('Adding stub repo {} URL: {}'.format(name, url))
        rc, stdout, stderr = sdk_cmd.run_raw_cli(
            'package repo add --index=0 {} {}'.format(name, url))
        if rc != 0:
            raise Exception(
                'Failed to add stub repo {} ({}): stdout=[{}], stderr=[{}]'.
                format(name, url, stdout, stderr))

    log.info('Finished adding universe repos')

    return stub_urls
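# Hedged usage sketch (assumption, not from the source): pairing
# add_stub_universe_urls with the sdk_repository.remove_universe_repos teardown
# that other snippets in this listing use. The URL is a placeholder.
def example_stub_repo_session() -> None:
    stub_urls = add_stub_universe_urls(['https://example.com/stub-universe.json'])
    try:
        pass  # exercise the packages served by the stub repo here
    finally:
        sdk_repository.remove_universe_repos(stub_urls)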
def add_dcos_files_to_registry(tmpdir_factory: TempdirFactory) -> None:
    # Use DCOS_FILES_PATH if it's set to a valid, writable path; otherwise use pytest's tmpdir.
    dcos_files_path = os.environ.get("DCOS_FILES_PATH", "")
    valid_path_set = os.path.isdir(dcos_files_path)
    if valid_path_set and not os.access(dcos_files_path, os.W_OK):
        log.warning("{} is not writable.".format(dcos_files_path))
        valid_path_set = False
    if not valid_path_set:
        dcos_files_path = str(tmpdir_factory.mktemp(sdk_utils.random_string()))
    stub_universe_urls = sdk_repository.get_repos()
    log.info(
        "Using {} to build .dcos files (unless already present) from {}".format(
            dcos_files_path, stub_universe_urls
        )
    )
    dcos_files_list = build_dcos_files_from_stubs(
        stub_universe_urls, dcos_files_path, tmpdir_factory
    )
    log.info("Bundled .dcos files : {}".format(dcos_files_list))

    @retrying.retry(stop_max_delay=5 * 60 * 1000, wait_fixed=5 * 1000)
    def wait_for_added_registry(name: str, version: str) -> None:
        code, stdout, stderr = sdk_cmd.run_cli(
            "registry describe --package-name={} --package-version={} --json".format(name, version),
            print_output=False,
        )
        assert code == 0 and json.loads(stdout).get("status") == "Added"

    for file_path, name, version in dcos_files_list:
        rc, out, err = sdk_cmd.run_cli("registry add --dcos-file={} --json".format(file_path))
        assert rc == 0
        assert len(json.loads(out)["packages"]) > 0, "No packages were added"
        wait_for_added_registry(name, version)
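# Hedged sketch of the build_dcos_files_from_stubs helper referenced above; its
# real implementation is not part of this listing. The only assumptions are the
# ones its call sites imply: each stub URL serves a JSON document with a
# "packages" list, and the result is a list of (path, name, version) tuples.
def build_dcos_files_from_stubs(
    stub_universe_urls: List[str], dcos_files_path: str, tmpdir_factory: TempdirFactory
) -> List[Tuple[str, str, str]]:
    import requests

    built = []
    for stub_url in stub_universe_urls:
        universe = requests.get(stub_url).json()
        for package in universe["packages"]:
            built.append(
                build_dcos_file_from_universe_definition(package, dcos_files_path, tmpdir_factory)
            )
    return built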
def add_dcos_files_to_registry(
        tmpdir_factory  # _pytest.TempdirFactory
) -> None:
    # Use DCOS_FILES_PATH if it's set to a valid, writable path; otherwise use pytest's tmpdir.
    dcos_files_path = os.environ.get('DCOS_FILES_PATH', '')
    valid_path_set = os.path.isdir(dcos_files_path)
    if valid_path_set and not os.access(dcos_files_path, os.W_OK):
        log.warning('{} is not writable.'.format(dcos_files_path))
        valid_path_set = False
    if not valid_path_set:
        dcos_files_path = str(tmpdir_factory.mktemp(sdk_utils.random_string()))
    stub_universe_urls = sdk_repository.get_universe_repos()
    log.info('Using {} to build .dcos files (unless already present) from {}'.format(
        dcos_files_path, stub_universe_urls))
    dcos_files_list = build_dcos_files_from_stubs(stub_universe_urls,
                                                  dcos_files_path,
                                                  tmpdir_factory)
    log.info('Bundled .dcos files: {}'.format(dcos_files_list))
    for file_path, name, version in dcos_files_list:
        rc, out, err = sdk_cmd.run_raw_cli(' '.join(
            ['registry', 'add', '--dcos-file={}'.format(file_path), '--json']))
        assert rc == 0
        assert len(json.loads(out)['packages']) > 0, 'No packages were added'
        wait_until_cli_condition(
            ' '.join([
                'registry', 'describe', '--package-name=' + name,
                '--package-version=' + version, '--json'
            ]), lambda code, out, err: code == 0 and json.loads(out).get(
                'status') == 'Added')
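# Hedged sketch of the wait_until_cli_condition helper used above; it is not
# shown in this listing. The retry window mirrors the retrying decorator used
# elsewhere here (poll every 5 seconds for up to 5 minutes); Callable comes
# from typing.
@retrying.retry(stop_max_delay=5 * 60 * 1000, wait_fixed=5 * 1000)
def wait_until_cli_condition(cmd: str, check: Callable[[int, str, str], bool]) -> None:
    code, out, err = sdk_cmd.run_raw_cli(cmd)
    assert check(code, out, err), 'Condition not yet met for: {}'.format(cmd)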
def package_registry_session(tmpdir_factory: TempdirFactory) -> Iterator[None]:
    pkg_reg_repo: Dict[str, str] = {}
    service_uid = "pkg-reg-uid-{}".format(sdk_utils.random_string())
    secret_path = "{}-secret-{}".format(service_uid, sdk_utils.random_string())
    try:
        sdk_security.create_service_account(service_uid, secret_path)
        grant_perms_for_registry_account(service_uid)
        pkg_reg_repo = install_package_registry(secret_path)
        add_dcos_files_to_registry(tmpdir_factory)
        yield
    finally:
        log.info("Teardown of package_registry_session initiated")
        sdk_repository.remove_universe_repos(pkg_reg_repo)
        # TODO If/when adding S3 backend, remove `Added` packages.
        sdk_install.uninstall(PACKAGE_REGISTRY_NAME, PACKAGE_REGISTRY_SERVICE_NAME)
        # No need to revoke perms, just delete the secret; the following ignores any failures.
        sdk_security.delete_service_account(service_uid, secret_path)
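# Hedged usage sketch (assumption): exposing package_registry_session as a
# session-scoped pytest yield-fixture; the fixture name and scope are not from
# the original source.
@pytest.fixture(scope="session")
def package_registry(tmpdir_factory: TempdirFactory) -> Iterator[None]:
    yield from package_registry_session(tmpdir_factory)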
def package_registry_session(tmpdir_factory):  # _pytest.TempdirFactory
    pkg_reg_stub = {}
    pkg_reg_repo = {}
    try:
        # TODO Remove stub. We should install from bootstrap registry.
        pkg_reg_stub = add_package_registry_stub()
        service_uid = 'pkg-reg-uid-{}'.format(sdk_utils.random_string())
        secret_path = '{}-secret-{}'.format(service_uid,
                                            sdk_utils.random_string())
        sdk_security.create_service_account(service_uid, secret_path)
        grant_perms_for_registry_account(service_uid)
        pkg_reg_repo = install_package_registry(secret_path)
        add_dcos_files_to_registry(tmpdir_factory)
        yield
    finally:
        log.info('Teardown of package_registry_session initiated')
        sdk_repository.remove_universe_repos(pkg_reg_repo)
        # TODO If/when adding S3 backend, remove `Added` packages.
        sdk_install.uninstall(PACKAGE_REGISTRY_NAME,
                              PACKAGE_REGISTRY_SERVICE_NAME)
        sdk_repository.remove_universe_repos(pkg_reg_stub)
        # No need to revoke perms, just delete the secret.
        sdk_security.delete_service_account(service_uid, secret_path)
def test_scaling_load(master_count, job_count, single_use: bool, run_delay,
                      cpu_quota, work_duration, mom, external_volume: bool,
                      scenario) -> None:
    """Launch a load test scenario. This does not verify the results
    of the test, but does ensure the instances and jobs were created.

    The installation is run in threads, but the job creation and
    launch is then done serially after all Jenkins instances have
    completed installation.

    Args:
        master_count: Number of Jenkins masters or instances
        job_count: Number of Jobs on each Jenkins master
        single_use: Mesos Single-Use Agent on (true) or off (false)
        run_delay: Jobs should run every X minute(s)
        cpu_quota: CPU quota (0.0 to disable)
        work_duration: Time, in seconds, for generated jobs to sleep
        mom: Marathon on Marathon instance name
        external_volume: External volume on rexray (true) or local volume (false)
    """
    with shakedown.marathon_on_marathon(mom):
        if cpu_quota != 0.0:
            _setup_quota(SHARED_ROLE, cpu_quota)

    masters = [
        "jenkins{}".format(sdk_utils.random_string())
        for _ in range(0, int(master_count))
    ]
    # launch Jenkins services
    install_threads = _spawn_threads(masters,
                                     _install_jenkins,
                                     external_volume=external_volume,
                                     mom=mom,
                                     daemon=True)
    thread_failures = _wait_and_get_failures(install_threads,
                                             timeout=DEPLOY_TIMEOUT)
    thread_names = [x.name for x in thread_failures]

    # the rest of the commands require a running Jenkins instance
    deployed_masters = [x for x in masters if x not in thread_names]
    job_threads = _spawn_threads(deployed_masters,
                                 _create_jobs,
                                 jobs=job_count,
                                 single=single_use,
                                 delay=run_delay,
                                 duration=work_duration,
                                 scenario=scenario)
    _wait_on_threads(job_threads, JOB_RUN_TIMEOUT)
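# Hedged sketch of the _spawn_threads helper used above; the real implementation
# is not included in this listing. It assumes one named worker thread per
# Jenkins master, with the remaining keyword arguments forwarded to the target.
def _spawn_threads(names, target, daemon=False, **kwargs) -> list:
    import threading

    threads = []
    for name in names:
        thread = threading.Thread(
            target=target, args=(name,), kwargs=kwargs, name=name, daemon=daemon
        )
        thread.start()
        threads.append(thread)
    return threads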
def create_slave() -> Iterator[str]:
    label = sdk_utils.random_string()
    r = jenkins.create_mesos_slave_node(label,
                                        service_name=config.SERVICE_NAME)
    assert r.status_code == 200, 'create_mesos_slave_node failed: {}'.format(
        r.status_code)
    assert label in r.text, 'Label {} missing from {}'.format(label, r.text)
    log.info("Set of labels is now: %s", r.text)
    yield label
    log.info("Removing label %s", label)
    r = jenkins_remote_access.remove_slave_info(
        label, service_name=config.SERVICE_NAME)
    assert r.status_code == 200, 'remove_slave_info failed: {}'.format(
        r.status_code)
    assert label not in r.text, 'Label {} still present in {}'.format(
        label, r.text)
    log.info("Set of labels is now: %s", r.text)
def add_stub_universe_urls(stub_universe_urls: List[str]) -> Dict[str, str]:
    stub_urls: Dict[str, str] = {}

    if not stub_universe_urls:
        return stub_urls

    # clean up any duplicate repositories
    _, current_universes, _ = sdk_cmd.run_cli("package repo list --json")
    for repo in json.loads(current_universes)["repositories"]:
        if repo["uri"] in stub_universe_urls:
            log.info("Removing duplicate stub URL: {}".format(repo["uri"]))
            assert remove_repo(repo["name"])

    # add the needed universe repositories
    log.info("Adding stub URLs: {}".format(stub_universe_urls))
    for url in stub_universe_urls:
        name = "testpkg-{}".format(sdk_utils.random_string())
        stub_urls[name] = url
        assert add_repo(name, url, 0)

    return stub_urls
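# Hedged sketch of the add_repo/remove_repo helpers asserted on above; they are
# not shown in this listing. This version assumes thin wrappers over the
# `package repo` CLI (matching the commands used elsewhere here) that report
# success as a boolean.
def add_repo(name: str, url: str, index: int) -> bool:
    rc, _, _ = sdk_cmd.run_cli("package repo add --index={} {} {}".format(index, name, url))
    return rc == 0


def remove_repo(name: str) -> bool:
    rc, _, _ = sdk_cmd.run_cli("package repo remove {}".format(name))
    return rc == 0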
def test_scaling_load(master_count, job_count, single_use: bool, run_delay,
                      cpu_quota, work_duration, mom, external_volume: bool,
                      scenario, min_index, max_index, batch_size) -> None:
    """Launch a load test scenario. This does not verify the results
    of the test, but does ensure the instances and jobs were created.

    The installation is run in threads, but the job creation and
    launch is then done serially after all Jenkins instances have
    completed installation.

    Args:
        master_count: Number of Jenkins masters or instances
        job_count: Number of Jobs on each Jenkins master
        single_use: Mesos Single-Use Agent on (true) or off (false)
        run_delay: Jobs should run every X minute(s)
        cpu_quota: CPU quota (0.0 to disable)
        work_duration: Time, in seconds, for generated jobs to sleep
        mom: Marathon on Marathon instance name
        external_volume: External volume on rexray (true) or local volume (false)
        min_index: minimum index to begin jenkins suffixes at
        max_index: maximum index to end jenkins suffixes at
        batch_size: batch size to deploy jenkins instances in
    """
    security_mode = sdk_dcos.get_security_mode()
    if mom and cpu_quota != 0.0:
        with shakedown.marathon_on_marathon(mom):
            _setup_quota(SHARED_ROLE, cpu_quota)

    # create marathon client
    if mom:
        with shakedown.marathon_on_marathon(mom):
            marathon_client = shakedown.marathon.create_client()
    else:
        marathon_client = shakedown.marathon.create_client()

    masters = []
    if min_index == -1 or max_index == -1:
        masters = [
            "jenkins{}".format(sdk_utils.random_string())
            for _ in range(0, int(master_count))
        ]
    else:
        # max and min indexes are specified
        # NOTE: using min/max will override master_count
        masters = [
            "jenkins{}".format(index) for index in range(min_index, max_index)
        ]
    # create service accounts in parallel
    sdk_security.install_enterprise_cli()
    service_account_threads = _spawn_threads(masters,
                                             _create_service_accounts,
                                             security=security_mode)

    thread_failures = _wait_and_get_failures(service_account_threads,
                                             timeout=SERVICE_ACCOUNT_TIMEOUT)
    # launch Jenkins services
    current = 0
    end = len(masters)  # covers both the index-range and master_count paths
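    # NOTE: only full batches are deployed; a trailing partial batch (when the
    # master count is not a multiple of batch_size) is skipped by this loop.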
    while current + batch_size <= end:
        log.info(
            "Re-authenticating current batch load of jenkins{} - jenkins{} "
            "to prevent auth-timeouts on scale cluster.".format(
                current, current + batch_size))
        dcos_login.login_session()

        batched_masters = masters[current:current + batch_size]
        install_threads = _spawn_threads(batched_masters,
                                         _install_jenkins,
                                         event='deployments',
                                         client=marathon_client,
                                         external_volume=external_volume,
                                         security=security_mode,
                                         daemon=True,
                                         mom=mom)
        thread_failures = _wait_and_get_failures(install_threads,
                                                 timeout=DEPLOY_TIMEOUT)
        thread_names = [x.name for x in thread_failures]

        # the rest of the commands require a running Jenkins instance
        deployed_masters = [
            x for x in batched_masters if x not in thread_names
        ]
        job_threads = _spawn_threads(deployed_masters,
                                     _create_jobs,
                                     jobs=job_count,
                                     single=single_use,
                                     delay=run_delay,
                                     duration=work_duration,
                                     scenario=scenario)
        _wait_on_threads(job_threads, JOB_RUN_TIMEOUT)
        r = json.dumps(TIMINGS)
        print(r)
        current = current + batch_size
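# Hedged sketch of the _wait_and_get_failures helper used by both
# test_scaling_load variants; the real implementation is not shown here. This
# version treats a thread that is still alive after the timeout as a failure;
# the real helper presumably also records exceptions raised inside the target.
def _wait_and_get_failures(threads: list, timeout: int) -> list:
    import time

    deadline = time.time() + timeout
    failures = []
    for thread in threads:
        thread.join(max(0.0, deadline - time.time()))
        if thread.is_alive():
            failures.append(thread)
    return failures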