Example #1
@pytest.fixture  # decorator assumed: the yield/teardown pattern marks this as a pytest fixture
def setup_spark(configure_security, configure_universe):
    try:
        utils.require_spark()
        utils.upload_file(os.environ["SCALA_TEST_JAR_PATH"])
        shakedown.run_dcos_command(
            'package install --cli dcos-enterprise-cli --yes')
        yield
    finally:
        utils.teardown_spark()
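
Every example on this page goes through utils.upload_file, which is expected to push a local artifact to object storage and return a URL the job submission can fetch. The helper itself is not shown here; a minimal sketch, assuming an S3 bucket and prefix supplied via hypothetical S3_BUCKET / S3_PREFIX environment variables, with boto3 credentials taken from the environment:

import os

import boto3  # assumed client; the real helper may use a different storage API


def upload_file(file_path):
    """Upload a local file to S3 and return an HTTPS URL for it (sketch)."""
    bucket = os.environ["S3_BUCKET"]          # hypothetical variable name
    prefix = os.environ.get("S3_PREFIX", "")  # hypothetical variable name
    key = os.path.join(prefix, os.path.basename(file_path))
    boto3.client("s3").upload_file(file_path, bucket, key)
    return "https://{}.s3.amazonaws.com/{}".format(bucket, key)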
Example #2
@pytest.fixture  # decorator assumed, as in Example #1
def setup_spark(kerberized_kafka, configure_security_spark,
                configure_universe):
    try:
        # need to do this here also in case this test is run first
        # and the jar hasn't been updated
        utils.upload_file(os.environ["SCALA_TEST_JAR_PATH"])
        utils.require_spark()
        yield
    finally:
        utils.teardown_spark()
Example #3
def test_python():
    python_script_path = os.path.join(THIS_DIR, 'jobs', 'python',
                                      'pi_with_include.py')
    python_script_url = utils.upload_file(python_script_path)
    py_file_path = os.path.join(THIS_DIR, 'jobs', 'python',
                                'PySparkTestInclude.py')
    py_file_url = utils.upload_file(py_file_path)
    utils.run_tests(app_url=python_script_url,
                    app_args="30",
                    expected_output="Pi is roughly 3",
                    args=["--py-files", py_file_url])
Example #4
def test_exclude_message_when_production(set_production_true, fname, key):
    u.setup_aws_mocks()
    bucket = u.create_bucket()
    # Given that PRODUCTION is missing or true
    set_production_true()
    # WHEN a bad file is processed:
    u.upload_file(bucket, fname, key)
    event = u.build_event(bucket.name, key)
    response = lambda_handler(event, u.dummy_context)
    print("response:", response)
    # THEN it should pass & be marked as excluded
    assert "pass" in response['results'][0]['status']
    assert u.is_in("Excluded from validation", response['results'], 'results')
Example #5
def test_pass_no_message_when_no_verbose(setter, fname):
    queue = u.setup_aws_mocks()
    bucket = u.create_bucket()
    # GIVEN that VERBOSE is not set
    setter()
    # WHEN a good file is processed
    bucket_name, key_name = u.upload_file(bucket, fname)
    event = u.build_event(bucket_name, key_name)
    response = lambda_handler(event, u.dummy_context)
    # THEN there should be no message
    count, msg_json = u.get_one_message(queue)
    try:
        msg_dict = json.loads(msg_json)
        msg = msg_dict['Message']
    except ValueError:
        msg = ""

    # print things that will be useful to debug
    print("response:", response)
    print("message:", msg)
    print("count:", count)

    # actual criteria to pass
    assert "pass" in response['results'][0]['status']
    assert count == 0 and msg == ""
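
get_one_message is another helper from the same utility module; the msg_dict['Message'] lookup above implies the queue receives SNS-style JSON envelopes. A sketch against the moto-backed queue from the helper sketch under Example #4 (names assumed):

def get_one_message(queue):
    """Return (count, body) for at most one message on the queue (sketch)."""
    messages = queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0)
    if not messages:
        return 0, ""
    # An SNS-to-SQS subscription delivers a JSON envelope whose 'Message'
    # field holds the published text, matching msg_dict['Message'] above.
    return len(messages), messages[0].body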
Example #6
def test_driver_executor_tls():
    '''
    Put keystore and truststore as secrets in DC/OS secret store.
    Run SparkPi job with TLS enabled, referencing those secrets.
    Make sure other secrets still show up.
    '''
    python_script_path = os.path.join(THIS_DIR, 'jobs', 'python',
                                      'pi_with_secret.py')
    python_script_url = utils.upload_file(python_script_path)
    resources_folder = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'resources')
    keystore_file = 'server.jks'
    truststore_file = 'trust.jks'
    keystore_path = os.path.join(resources_folder,
                                 '{}.base64'.format(keystore_file))
    truststore_path = os.path.join(resources_folder,
                                   '{}.base64'.format(truststore_file))
    keystore_secret = '__dcos_base64__keystore'
    truststore_secret = '__dcos_base64__truststore'
    my_secret = 'mysecret'
    my_secret_content = 'secretcontent'
    shakedown.run_dcos_command(
        'security secrets create /{} --value-file {}'.format(
            keystore_secret, keystore_path))
    shakedown.run_dcos_command(
        'security secrets create /{} --value-file {}'.format(
            truststore_secret, truststore_path))
    shakedown.run_dcos_command('security secrets create /{} --value {}'.format(
        my_secret, my_secret_content))
    password = '******'
    try:
        utils.run_tests(
            app_url=python_script_url,
            app_args="30 {} {}".format(my_secret, my_secret_content),
            expected_output="Pi is roughly 3",
            args=[
                "--keystore-secret-path",
                keystore_secret,
                "--truststore-secret-path",
                truststore_secret,
                "--private-key-password",
                format(password),
                "--keystore-password",
                format(password),
                "--truststore-password",
                format(password),
                "--conf",
                "spark.mesos.driver.secret.names={}".format(my_secret),
                "--conf",
                "spark.mesos.driver.secret.filenames={}".format(my_secret),
                "--conf",
                "spark.mesos.driver.secret.envkeys={}".format(my_secret),
            ])
    finally:
        shakedown.run_dcos_command(
            'security secrets delete /{}'.format(keystore_secret))
        shakedown.run_dcos_command(
            'security secrets delete /{}'.format(truststore_secret))
        shakedown.run_dcos_command(
            'security secrets delete /{}'.format(my_secret))
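
The __dcos_base64__ prefix on the secret names tells DC/OS to base64-decode the secret before mounting it as a file, which is how the binary keystores survive a string-only secret store. The .base64 fixture files referenced above can be produced with a few lines:

import base64

# Encode the binary keystores so they can be stored as string secrets;
# DC/OS decodes them back to binary because of the __dcos_base64__ name prefix.
for name in ("server.jks", "trust.jks"):
    with open(name, "rb") as src, open(name + ".base64", "wb") as dst:
        dst.write(base64.b64encode(src.read()))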
Example #7
def test_disconnect_from_master():
    python_script_path = os.path.join(THIS_DIR, 'jobs', 'python',
                                      'long_running.py')
    python_script_url = utils.upload_file(python_script_path)
    task_id = utils.submit_job(
        python_script_url,
        "{} {}".format(LONG_RUNNING_FW_NUM_TASKS, LONG_RUNNING_RUN_TIME_SEC), [
            "--conf", "spark.mesos.driver.failoverTimeout=1800", "--conf",
            "spark.cores.max=1"
        ])

    # Wait until executor is running
    utils.wait_for_executors_running(LONG_RUNNING_FW_NAME,
                                     LONG_RUNNING_FW_NUM_TASKS)

    # Block the driver's connection to Mesos master
    framework_info = shakedown.get_service(LONG_RUNNING_FW_NAME)
    (driver_host, port) = _parse_fw_pid_host_port(framework_info["pid"])
    _block_master_connection(driver_host, port)

    # The connection will timeout after 15 minutes of inactivity.
    # Add 5 minutes to make sure the master has detected the disconnection.
    # The framework will be considered disconnected => failover_timeout kicks in.
    LOGGER.info(
        "Waiting {} seconds for connection with master to timeout...".format(
            MASTER_CONNECTION_TIMEOUT_SEC))
    time.sleep(MASTER_CONNECTION_TIMEOUT_SEC + 5 * 60)

    # Restore the connection. The driver should reconnect.
    _unblock_master_connection(driver_host)

    # The executor and driver should finish.
    utils.check_job_output(task_id, "Job completed successfully")
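
The underscore-prefixed helpers are not shown on this page. A Mesos framework pid has the form name@host:port, so the parse step is a simple split; the block/unblock steps below are a sketch assuming shakedown's run_command_on_master and iptables on the master node (the real helpers may differ):

import shakedown  # the same client library the test above already uses


def _parse_fw_pid_host_port(pid):
    # A framework pid looks like "scheduler-1a2b...@10.0.1.5:45210".
    host, port = pid.split("@")[1].split(":")
    return host, int(port)


def _block_master_connection(host, port):
    # Assumed approach: drop all TCP traffic from the driver host at the master.
    # The port is available for a finer-grained rule but unused in this sketch.
    shakedown.run_command_on_master(
        "sudo iptables -I INPUT -p tcp -s {} -j DROP".format(host))


def _unblock_master_connection(host):
    # Delete the rule inserted above (the rule spec must match exactly).
    shakedown.run_command_on_master(
        "sudo iptables -D INPUT -p tcp -s {} -j DROP".format(host))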
Example #8
def test_fail_message_when_verbose(set_verbose_true, fname):
    queue = u.setup_aws_mocks()
    bucket = u.create_bucket()
    # Given that VERBOSE is set
    set_verbose_true()
    # WHEN a bad file is processed
    u.upload_file(bucket, fname)
    event = u.build_event(bucket.name, fname)
    response = lambda_handler(event, u.dummy_context)
    print("response:", response)
    # THEN there should be a message
    count, msg_json = u.get_one_message(queue)
    msg_dict = json.loads(msg_json)
    msg = msg_dict['Message']
    print("message:", msg)
    assert "fail" in response['results'][0]['status']
    assert count == 1 and msg.startswith('fail for')
Example #9
def test_no_exclude_message_when_not_production(set_production_false, fname):
    u.setup_aws_mocks()
    bucket = u.create_bucket()
    # Given that PRODUCTION is set to False
    set_production_false()
    # WHEN any file is processed
    u.upload_file(bucket, fname)
    event = u.build_event(bucket.name, fname)
    response = lambda_handler(event, u.dummy_context)
    # THEN there should be no mentions of skipping
    # count, msg = get_one_message(queue)

    # print things that will be useful to debug
    print("response:", response)

    # actual criteria to pass
    assert u.not_in("Excluded from validation",
                    response['results'][0]['results'])
Example #10
def test_jar(app_name=utils.SPARK_APP_NAME):
    master_url = ("https"
                  if utils.is_strict() else "http") + "://leader.mesos:5050"
    spark_job_runner_args = '{} dcos \\"*\\" spark:only 2 --auth-token={}'.format(
        master_url, shakedown.dcos_acs_token())
    jar_url = utils.upload_file(os.getenv('TEST_JAR_PATH'))
    utils.run_tests(
        app_url=jar_url,
        app_args=spark_job_runner_args,
        expected_output="All tests passed",
        app_name=app_name,
        args=[
            "--class",
            'com.typesafe.spark.test.mesos.framework.runners.SparkJobRunner'
        ])
Example #11
def test_always_log_output_issue_17(setter, fname, capsys):
    # mock the queue, but we won't examine it
    u.setup_aws_mocks()
    bucket = u.create_bucket()
    # Given that VERBOSE is in any state
    setter()
    # WHEN any file is processed
    bucket_name, key_name = u.upload_file(bucket, fname)
    event = u.build_event(bucket_name, key_name)
    results = lambda_handler(event, u.dummy_context)
    # THEN there should always be a message on stdout
    out, err = capsys.readouterr()
    # put useful information in failure output
    print("response: '{}'".format(results))
    assert out != ''
    assert err == ''
Example #12
def test_r():
    r_script_path = os.path.join(THIS_DIR, 'jobs', 'R', 'dataframe.R')
    r_script_url = utils.upload_file(r_script_path)
    utils.run_tests(app_url=r_script_url,
                    app_args='',
                    expected_output="Justin")