Example #1
# If a cluster is already running from the Spark EC2 scripts, try shutting it down.
if os.path.exists(config.SPARK_HOME_DIR) and should_restart_cluster and not config.IS_MESOS_MODE:
    Cluster(spark_home=config.SPARK_HOME_DIR).stop()

spark_build_manager = SparkBuildManager("%s/spark-build-cache" % PROJ_DIR,
                                        config.SPARK_GIT_REPO)

if config.IS_MESOS_MODE:
    cluster = MesosCluster(spark_home=config.SPARK_HOME_DIR,
                           spark_conf_dir=config.SPARK_CONF_DIR,
                           mesos_master=config.SPARK_CLUSTER_URL)
elif config.USE_CLUSTER_SPARK:
    cluster = Cluster(spark_home=config.SPARK_HOME_DIR,
                      spark_conf_dir=config.SPARK_CONF_DIR)
else:
    cluster = spark_build_manager.get_cluster(
        config.SPARK_COMMIT_ID, config.SPARK_CONF_DIR,
        config.SPARK_MERGE_COMMIT_INTO_MASTER)

# rsync Spark to all nodes in case there is a change in Worker config
if should_restart_cluster and should_rsync_spark_home:
    cluster.sync_spark()

# If a cluster is already running from an earlier test, try shutting it down.
if os.path.exists(cluster.spark_home) and should_restart_cluster:
    cluster.stop()

if should_restart_cluster:
    # Ensure all shutdowns have completed (no executors are running).
    cluster.ensure_spark_stopped_on_slaves()

# Build the tests for each project.
Example #2
spark_build_manager = SparkBuildManager("%s/spark-build-cache" % PROJ_DIR,
                                        config.SPARK_GIT_REPO)

if config.IS_MESOS_MODE:
    cluster = MesosCluster(spark_home=config.SPARK_HOME_DIR,
                           spark_conf_dir=config.SPARK_CONF_DIR,
                           mesos_master=config.SPARK_CLUSTER_URL)
elif config.USE_CLUSTER_SPARK:
    cluster = Cluster(spark_home=config.SPARK_HOME_DIR,
                      spark_conf_dir=config.SPARK_CONF_DIR)
else:
    cluster = spark_build_manager.get_cluster(
        commit_id=config.SPARK_COMMIT_ID,
        conf_dir=config.SPARK_CONF_DIR,
        merge_commit_into_master=config.SPARK_MERGE_COMMIT_INTO_MASTER,
        is_yarn_mode=config.IS_YARN_MODE,
        additional_make_distribution_args=args.additional_make_distribution_args)

# rsync Spark to all nodes in case there is a change in Worker config
if should_restart_cluster and should_rsync_spark_home:
    cluster.sync_spark()

# If a cluster is already running from an earlier test, try shutting it down.
if os.path.exists(cluster.spark_home) and should_restart_cluster:
    cluster.stop()

if should_restart_cluster:
    # Ensure all shutdowns have completed (no executors are running).
    cluster.ensure_spark_stopped_on_slaves()
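
The keyword-style call in this example spells out every get_cluster parameter used across these excerpts. Below is a signature sketch inferred only from the calls shown here; the defaults and docstring are assumptions, not taken from the actual SparkBuildManager source. The defaults are chosen so the shorter positional calls in Examples #1 and #3 stay consistent with this interface.

# Interface sketch only; parameter names match the keyword call above,
# but the defaults and docstring are guesses for illustration.
class SparkBuildManager:
    def get_cluster(self, commit_id, conf_dir,
                    merge_commit_into_master=False, is_yarn_mode=False,
                    additional_make_distribution_args=""):
        """Build Spark at commit_id using conf_dir and return a Cluster
        pointing at the resulting build (sketch, not the real body)."""
        ...
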
Example #3
    assert config.SPARK_COMMIT_ID != "", \
        ("Please specify SPARK_COMMIT_ID in %s" % args.config_file)

# If a cluster is already running from the Spark EC2 scripts, try shutting it down.
if os.path.exists(config.SPARK_HOME_DIR) and should_restart_cluster and not config.IS_MESOS_MODE:
    Cluster(spark_home=config.SPARK_HOME_DIR).stop()

spark_build_manager = SparkBuildManager("%s/spark-build-cache" % PROJ_DIR, config.SPARK_GIT_REPO)

if config.IS_MESOS_MODE:
    cluster = MesosCluster(spark_home=config.SPARK_HOME_DIR, spark_conf_dir=config.SPARK_CONF_DIR,
                           mesos_master=config.SPARK_CLUSTER_URL)
elif config.USE_CLUSTER_SPARK:
    cluster = Cluster(spark_home=config.SPARK_HOME_DIR, spark_conf_dir=config.SPARK_CONF_DIR)
else:
    cluster = spark_build_manager.get_cluster(config.SPARK_COMMIT_ID, config.SPARK_CONF_DIR,
                                              config.SPARK_MERGE_COMMIT_INTO_MASTER, config.IS_YARN_MODE)

# rsync Spark to all nodes in case there is a change in Worker config
if should_restart_cluster and should_rsync_spark_home:
    cluster.sync_spark()

# If a cluster is already running from an earlier test, try shutting it down.
if os.path.exists(cluster.spark_home) and should_restart_cluster:
    cluster.stop()

if should_restart_cluster:
    # Ensure all shutdowns have completed (no executors are running).
    cluster.ensure_spark_stopped_on_slaves()

# Build the tests for each project.
spark_work_dir = "%s/work" % cluster.spark_home
Example #4
# If a cluster is already running from the Spark EC2 scripts, try shutting it down.
if os.path.exists(config.SPARK_HOME_DIR) and should_restart_cluster and not config.IS_MESOS_MODE:
    Cluster(spark_home=config.SPARK_HOME_DIR).stop()

spark_build_manager = SparkBuildManager("%s/spark-build-cache" % PROJ_DIR, config.SPARK_GIT_REPO)

if config.IS_MESOS_MODE:
    cluster = MesosCluster(spark_home=config.SPARK_HOME_DIR, spark_conf_dir=config.SPARK_CONF_DIR,
                           mesos_master=config.SPARK_CLUSTER_URL)
elif config.USE_CLUSTER_SPARK:
    cluster = Cluster(spark_home=config.SPARK_HOME_DIR, spark_conf_dir=config.SPARK_CONF_DIR)
else:
    cluster = spark_build_manager.get_cluster(
        commit_id=config.SPARK_COMMIT_ID,
        conf_dir=config.SPARK_CONF_DIR,
        merge_commit_into_master=config.SPARK_MERGE_COMMIT_INTO_MASTER,
        is_yarn_mode=config.IS_YARN_MODE,
        additional_make_distribution_args=args.additional_make_distribution_args)

# rsync Spark to all nodes in case there is a change in Worker config
if should_restart_cluster and should_rsync_spark_home:
    cluster.sync_spark()

# If a cluster is already running from an earlier test, try shutting it down.
if os.path.exists(cluster.spark_home) and should_restart_cluster:
    cluster.stop()

if should_restart_cluster:
    # Ensure all shutdowns have completed (no executors are running).
    cluster.ensure_spark_stopped_on_slaves()
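
All four excerpts assume the same surrounding driver state: a config object loaded from the user's config file, the PROJ_DIR constant, parsed CLI args, and the should_restart_cluster / should_rsync_spark_home flags. They also import Cluster, MesosCluster, and SparkBuildManager from the spark-perf library; the module paths are not shown in the excerpts. A minimal stub that makes the control flow above concrete follows; every attribute name appears in the excerpts, but all of the values are hypothetical.

# Hypothetical stand-in for the driver state the excerpts rely on.
# Attribute names come from the examples above; the values are invented.
import os

class config:
    IS_MESOS_MODE = False                  # selects MesosCluster when True
    IS_YARN_MODE = False                   # forwarded to get_cluster
    USE_CLUSTER_SPARK = True               # reuse an existing Spark install
    SPARK_HOME_DIR = os.path.expanduser("~/spark")
    SPARK_CONF_DIR = os.path.expanduser("~/spark/conf")
    SPARK_CLUSTER_URL = "mesos://master:5050"  # Mesos master URL (made up)
    SPARK_GIT_REPO = "https://github.com/apache/spark.git"
    SPARK_COMMIT_ID = ""                   # required when building from source
    SPARK_MERGE_COMMIT_INTO_MASTER = False

PROJ_DIR = os.path.dirname(os.path.abspath(__file__))
should_restart_cluster = True              # restart Spark between test runs
should_rsync_spark_home = True             # push Spark to all worker nodes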