def test_yarn_process_on_kill(self, mock_popen):
    # Given
    mock_popen.return_value.stdout = six.StringIO('stdout')
    mock_popen.return_value.stderr = six.StringIO('stderr')
    mock_popen.return_value.poll.return_value = None
    mock_popen.return_value.wait.return_value = 0
    log_lines = [
        'SPARK_MAJOR_VERSION is set to 2, using Spark2',
        'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
        'platform... using builtin-java classes where applicable',
        'WARN DomainSocketFactory: The short-circuit local reads feature cannot ' +
        'be used because libhadoop cannot be loaded.',
        'INFO Client: Requesting a new application from cluster with 10 ' +
        'NodeManagerapplication_1486558679801_1820s',
        'INFO Client: Submitting application application_1486558679801_1820 ' +
        'to ResourceManager'
    ]
    hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
    hook._process_spark_submit_log(log_lines)
    hook.submit()

    # When
    hook.on_kill()

    # Then
    self.assertIn(
        call(['yarn', 'application', '-kill',
              'application_1486558679801_1820'],
             stderr=-1, stdout=-1),
        mock_popen.mock_calls)

def test_submit(self, mock_process):
    # We don't have spark-submit available, and this is hard to mock, so let's
    # just use this simple mock.
    mock_Popen = mock_process.Popen.return_value
    mock_Popen.stdout = StringIO(u'stdout')
    mock_Popen.stderr = StringIO(u'stderr')
    mock_Popen.returncode = None
    mock_Popen.communicate.return_value = ['extra stdout', 'extra stderr']

    hook = SparkSubmitHook()
    hook.submit(self._spark_job_file)

def test_spark_process_runcmd(self, mock_popen):
    # Given
    mock_popen.return_value.stdout = StringIO(u'stdout')
    mock_popen.return_value.stderr = StringIO(u'stderr')
    mock_popen.return_value.wait.return_value = 0

    # When
    hook = SparkSubmitHook(conn_id='')
    hook.submit()

    # Then
    self.assertEqual(
        mock_popen.mock_calls[0],
        call(['spark-submit', '--master', 'yarn', '--name', 'default-name', ''],
             stdout=-1, stderr=-2))

def test_k8s_process_on_kill(self, mock_popen, mock_client_method):
    # Given
    mock_popen.return_value.stdout = six.StringIO('stdout')
    mock_popen.return_value.stderr = six.StringIO('stderr')
    mock_popen.return_value.poll.return_value = None
    mock_popen.return_value.wait.return_value = 0
    client = mock_client_method.return_value
    hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
    log_lines = [
        'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:' +
        'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
        'namespace: default' +
        'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,' +
        'spark-role -> driver' +
        'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42' +
        'creation time: 2018-03-05T10:26:55Z' +
        'service account name: spark' +
        'volumes: spark-init-properties, download-jars-volume,' +
        'download-files-volume, spark-token-2vmlm' +
        'node name: N/A' +
        'start time: N/A' +
        'container images: N/A' +
        'phase: Pending' +
        'status: []' +
        '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,' +
        ' new state:' +
        'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
        'namespace: default' +
        'Exit code: 0'
    ]
    hook._process_spark_submit_log(log_lines)
    hook.submit()

    # When
    hook.on_kill()

    # Then
    import kubernetes
    kwargs = {'pretty': True, 'body': kubernetes.client.V1DeleteOptions()}
    client.delete_namespaced_pod.assert_called_once_with(
        'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver',
        'mynamespace', **kwargs)

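For context, the kill path asserted above deletes the Spark driver pod through the Kubernetes Python client. A minimal sketch of that underlying call, using the pod name and namespace from the test; the client construction and config loading here are illustrative assumptions, not taken from the hook's source:

import kubernetes

# Illustrative client setup; the hook obtains its client differently.
kubernetes.config.load_kube_config()
api = kubernetes.client.CoreV1Api()
api.delete_namespaced_pod(
    'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver',  # driver pod name parsed from the log
    'mynamespace',
    pretty=True,
    body=kubernetes.client.V1DeleteOptions(),
)
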
def test_spark_process_runcmd(self, mock_popen):
    # Given
    mock_popen.return_value.stdout = six.StringIO('stdout')
    mock_popen.return_value.stderr = six.StringIO('stderr')
    mock_popen.return_value.wait.return_value = 0

    # When
    hook = SparkSubmitHook(conn_id='')
    hook.submit()

    # Then
    self.assertEqual(
        mock_popen.mock_calls[0],
        call(['spark-submit', '--master', 'yarn', '--name', 'default-name', ''],
             stderr=-2, stdout=-1, universal_newlines=True, bufsize=-1))

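The negative stdout/stderr values asserted in these expected Popen calls are simply the integer values of the standard library's subprocess constants; a small illustrative check (standard library facts, not part of the hook under test):

import subprocess

# subprocess exposes these sentinels as plain integers, which is why the
# expected Popen call above shows stdout=-1 and stderr=-2.
assert subprocess.PIPE == -1     # capture the stream through a pipe
assert subprocess.STDOUT == -2   # merge stderr into stdout
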
def test_spark_process_on_kill(self, mock_popen):
    # Given
    mock_popen.return_value.stdout = six.StringIO('stdout')
    mock_popen.return_value.stderr = six.StringIO('stderr')
    mock_popen.return_value.poll.return_value = None
    mock_popen.return_value.wait.return_value = 0
    log_lines = [
        'SPARK_MAJOR_VERSION is set to 2, using Spark2',
        'WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable',
        'WARN DomainSocketFactory: The short-circuit local reads feature cannot be used because libhadoop cannot be loaded.',
        'INFO Client: Requesting a new application from cluster with 10 NodeManagerapplication_1486558679801_1820s',
        'INFO Client: Submitting application application_1486558679801_1820 to ResourceManager'
    ]
    hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
    hook._process_log(log_lines)
    hook.submit()

    # When
    hook.on_kill()

    # Then
    self.assertIn(
        call(['yarn', 'application', '-kill',
              'application_1486558679801_1820'],
             stderr=-1, stdout=-1),
        mock_popen.mock_calls)

def test_SparkProcess_runcmd(self, mock_popen):
    # Given
    mock_popen.return_value.stdout = StringIO(u'stdout')
    mock_popen.return_value.stderr = StringIO(u'stderr')
    mock_popen.return_value.returncode = 0
    mock_popen.return_value.communicate.return_value = [
        StringIO(u'stdout\nstdout'),
        StringIO(u'stderr\nstderr')
    ]

    # When
    hook = SparkSubmitHook(conn_id='')
    hook.submit()

    # Then
    self.assertEqual(
        mock_popen.mock_calls[0],
        call(['spark-submit', '--master', 'yarn', '--name', 'default-name', ''],
             stderr=-1, stdout=-1))

class SparkSubmitOperator(BaseOperator):
    """
    This operator is a wrapper around the spark-submit binary to kick off a
    spark-submit job. It requires that the "spark-submit" binary is in the PATH
    or the spark-home is set in the extra on the connection.

    :param application: The application that is submitted as a job, either jar or py file.
    :type application: str
    :param conf: Arbitrary Spark configuration properties
    :type conf: dict
    :param conn_id: The connection id as configured in Airflow administration.
        When an invalid connection_id is supplied, it will default to yarn.
    :type conn_id: str
    :param files: Upload additional files to the executor running the job,
        separated by a comma. Files will be placed in the working directory
        of each executor. For example, serialized objects.
    :type files: str
    :param py_files: Additional python files used by the job, can be .zip, .egg or .py.
    :type py_files: str
    :param jars: Submit additional jars to upload and place them in executor classpath.
    :type jars: str
    :param driver_classpath: Additional, driver-specific, classpath settings.
    :type driver_classpath: str
    :param java_class: the main class of the Java application
    :type java_class: str
    :param packages: Comma-separated list of maven coordinates of jars to include
        on the driver and executor classpaths
    :type packages: str
    :param exclude_packages: Comma-separated list of maven coordinates of jars to
        exclude while resolving the dependencies provided in 'packages'
    :type exclude_packages: str
    :param repositories: Comma-separated list of additional remote repositories to
        search for the maven coordinates given with 'packages'
    :type repositories: str
    :param total_executor_cores: (Standalone & Mesos only) Total cores for all
        executors (Default: all the available cores on the worker)
    :type total_executor_cores: int
    :param executor_cores: (Standalone & YARN only) Number of cores per executor (Default: 2)
    :type executor_cores: int
    :param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
    :type executor_memory: str
    :param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
    :type driver_memory: str
    :param keytab: Full path to the file that contains the keytab
    :type keytab: str
    :param principal: The name of the kerberos principal used for keytab
    :type principal: str
    :param name: Name of the job (default airflow-spark)
    :type name: str
    :param num_executors: Number of executors to launch
    :type num_executors: int
    :param application_args: Arguments for the application being submitted
    :type application_args: list
    :param verbose: Whether to pass the verbose flag to spark-submit process for debugging
    :type verbose: bool
    """
    template_fields = ('_name', '_application_args', '_packages')
    ui_color = WEB_COLORS['LIGHTORANGE']

    @apply_defaults
    def __init__(self,
                 application='',
                 conf=None,
                 conn_id='spark_default',
                 files=None,
                 py_files=None,
                 driver_classpath=None,
                 jars=None,
                 java_class=None,
                 packages=None,
                 exclude_packages=None,
                 repositories=None,
                 total_executor_cores=None,
                 executor_cores=None,
                 executor_memory=None,
                 driver_memory=None,
                 keytab=None,
                 principal=None,
                 name='airflow-spark',
                 num_executors=None,
                 application_args=None,
                 verbose=False,
                 *args,
                 **kwargs):
        super(SparkSubmitOperator, self).__init__(*args, **kwargs)
        self._application = application
        self._conf = conf
        self._files = files
        self._py_files = py_files
        self._driver_classpath = driver_classpath
        self._jars = jars
        self._java_class = java_class
        self._packages = packages
        self._exclude_packages = exclude_packages
        self._repositories = repositories
        self._total_executor_cores = total_executor_cores
        self._executor_cores = executor_cores
        self._executor_memory = executor_memory
        self._driver_memory = driver_memory
        self._keytab = keytab
        self._principal = principal
        self._name = name
        self._num_executors = num_executors
        self._application_args = application_args
        self._verbose = verbose
        self._hook = None
        self._conn_id = conn_id

    def execute(self, context):
        """
        Call the SparkSubmitHook to run the provided spark job
        """
        self._hook = SparkSubmitHook(
            conf=self._conf,
            conn_id=self._conn_id,
            files=self._files,
            py_files=self._py_files,
            driver_classpath=self._driver_classpath,
            jars=self._jars,
            java_class=self._java_class,
            packages=self._packages,
            exclude_packages=self._exclude_packages,
            repositories=self._repositories,
            total_executor_cores=self._total_executor_cores,
            executor_cores=self._executor_cores,
            executor_memory=self._executor_memory,
            driver_memory=self._driver_memory,
            keytab=self._keytab,
            principal=self._principal,
            name=self._name,
            num_executors=self._num_executors,
            application_args=self._application_args,
            verbose=self._verbose)
        self._hook.submit(self._application)

    def on_kill(self):
        self._hook.on_kill()

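For reference, a minimal usage sketch of this operator inside a DAG. It assumes the Airflow 1.x contrib import path; the DAG id, application jar, main class, and arguments are illustrative placeholders rather than values from the snippets above:

from airflow import DAG
from airflow.utils.dates import days_ago
# Import path for Airflow 1.x; adjust for the provider package in newer versions.
from airflow.contrib.operators.spark_submit_operator import SparkSubmitOperator

dag = DAG(dag_id='example_spark_submit',
          schedule_interval=None,
          start_date=days_ago(1))

submit_job = SparkSubmitOperator(
    task_id='spark_pi',
    application='/path/to/spark-examples.jar',          # placeholder application jar
    java_class='org.apache.spark.examples.SparkPi',     # placeholder main class
    conn_id='spark_default',
    executor_cores=2,
    executor_memory='2G',
    name='airflow-spark-pi',
    application_args=['1000'],
    dag=dag,
)
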
class SparkSubmitOperator(BaseOperator):
    """
    This operator is a wrapper around the spark-submit binary to kick off a
    spark-submit job. It requires that the "spark-submit" binary is in the PATH
    or the spark-home is set in the extra on the connection.

    :param application: The application that is submitted as a job, either jar or py file.
    :type application: str
    :param conf: Arbitrary Spark configuration properties
    :type conf: dict
    :param conn_id: The connection id as configured in Airflow administration.
        When an invalid connection_id is supplied, it will default to yarn.
    :type conn_id: str
    :param files: Upload additional files to the container running the job,
        separated by a comma. For example hive-site.xml.
    :type files: str
    :param py_files: Additional python files used by the job, can be .zip, .egg or .py.
    :type py_files: str
    :param jars: Submit additional jars to upload and place them in executor classpath.
    :type jars: str
    :param driver_classpath: Additional, driver-specific, classpath settings.
    :type driver_classpath: str
    :param java_class: the main class of the Java application
    :type java_class: str
    :param packages: Comma-separated list of maven coordinates of jars to include
        on the driver and executor classpaths
    :type packages: str
    :param exclude_packages: Comma-separated list of maven coordinates of jars to
        exclude while resolving the dependencies provided in 'packages'
    :type exclude_packages: str
    :param repositories: Comma-separated list of additional remote repositories to
        search for the maven coordinates given with 'packages'
    :type repositories: str
    :param total_executor_cores: (Standalone & Mesos only) Total cores for all
        executors (Default: all the available cores on the worker)
    :type total_executor_cores: int
    :param executor_cores: (Standalone & YARN only) Number of cores per executor (Default: 2)
    :type executor_cores: int
    :param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
    :type executor_memory: str
    :param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
    :type driver_memory: str
    :param keytab: Full path to the file that contains the keytab
    :type keytab: str
    :param principal: The name of the kerberos principal used for keytab
    :type principal: str
    :param name: Name of the job (default airflow-spark)
    :type name: str
    :param num_executors: Number of executors to launch
    :type num_executors: int
    :param application_args: Arguments for the application being submitted
    :type application_args: list
    :param verbose: Whether to pass the verbose flag to spark-submit process for debugging
    :type verbose: bool
    """
    template_fields = ('_name', '_application_args', '_packages')
    ui_color = WEB_COLORS['LIGHTORANGE']

    @apply_defaults
    def __init__(self,
                 application='',
                 conf=None,
                 conn_id='spark_default',
                 files=None,
                 py_files=None,
                 driver_classpath=None,
                 jars=None,
                 java_class=None,
                 packages=None,
                 exclude_packages=None,
                 repositories=None,
                 total_executor_cores=None,
                 executor_cores=None,
                 executor_memory=None,
                 driver_memory=None,
                 keytab=None,
                 principal=None,
                 name='airflow-spark',
                 num_executors=None,
                 application_args=None,
                 verbose=False,
                 *args,
                 **kwargs):
        super(SparkSubmitOperator, self).__init__(*args, **kwargs)
        self._application = application
        self._conf = conf
        self._files = files
        self._py_files = py_files
        self._driver_classpath = driver_classpath
        self._jars = jars
        self._java_class = java_class
        self._packages = packages
        self._exclude_packages = exclude_packages
        self._repositories = repositories
        self._total_executor_cores = total_executor_cores
        self._executor_cores = executor_cores
        self._executor_memory = executor_memory
        self._driver_memory = driver_memory
        self._keytab = keytab
        self._principal = principal
        self._name = name
        self._num_executors = num_executors
        self._application_args = application_args
        self._verbose = verbose
        self._hook = None
        self._conn_id = conn_id

    def execute(self, context):
        """
        Call the SparkSubmitHook to run the provided spark job
        """
        self._hook = SparkSubmitHook(
            conf=self._conf,
            conn_id=self._conn_id,
            files=self._files,
            py_files=self._py_files,
            driver_classpath=self._driver_classpath,
            jars=self._jars,
            java_class=self._java_class,
            packages=self._packages,
            exclude_packages=self._exclude_packages,
            repositories=self._repositories,
            total_executor_cores=self._total_executor_cores,
            executor_cores=self._executor_cores,
            executor_memory=self._executor_memory,
            driver_memory=self._driver_memory,
            keytab=self._keytab,
            principal=self._principal,
            name=self._name,
            num_executors=self._num_executors,
            application_args=self._application_args,
            verbose=self._verbose
        )
        self._hook.submit(self._application)

    def on_kill(self):
        self._hook.on_kill()

def _run_spark_submit(self, application, jars):
    # task_env = get_cloud_config(Clouds.local)
    spark_local_config = SparkLocalEngineConfig()
    _config = self.config
    deploy = self.deploy

    AIRFLOW_ON = is_airflow_enabled()

    if AIRFLOW_ON:
        from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
        from airflow.exceptions import AirflowException as SparkException
    else:
        from dbnd_spark._vendor.airflow.spark_hook import (
            SparkException,
            SparkSubmitHook,
        )

    spark = SparkSubmitHook(
        conf=_config.conf,
        conn_id=spark_local_config.conn_id,
        name=self.job.job_id,
        application_args=list_of_strings(self.task.application_args()),
        java_class=self.task.main_class,
        files=deploy.arg_files(_config.files),
        py_files=deploy.arg_files(self.task.get_py_files()),
        driver_class_path=_config.driver_class_path,
        jars=deploy.arg_files(jars),
        packages=_config.packages,
        exclude_packages=_config.exclude_packages,
        repositories=_config.repositories,
        total_executor_cores=_config.total_executor_cores,
        executor_cores=_config.executor_cores,
        executor_memory=_config.executor_memory,
        driver_memory=_config.driver_memory,
        keytab=_config.keytab,
        principal=_config.principal,
        num_executors=_config.num_executors,
        env_vars=self._get_env_vars(),
        verbose=_config.verbose,
    )
    if not AIRFLOW_ON:
        # If there's no Airflow then there's no Connection, so we
        # take conn information from spark config
        spark.set_connection(spark_local_config.conn_uri)

    log_buffer = StringIO()
    with log_buffer as lb:
        dbnd_log_handler = self._capture_submit_log(spark, lb)
        try:
            # sync the application file to remote if needed
            spark.submit(application=deploy.sync(application))
        except SparkException as ex:
            return_code = self._get_spark_return_code_from_exception(ex)
            if return_code != "0":
                error_snippets = parse_spark_log_safe(
                    log_buffer.getvalue().split(os.linesep)
                )
                raise failed_to_run_spark_script(
                    self,
                    spark._build_spark_submit_command(application=application),
                    application,
                    return_code,
                    error_snippets,
                )
            else:
                raise failed_spark_status(ex)
        finally:
            # drop only the temporary log-capture handler added above
            spark.log.handlers = [
                h for h in spark.log.handlers if h is not dbnd_log_handler
            ]

class SparkSubmitOperator(BaseOperator):
    """
    This operator is a wrapper around the spark-submit binary to kick off a
    spark-submit job. It requires that the "spark-submit" binary is in the PATH.

    :param application: The application that is submitted as a job, either jar or py file.
    :type application: str
    :param conf: Arbitrary Spark configuration properties
    :type conf: dict
    :param conn_id: The connection id as configured in Airflow administration.
        When an invalid connection_id is supplied, it will default to yarn.
    :type conn_id: str
    :param files: Upload additional files to the container running the job,
        separated by a comma. For example hive-site.xml.
    :type files: str
    :param py_files: Additional python files used by the job, can be .zip, .egg or .py.
    :type py_files: str
    :param jars: Submit additional jars to upload and place them in executor classpath.
    :type jars: str
    :param executor_cores: Number of cores per executor (Default: 2)
    :type executor_cores: int
    :param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
    :type executor_memory: str
    :param keytab: Full path to the file that contains the keytab
    :type keytab: str
    :param principal: The name of the kerberos principal used for keytab
    :type principal: str
    :param name: Name of the job (default airflow-spark)
    :type name: str
    :param num_executors: Number of executors to launch
    :type num_executors: int
    :param verbose: Whether to pass the verbose flag to spark-submit process for debugging
    :type verbose: bool
    """

    @apply_defaults
    def __init__(self,
                 application='',
                 conf=None,
                 conn_id='spark_default',
                 files=None,
                 py_files=None,
                 jars=None,
                 executor_cores=None,
                 executor_memory=None,
                 keytab=None,
                 principal=None,
                 name='airflow-spark',
                 num_executors=None,
                 verbose=False,
                 *args,
                 **kwargs):
        super(SparkSubmitOperator, self).__init__(*args, **kwargs)
        self._application = application
        self._conf = conf
        self._files = files
        self._py_files = py_files
        self._jars = jars
        self._executor_cores = executor_cores
        self._executor_memory = executor_memory
        self._keytab = keytab
        self._principal = principal
        self._name = name
        self._num_executors = num_executors
        self._verbose = verbose
        self._hook = None
        self._conn_id = conn_id

    def execute(self, context):
        """
        Call the SparkSubmitHook to run the provided spark job
        """
        self._hook = SparkSubmitHook(
            conf=self._conf,
            conn_id=self._conn_id,
            files=self._files,
            py_files=self._py_files,
            jars=self._jars,
            executor_cores=self._executor_cores,
            executor_memory=self._executor_memory,
            keytab=self._keytab,
            principal=self._principal,
            name=self._name,
            num_executors=self._num_executors,
            verbose=self._verbose
        )
        self._hook.submit(self._application)

    def on_kill(self):
        self._hook.on_kill()