Example #1
    def __init__(
        self,
        *,
        job_id: Optional[str] = None,
        job_name: Optional[str] = None,
        json: Optional[Any] = None,
        notebook_params: Optional[Dict[str, str]] = None,
        python_params: Optional[List[str]] = None,
        jar_params: Optional[List[str]] = None,
        spark_submit_params: Optional[List[str]] = None,
        python_named_params: Optional[Dict[str, str]] = None,
        idempotency_token: Optional[str] = None,
        databricks_conn_id: str = 'databricks_default',
        polling_period_seconds: int = 30,
        databricks_retry_limit: int = 3,
        databricks_retry_delay: int = 1,
        databricks_retry_args: Optional[Dict[Any, Any]] = None,
        do_xcom_push: bool = True,
        wait_for_termination: bool = True,
        **kwargs,
    ) -> None:
        """Creates a new ``DatabricksRunNowOperator``."""
        super().__init__(**kwargs)
        self.json = json or {}
        self.databricks_conn_id = databricks_conn_id
        self.polling_period_seconds = polling_period_seconds
        self.databricks_retry_limit = databricks_retry_limit
        self.databricks_retry_delay = databricks_retry_delay
        self.databricks_retry_args = databricks_retry_args
        self.wait_for_termination = wait_for_termination

        if job_id is not None:
            self.json['job_id'] = job_id
        if job_name is not None:
            self.json['job_name'] = job_name
        if 'job_id' in self.json and 'job_name' in self.json:
            raise AirflowException(
                "Argument 'job_name' is not allowed with argument 'job_id'")
        if notebook_params is not None:
            self.json['notebook_params'] = notebook_params
        if python_params is not None:
            self.json['python_params'] = python_params
        if python_named_params is not None:
            self.json['python_named_params'] = python_named_params
        if jar_params is not None:
            self.json['jar_params'] = jar_params
        if spark_submit_params is not None:
            self.json['spark_submit_params'] = spark_submit_params
        if idempotency_token is not None:
            self.json['idempotency_token'] = idempotency_token

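        # Recursively coerce numeric and boolean values in the request payload
        # to strings, so the resulting ``json`` dict is entirely string-valued.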
        self.json = deep_string_coerce(self.json)
        # This variable will be used in case our task gets killed.
        self.run_id: Optional[int] = None
        self.do_xcom_push = do_xcom_push
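
For context, a minimal usage sketch of the operator above inside a DAG. The DAG id, job id, and notebook parameter values are illustrative assumptions, not taken from the example itself.

from datetime import datetime

from airflow import DAG
from airflow.providers.databricks.operators.databricks import DatabricksRunNowOperator

with DAG(
    dag_id="example_databricks_run_now",  # hypothetical DAG id
    start_date=datetime(2023, 1, 1),
    schedule_interval=None,
    catchup=False,
) as dag:
    # Trigger an existing Databricks job by id and pass notebook parameters;
    # the operator polls every ``polling_period_seconds`` until the run finishes.
    run_now = DatabricksRunNowOperator(
        task_id="run_existing_job",
        databricks_conn_id="databricks_default",
        job_id="42",  # assumed job id
        notebook_params={"run_date": "{{ ds }}"},
        polling_period_seconds=30,
        wait_for_termination=True,
    )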
Example #2
    def __init__(
        self,
        *,
        json: Optional[Any] = None,
        tasks: Optional[List[object]] = None,
        spark_jar_task: Optional[Dict[str, str]] = None,
        notebook_task: Optional[Dict[str, str]] = None,
        spark_python_task: Optional[Dict[str, Union[str, List[str]]]] = None,
        spark_submit_task: Optional[Dict[str, List[str]]] = None,
        pipeline_task: Optional[Dict[str, str]] = None,
        new_cluster: Optional[Dict[str, object]] = None,
        existing_cluster_id: Optional[str] = None,
        libraries: Optional[List[Dict[str, str]]] = None,
        run_name: Optional[str] = None,
        timeout_seconds: Optional[int] = None,
        databricks_conn_id: str = 'databricks_default',
        polling_period_seconds: int = 30,
        databricks_retry_limit: int = 3,
        databricks_retry_delay: int = 1,
        databricks_retry_args: Optional[Dict[Any, Any]] = None,
        do_xcom_push: bool = True,
        idempotency_token: Optional[str] = None,
        access_control_list: Optional[List[Dict[str, str]]] = None,
        wait_for_termination: bool = True,
        git_source: Optional[Dict[str, str]] = None,
        **kwargs,
    ) -> None:
        """Creates a new ``DatabricksSubmitRunOperator``."""
        super().__init__(**kwargs)
        self.json = json or {}
        self.databricks_conn_id = databricks_conn_id
        self.polling_period_seconds = polling_period_seconds
        self.databricks_retry_limit = databricks_retry_limit
        self.databricks_retry_delay = databricks_retry_delay
        self.databricks_retry_args = databricks_retry_args
        self.wait_for_termination = wait_for_termination
        if tasks is not None:
            self.json['tasks'] = tasks
        if spark_jar_task is not None:
            self.json['spark_jar_task'] = spark_jar_task
        if notebook_task is not None:
            self.json['notebook_task'] = notebook_task
        if spark_python_task is not None:
            self.json['spark_python_task'] = spark_python_task
        if spark_submit_task is not None:
            self.json['spark_submit_task'] = spark_submit_task
        if pipeline_task is not None:
            self.json['pipeline_task'] = pipeline_task
        if new_cluster is not None:
            self.json['new_cluster'] = new_cluster
        if existing_cluster_id is not None:
            self.json['existing_cluster_id'] = existing_cluster_id
        if libraries is not None:
            self.json['libraries'] = libraries
        if run_name is not None:
            self.json['run_name'] = run_name
        if timeout_seconds is not None:
            self.json['timeout_seconds'] = timeout_seconds
        if 'run_name' not in self.json:
            self.json['run_name'] = run_name or kwargs['task_id']
        if idempotency_token is not None:
            self.json['idempotency_token'] = idempotency_token
        if access_control_list is not None:
            self.json['access_control_list'] = access_control_list
        if git_source is not None:
            self.json['git_source'] = git_source

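        # Recursively coerce numeric and boolean values in the request payload
        # to strings, so the resulting ``json`` dict is entirely string-valued.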
        self.json = deep_string_coerce(self.json)
        # This variable will be used in case our task gets killed.
        self.run_id: Optional[int] = None
        self.do_xcom_push = do_xcom_push
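
For context, a minimal usage sketch for DatabricksSubmitRunOperator follows. The DAG id, cluster spec, and notebook path are illustrative assumptions; note that when no run_name is supplied, the constructor above falls back to the task_id.

from datetime import datetime

from airflow import DAG
from airflow.providers.databricks.operators.databricks import DatabricksSubmitRunOperator

# Illustrative cluster spec for a one-time run (values are assumptions).
new_cluster = {
    "spark_version": "10.4.x-scala2.12",
    "node_type_id": "i3.xlarge",
    "num_workers": 2,
}

with DAG(
    dag_id="example_databricks_submit_run",  # hypothetical DAG id
    start_date=datetime(2023, 1, 1),
    schedule_interval=None,
    catchup=False,
) as dag:
    # Submit a one-time notebook run on a new cluster; run_name defaults to the task_id.
    submit_run = DatabricksSubmitRunOperator(
        task_id="submit_notebook_run",
        databricks_conn_id="databricks_default",
        new_cluster=new_cluster,
        notebook_task={"notebook_path": "/Shared/example_notebook"},
        timeout_seconds=3600,
    )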