Example #1
def test_source_hash_with_root_input_manager():
    @root_input_manager
    def my_input_manager():
        return 5

    @op(ins={"x": In(root_manager_key="manager")})
    def the_op(x):
        return x + 1

    @job(version_strategy=SourceHashVersionStrategy(),
         resource_defs={"manager": my_input_manager})
    def call_the_op():
        the_op()

    with instance_for_test() as instance:
        result = call_the_op.execute_in_process(instance=instance)
        assert result.success

        memoized_plan = create_execution_plan(call_the_op,
                                              instance_ref=instance.get_ref())
        assert len(memoized_plan.step_keys_to_execute) == 0
Example #2
def define_inty_job():
    @op(out=Out(Int))
    def return_one():
        return 1

    @op(
        ins={"num": In(Int)},
        out=Out(Int),
    )
    def add_one(num):
        return num + 1

    @job(
        resource_defs={
            "io_manager": s3_pickle_io_manager,
            "s3": s3_test_resource,
        }
    )
    def basic_external_plan_execution():
        add_one(return_one())

    return basic_external_plan_execution
Example #3
def test_custom_dagster_dataframe_loading_ok():
    input_dataframe = DataFrame({"foo": [1, 2, 3]})
    with safe_tempfile_path() as input_csv_fp, safe_tempfile_path() as output_csv_fp:
        input_dataframe.to_csv(input_csv_fp)
        TestDataFrame = create_dagster_pandas_dataframe_type(
            name="TestDataFrame",
            columns=[
                PandasColumn.exists("foo"),
            ],
        )

        @op(
            ins={"test_df": In(TestDataFrame)},
            out=Out(TestDataFrame),
        )
        def use_test_dataframe(_, test_df):
            test_df["bar"] = [2, 4, 6]
            return test_df

        @graph
        def basic_graph():
            use_test_dataframe()

        result = basic_graph.execute_in_process(
            run_config={
                "ops": {
                    "use_test_dataframe": {
                        "inputs": {"test_df": {"csv": {"path": input_csv_fp}}},
                        "outputs": [
                            {"result": {"csv": {"path": output_csv_fp}}},
                        ],
                    }
                }
            }
        )
        assert result.success
        output_df = read_csv(output_csv_fp)
        assert all(output_df["bar"] == [2, 4, 6])
Example #4
def test_dataframe_csv_missing_inputs():
    called = {}

    @op(ins={"df": In(DataFrame)})
    def df_as_input(_context, df):  # pylint: disable=W0613
        called["yup"] = True

    @graph
    def missing_inputs():
        return df_as_input()

    with pytest.raises(DagsterInvalidConfigError) as exc_info:
        missing_inputs.execute_in_process()

    assert len(exc_info.value.errors) == 1

    expected_suggested_config = {"df_as_input": {"inputs": {"df": "<selector>"}}}
    assert exc_info.value.errors[0].message.startswith(
        'Missing required config entry "ops" at the root.'
    )
    assert str(expected_suggested_config) in exc_info.value.errors[0].message

    assert "yup" not in called
Example #5
def test_root_input_manager_add_input_metadata():
    @root_input_manager
    def my_root_input_manager(context):
        context.add_input_metadata(metadata={"foo": "bar"})
        context.add_input_metadata(metadata={"baz": "qux"})
        return []

    @op(ins={"input1": In(root_manager_key="my_root_input_manager")})
    def my_op(_, input1):
        return input1

    @job(resource_defs={"my_root_input_manager": my_root_input_manager})
    def my_job():
        my_op()

    result = my_job.execute_in_process()
    loaded_input_event = [
        event for event in result.all_events if event.event_type_value == "LOADED_INPUT"
    ][0]
    metadata_entries = loaded_input_event.event_specific_data.metadata_entries
    assert len(metadata_entries) == 2
    assert metadata_entries[0].label == "foo"
    assert metadata_entries[1].label == "baz"
Example #6
def test_dataframe_csv_from_inputs():
    called = {}

    @op(ins={"df": In(DataFrame)})
    def df_as_config(_context, df):
        assert df.to_dict("list") == {"num1": [1, 3], "num2": [2, 4]}
        called["yup"] = True

    @graph
    def test_graph():
        return df_as_config()

    result = test_graph.execute_in_process(
        run_config={
            "ops": {
                "df_as_config": {
                    "inputs": {"df": {"csv": {"path": file_relative_path(__file__, "num.csv")}}}
                }
            }
        },
    )

    assert result.success
    assert called["yup"]
Example #7
def test_dataframe_pickle_from_inputs():
    # python2.7 doesn't like DataFrame pickles created from python3.x and vice versa, so we create them on the fly
    pickle_path = file_relative_path(__file__, "num.pickle")
    df = pd.DataFrame({"num1": [1, 3], "num2": [2, 4]})
    df.to_pickle(pickle_path)

    called = {}

    @op(ins={"df": In(DataFrame)})
    def df_as_config(_context, df):
        assert df.to_dict("list") == {"num1": [1, 3], "num2": [2, 4]}
        called["yup"] = True

    @graph
    def test_graph():
        df_as_config()

    result = test_graph.execute_in_process(
        run_config={
            "ops": {
                "df_as_config": {
                    "inputs": {
                        "df": {
                            "pickle": {
                                "path": pickle_path
                            }
                        }
                    }
                }
            }
        })

    assert result.success
    assert called["yup"]

    os.remove(pickle_path)
Example #8
def test_dataframe_csv_missing_input_collision():
    called = {}

    @op(out=Out(DataFrame))
    def df_as_output(_context):
        return pd.DataFrame()

    @op(ins={"df": In(DataFrame)})
    def df_as_input(_context, df):  # pylint: disable=W0613
        called["yup"] = True

    @graph
    def overlapping():
        return df_as_input(df_as_output())

    with pytest.raises(DagsterInvalidConfigError) as exc_info:
        overlapping.execute_in_process(run_config={
            "ops": {
                "df_as_input": {
                    "inputs": {
                        "df": {
                            "csv": {
                                "path":
                                file_relative_path(__file__, "num.csv")
                            }
                        }
                    }
                }
            }
        }, )

    assert (
        'Error 1: Received unexpected config entry "inputs" at path root:ops:df_as_input.'
        in str(exc_info.value))

    assert "yup" not in called
Example #9
def make_op(
    name,
    asset_key=None,
    error_rate=None,
    data_size_fn=None,
    sleep_factor=None,
    has_input=False,
):
    @op(
        name=name,
        config_schema={"partition": str},
        ins={"the_input": In(dagster_type=Nothing)} if has_input else {},
        out=Out(dagster_type=Nothing),
    )
    def made_op(context):
        partition_date = datetime.strptime(context.op_config["partition"], DEFAULT_DATE_FORMAT)
        if data_size_fn:
            data_size = data_size_fn(partition_date)
            sleep_time = sleep_factor * data_size

            time.sleep(sleep_time)

        rand = random()
        if error_rate and rand < error_rate:
            raise IntentionalRandomFailure(f"random {rand} < error rate {error_rate}")

        if asset_key:
            metadata = {"Data size (bytes)": data_size} if data_size_fn else None

            yield AssetMaterialization(
                asset_key=asset_key,
                metadata=metadata,
                partition=context.op_config.get("partition"),
            )

    return made_op
Example #10
from dagster import In, Out, graph, op
from dagster.core.definitions.output import GraphOut


@op
def do_something():
    pass


@op
def one():
    return 1


@op(ins={"arg1": In(int)}, out=Out(int))
def do_something_else(arg1):
    return arg1


@graph(out=GraphOut())
def do_two_things(arg1):
    do_something()
    return do_something_else(arg1)


@op
def do_yet_more(arg1):
    assert arg1 == 1


@graph
def do_it_all():
    # Assumed wiring based on the ops above: one() feeds the nested graph, whose result feeds do_yet_more
    do_yet_more(do_two_things(one()))
Example #11

@op
def my_input_op(abc, xyz):
    pass


# end_input_op_marker

# start_typed_input_op_marker

MyDagsterType = DagsterType(type_check_fn=lambda _, value: value % 2 == 0,
                            name="MyDagsterType")


@op(ins={"abc": In(dagster_type=MyDagsterType)})
def my_typed_input_op(abc):
    pass


# end_typed_input_op_marker

# start_output_op_marker


@op
def my_output_op():
    return 5


# end_output_op_marker
Example #12
from dagster import Array, Bool, Field, In, Noneable, Nothing, Out, Output, op
from dagster_fivetran.resources import DEFAULT_POLL_INTERVAL
from dagster_fivetran.types import FivetranOutput
from dagster_fivetran.utils import generate_materializations


@op(
    required_resource_keys={"fivetran"},
    ins={"start_after": In(Nothing)},
    out=Out(
        FivetranOutput,
        description=
        "Parsed json dictionary representing the details of the Fivetran connector after "
        "the sync successfully completes. "
        "See the [Fivetran API Docs](https://fivetran.com/docs/rest-api/connectors#retrieveconnectordetails) "
        "to see detailed information on this response.",
    ),
    config_schema={
        "connector_id":
        Field(
            str,
            is_required=True,
            description=
            "The Fivetran Connector ID that this op will sync. You can retrieve this "
            'value from the "Setup" tab of a given connector in the Fivetran UI.',
        ),
        "poll_interval":
        Field(
            float,
            default_value=DEFAULT_POLL_INTERVAL,
            description=
Example #13
# pylint: disable=unused-argument
from dagster import In, job, op, root_input_manager


def read_dataframe_from_table(**_kwargs):
    pass


@op(ins={"dataframe": In(root_manager_key="my_root_manager")})
def my_op(dataframe):
    """Do some stuff"""


# def_start_marker
@root_input_manager(input_config_schema={"table_name": str})
def table_loader(context):
    return read_dataframe_from_table(name=context.config["table_name"])


# def_end_marker


def execute_with_config():
    # execute_start_marker
    @job(resource_defs={"my_root_manager": table_loader})
    def my_job():
        my_op()

    my_job.execute_in_process(run_config={
        "ops": {
            "my_op": {
                # The input config is consumed by table_loader; the table name value is illustrative
                "inputs": {"dataframe": {"table_name": "some_table"}},
            },
        },
    })
Example #14
from typing import Tuple

from dagster import In, Out, op


@op(
    ins={"arg1": In(metadata={"a": "b"})},
    out={"out1": Out(metadata={"c": "d"}), "out2": Out(metadata={"e": "f"})},
)
def do_something(arg1: str) -> Tuple[int, int]:
    return int(arg1), int(arg1) + 1
Example #15
# isort: skip_file

from dagster import DagsterType, In, Out, op

# start_basic_even_type
EvenDagsterType = DagsterType(
    name="EvenDagsterType",
    type_check_fn=lambda _, value: isinstance(value, int) and value % 2 == 0,
)
# end_basic_even_type


# start_basic_even_type_no_annotations
@op(
    ins={"num": In(EvenDagsterType)},
    out=Out(EvenDagsterType),
)
def double_even(num):
    return num


# end_basic_even_type_no_annotations


# start_basic_even_type_with_annotations
@op(
    ins={"num": In(EvenDagsterType)},
    out=Out(EvenDagsterType),
)
def double_even_with_annotations(num: int) -> int:
    return num
Example #16
import dagster_pandas as dagster_pd
import dagstermill

from dagster import In, InputDefinition, Out, OutputDefinition, file_relative_path, op

from ...data_frame import DataFrame


@op(
    ins={"num": In(dagster_pd.DataFrame)},
    out=Out(dagster_pd.DataFrame),
)
def sum_op(num):
    sum_df = num.copy()
    sum_df["sum"] = sum_df["num1"] + sum_df["num2"]
    return sum_df


@op(
    ins={"sum_df": In(dagster_pd.DataFrame)},
    out=Out(dagster_pd.DataFrame),
)
def sum_sq_op(sum_df):
    sum_sq_df = sum_df.copy()
    sum_sq_df["sum_sq"] = sum_df["sum"]**2
    return sum_sq_df


@op(
    ins={"sum_sq_op": In(dagster_pd.DataFrame)},
    out=Out(dagster_pd.DataFrame),
Example #17
from pandas import DataFrame, Series

from dagster import In, Out, Output, op
from dagster.utils.log import get_dagster_logger

logger = get_dagster_logger()


@op(
    ins={
        "stories":
        In(
            root_manager_key="warehouse_loader",
            metadata={
                "table": "hackernews.stories",
                "columns": ["id"]
            },
        ),
        "comments":
        In(
            root_manager_key="warehouse_loader",
            metadata={
                "table": "hackernews.comments",
                "columns": ["id", "user_id", "parent"],
            },
        ),
    },
    out=Out(io_manager_key="warehouse_io_manager",
            metadata={"table": "hackernews.comment_stories"}),
)
def build_comment_stories(stories: DataFrame, comments: DataFrame):
Example #18

model_perf_notebook = define_dagstermill_solid(
    "recommender_model_perf",
    notebook_path=file_relative_path(__file__, "../notebooks/recommender_model_perf.ipynb"),
    input_defs=[InputDefinition(dagster_type=TruncatedSVD, name="recommender_model")],
    output_notebook_name="perf_notebook",
)


@op(
    ins={
        "story_titles": In(
            root_manager_key="warehouse_loader",
            metadata={
                "table": "hackernews.stories",
                "columns": ["id", "title"],
            },
        ),
    },
    out=Out(
        dagster_type=DataFrame,
        io_manager_key="warehouse_io_manager",
        metadata={"table": "hackernews.component_top_stories"},
    ),
)
def build_component_top_stories(
    model: TruncatedSVD, user_story_matrix: IndexedCooMatrix, story_titles: DataFrame
):
    """
    For each component in the collaborative filtering model, finds the titles of the top stories
Example #19
    "raw_fans",
    "raw_event_admins",
    "raw_group_admins",
]


def create_raw_file_ops():
    return list(map(create_raw_file_op, raw_files))


def input_name_for_raw_file(raw_file):
    return raw_file + "_ready"


@op(
    ins={"start": In(Nothing)},
    out=Out(dagster_type=Nothing),
    description="Load a bunch of raw tables from corresponding files",
)
def many_table_materializations(_context):
    with open(file_relative_path(__file__, MARKDOWN_EXAMPLE), "r") as f:
        md_str = f.read()
        for table in raw_tables:
            yield AssetMaterialization(
                asset_key="table_info",
                metadata={
                    "table_name": table,
                    "table_path": EventMetadata.path(f"/path/to/{table}"),
                    "table_data": {"name": table},
                    "table_name_big": EventMetadata.url(f"https://bigty.pe/{table}"),
                    "table_blurb": EventMetadata.md(md_str),
Example #20
def test_ins_dagster_types():
    assert In(dagster_type=None)
    assert In(dagster_type=int)
    assert In(dagster_type=List)
    assert In(dagster_type=List[int])  # typing type
    assert In(dagster_type=Int)  # dagster type
Example #21
class MockDatabase:
    def execute(self, query: str):
        pass


def get_database_connection():
    return MockDatabase()


# start_marker
from dagster import In, Nothing, job, op


@op
def create_table_1():
    get_database_connection().execute(
        "create table_1 as select * from some_source_table")


@op(ins={"start": In(Nothing)})
def create_table_2():
    get_database_connection().execute(
        "create table_2 as select * from table_1")


@job
def nothing_dependency():
    create_table_2(start=create_table_1())


# end_marker
Example #22
def test_io_manager_add_input_metadata():
    class MyIOManager(IOManager):
        def handle_output(self, context, obj):
            pass

        def load_input(self, context):
            context.add_input_metadata(metadata={"foo": "bar"})
            context.add_input_metadata(metadata={"baz": "qux"})

            observations = context.get_observations()
            assert observations[0].asset_key == context.asset_key
            assert observations[0].metadata_entries[0].label == "foo"
            assert observations[1].metadata_entries[0].label == "baz"
            return 1

    @io_manager
    def my_io_manager(_):
        return MyIOManager()

    in_asset_key = AssetKey(["a", "b"])
    out_asset_key = AssetKey(["c", "d"])

    @op(out=Out(asset_key=out_asset_key))
    def before():
        pass

    @op(ins={"a": In(asset_key=in_asset_key)}, out={})
    def after(a):
        del a

    @job(resource_defs={"io_manager": my_io_manager})
    def my_job():
        after(before())

    get_observation = lambda event: event.event_specific_data.asset_observation

    result = my_job.execute_in_process()
    observations = [
        event for event in result.all_node_events if event.event_type_value == "ASSET_OBSERVATION"
    ]

    # first observation
    assert observations[0].step_key == "after"
    assert get_observation(observations[0]) == AssetObservation(
        asset_key=in_asset_key, metadata={"foo": "bar"}
    )
    # second observation
    assert observations[1].step_key == "after"
    assert get_observation(observations[1]) == AssetObservation(
        asset_key=in_asset_key, metadata={"baz": "qux"}
    )

    # confirm loaded_input event contains metadata
    loaded_input_event = [
        event for event in result.all_events if event.event_type_value == "LOADED_INPUT"
    ][0]
    assert loaded_input_event
    loaded_input_event_metadata = loaded_input_event.event_specific_data.metadata_entries
    assert len(loaded_input_event_metadata) == 2
    assert loaded_input_event_metadata[0].label == "foo"
    assert loaded_input_event_metadata[1].label == "baz"
Example #23
from dagster import Float, In, Int, List, Out, graph, op


@op(out=Out(Int))
def emit_one():
    return 1


@op(ins={"numbers": In(List[Int])}, out=Out(Int))
def add(numbers):
    return sum(numbers)


@op(ins={"num": In(Float)}, out=Out(Float))
def div_two(num):
    return num / 2


@graph
def emit_two():
    return add([emit_one(), emit_one()])


@graph
def add_four(num):
    return add([emit_two(), emit_two(), num])


@graph
def div_four(num):
    return div_two(num=div_two(num))
Example #24
def test_custom_dagster_dataframe_parametrizable_input():
    @dagster_type_loader(
        Selector({
            "door_a": Field(str),
            "door_b": Field(str),
            "door_c": Field(str),
        }))
    def silly_loader(_, config):
        which_door = list(config.keys())[0]
        if which_door == "door_a":
            return DataFrame({"foo": ["goat"]})
        elif which_door == "door_b":
            return DataFrame({"foo": ["car"]})
        elif which_door == "door_c":
            return DataFrame({"foo": ["goat"]})
        raise DagsterInvariantViolationError(
            "You did not pick a door. You chose: {which_door}".format(
                which_door=which_door))

    @dagster_type_materializer(
        Selector({
            "devnull": Field(str),
            "nothing": Field(str)
        }))
    def silly_materializer(_, _config, _value):
        return AssetMaterialization(asset_key="nothing",
                                    description="just one of those days")

    TestDataFrame = create_dagster_pandas_dataframe_type(
        name="TestDataFrame",
        columns=[
            PandasColumn.exists("foo"),
        ],
        loader=silly_loader,
        materializer=silly_materializer,
    )

    @op(
        ins={"df": In(TestDataFrame)},
        out=Out(TestDataFrame),
    )
    def did_i_win(_, df):
        return df

    @graph
    def basic_graph():
        did_i_win()

    result = basic_graph.execute_in_process(
        run_config={
            "ops": {
                "did_i_win": {
                    "inputs": {
                        "df": {
                            "door_a": "bar"
                        }
                    },
                    "outputs": [{
                        "result": {
                            "devnull": "baz"
                        }
                    }],
                }
            }
        })
    assert result.success
    output_df = result.output_for_node("did_i_win")
    assert isinstance(output_df, DataFrame)
    assert output_df["foo"].tolist() == ["goat"]
    materialization_events = [
        event for event in result.all_node_events
        if event.is_step_materialization
    ]
    assert len(materialization_events) == 1
    assert materialization_events[
        0].event_specific_data.materialization.label == "nothing"
Example #25

# We've added a Dagster type for this op's output
@op(out=Out(TripsDataFrame), config_schema={"clean": Field(bool, False)})
def load_trips(context):
    df = pd.read_csv(
        "./ebike_trips.csv",
        parse_dates=["start_time", "end_time"],
    )
    if context.op_config["clean"]:
        df = df[pd.notna(df.end_time)]
    return df


# We've added a Dagster type for this op's input
@op(ins={"trips": In(TripsDataFrame)})
def generate_plot(context, trips):
    minute_lengths = [
        x.total_seconds() / 60 for x in trips.end_time - trips.start_time
    ]
    bin_edges = np.histogram_bin_edges(minute_lengths, 15)
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.set(title="Trip lengths", xlabel="Minutes", ylabel="Count")
    ax.hist(minute_lengths, bins=bin_edges)
    fig.savefig("trip_lengths.png")
    context.log_event(
        AssetMaterialization(asset_key="trip_dist_plot",
                             description="Distribution of trip lengths."))


@job
def generate_trip_plots():
    # Job name is illustrative; it wires the typed output of load_trips into generate_plot
    generate_plot(load_trips())
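TripsDataFrame itself is not shown in this excerpt. A minimal sketch of how such a typed pandas DataFrame could be declared with dagster_pandas, assuming only the two datetime columns the ops above actually use:

from dagster_pandas import PandasColumn, create_dagster_pandas_dataframe_type

TripsDataFrame = create_dagster_pandas_dataframe_type(
    name="TripsDataFrame",
    columns=[
        # Columns inferred from the ops above; the real type likely validates more columns
        PandasColumn.datetime_column("start_time"),
        PandasColumn.datetime_column("end_time"),
    ],
)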
Example #26
            col("state"),
            lit(", Zip: "),
            col("zip"),
        ),
    )
    consolidated_df = consolidated_df.select(col("station"),
                                             col("full_address"))
    outpath = os.path.join(context.resources.savedir, "stationcons.csv")
    df_to_csv(consolidated_df, outpath)
    return outpath


@op(
    config_schema={"version_salt": Field(String)},
    ins={
        "maxtemp_path": In(str),
        "stationcons_path": In(str)
    },
    required_resource_keys={"savedir"},
)
def combine_dfs(context, maxtemp_path, stationcons_path):
    maxtemps = df_from_csv(maxtemp_path)
    stationcons = df_from_csv(stationcons_path)
    joined_temps = maxtemps.join(stationcons,
                                 col("airport_code") == col("station")).select(
                                     col("full_address"), col("temperature_f"))
    outpath = os.path.join(context.resources.savedir, "temp_for_place.csv")
    df_to_csv(joined_temps, outpath)
    return outpath

Example #27
        },
        input_mappings=None,
        output_mappings=None,
        config=None,
    )

    result = single_op_graph.execute_in_process()

    return result.output_for_node(op_inst.name)


def get_num_csv_environment(ops_config):
    return {"ops": ops_config}


@op(ins={"num_csv": In(DataFrame)}, out=Out(DataFrame))
def sum_table(_, num_csv):
    check.inst_param(num_csv, "num_csv", pd.DataFrame)
    num_csv["sum"] = num_csv["num1"] + num_csv["num2"]
    return num_csv


@op(ins={"sum_df": In(DataFrame)}, out=Out(DataFrame))
def sum_sq_table(sum_df):
    sum_df["sum_squared"] = sum_df["sum"] * sum_df["sum"]
    return sum_df


@op(
    ins={"sum_table_renamed": In(DataFrame)},
    out=Out(DataFrame),
Example #28
import random
import time

from dagster import Field, In, Out, Output, graph, op


@op(
    ins={"chase_duration": In(int)},
    out=Out(int),
    config_schema={
        "chase_size":
        Field(
            int,
            default_value=100000,
            is_required=False,
            description="How big should the pointer chase array be?",
        )
    },
)
def hammer_op(context, chase_duration):
    """what better way to do a lot of gnarly work than to pointer chase?"""
    ptr_length = context.op_config["chase_size"]

    data = list(range(0, ptr_length))
    random.shuffle(data)

    curr = random.randint(0, ptr_length - 1)
    # and away we go
    start_time = time.time()
    while (time.time() - start_time) < chase_duration:
        curr = data[curr]
Example #29
def airflow_operator_to_op(
    airflow_op: BaseOperator,
    connections: Optional[List[Connection]] = None,
    capture_python_logs=True,
    return_output=False,
) -> OpDefinition:
    """Construct a Dagster op corresponding to a given Airflow operator.

    The resulting op will execute the ``execute()`` method on the Airflow operator. Dagster and
    any dependencies required by the Airflow Operator must be available in the Python environment
    within which your Dagster ops execute.

    To specify Airflow connections utilized by the operator, instantiate and pass Airflow connection
    objects in a list to the ``connections`` parameter of this function.

    .. code-block:: python

        http_task = SimpleHttpOperator(task_id="my_http_task", endpoint="foo")
        connections = [Connection(conn_id="http_default", host="https://mycoolwebsite.com")]
        dagster_op = airflow_operator_to_op(http_task, connections=connections)

    In order to specify extra parameters to the connection, call the ``set_extra()`` method
    on the instantiated Airflow connection:

    .. code-block:: python

        s3_conn = Connection(conn_id="s3_conn", conn_type="s3")
        s3_conn_extra = {
            "aws_access_key_id": "my_access_key",
            "aws_secret_access_key": "my_secret_access_key",
        }
        s3_conn.set_extra(json.dumps(s3_conn_extra))

    Args:
        airflow_op (BaseOperator): The Airflow operator to convert into a Dagster op
        connections (Optional[List[Connection]]): Airflow connections utilized by the operator.
        capture_python_logs (bool): If False, will not redirect Airflow logs to compute logs.
            (default: True)
        return_output (bool): If True, will return any output from the Airflow operator.
            (default: False)

    Returns:
        converted_op (OpDefinition): The generated Dagster op

    """
    airflow_op = check.inst_param(airflow_op, "airflow_op", BaseOperator)
    connections = check.opt_list_param(connections, "connections", Connection)

    @op(
        name=airflow_op.task_id,
        ins={"start_after": In(Nothing)},
        out=Out(Any) if return_output else Out(Nothing),
    )
    def converted_op(context):
        conn_names = []
        for connection in connections:
            conn_name = f"AIRFLOW_CONN_{connection.conn_id}".upper()
            os.environ[conn_name] = connection.get_uri()
            conn_names.append(conn_name)

        if capture_python_logs:
            # Airflow has local logging configuration that may set logging.Logger.propagate
            # to be false. We override the logger object and replace it with DagsterLogManager.
            airflow_op._log = context.log  # pylint: disable=protected-access
            # Airflow operators and hooks use separate logger objects. We add a handler to
            # receive logs from hooks.
            with replace_airflow_logger_handlers(
                context.log._dagster_handler  # pylint: disable=protected-access
            ):
                output = airflow_op.execute({})
        else:
            output = airflow_op.execute({})

        for conn_name in conn_names:
            os.environ.pop(conn_name)

        if return_output:
            return output

    return converted_op
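A quick usage sketch for the function above (the Airflow task and Dagster job names are illustrative, and it assumes Airflow 2.x with BashOperator available alongside dagster-airflow):

from airflow.operators.bash import BashOperator
from dagster import job

echo_task = BashOperator(task_id="echo_hello", bash_command="echo hello")
echo_hello = airflow_operator_to_op(echo_task, return_output=True)


@job
def run_echo():
    # The generated op is named after the Airflow task_id ("echo_hello")
    echo_hello()


# run_echo.execute_in_process()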
Example #30
)
def multiply_the_word_slow(context, word):
    time.sleep(context.solid_config["sleep_time"])
    return word * context.solid_config["factor"]


@lambda_solid(input_defs=[InputDefinition("word")])
def count_letters(word):
    counts = defaultdict(int)
    for letter in word:
        counts[letter] += 1
    return dict(counts)


@op(
    ins={"word": In(String)},
)
def always_fail(context, word):
    raise Exception("Op Exception Message")


@op(
    ins={"word": In(String)},
    config_schema={"factor": IntSource},
)
def multiply_the_word_op(context, word):
    return word * context.solid_config["factor"]


@op(ins={"word": In()})
def count_letters_op(word):