def __init__(self, name, input_defs, output_defs, description=None, metadata=None, positional_inputs=None):
    """Validate and store the core data for this definition.

    Args:
        name: Definition name; validated by check_valid_name.
        input_defs: Input definitions; names must be unique.
        output_defs: Output definitions; names must be unique.
        description (Optional[str]): Human-readable description.
        metadata (Optional[dict]): Arbitrary str-keyed metadata.
        positional_inputs (Optional[List[str]]): Explicit positional ordering
            of inputs; defaults to the order of input_defs.
    """
    self._name = check_valid_name(name)
    self._description = check.opt_str_param(description, 'description')
    self._metadata = check.opt_dict_param(metadata, 'metadata', key_type=str)

    self._input_defs = frozenlist(input_defs)
    self._input_dict = frozendict({inp.name: inp for inp in input_defs})
    # A shorter dict than list means two inputs shared a name.
    check.invariant(len(self._input_defs) == len(self._input_dict), 'Duplicate input def names')

    self._output_defs = frozenlist(output_defs)
    self._output_dict = frozendict({out.name: out for out in output_defs})
    check.invariant(len(self._output_defs) == len(self._output_dict), 'Duplicate output def names')

    check.opt_list_param(positional_inputs, 'positional_inputs', str)
    if positional_inputs is not None:
        self._positional_inputs = positional_inputs
    else:
        self._positional_inputs = [inp.name for inp in input_defs]
def __init__(self, name, input_defs, output_defs, description=None, metadata=None):
    """Validate and store name, description, metadata, and frozen i/o defs.

    Input and output definition names must be unique; duplicates trip the
    invariant checks below.
    """
    self._name = check_valid_name(name)
    self._description = check.opt_str_param(description, 'description')
    self._metadata = check.opt_dict_param(metadata, 'metadata', key_type=str)

    self._input_defs = frozenlist(input_defs)
    self._input_dict = frozendict({inp.name: inp for inp in input_defs})
    # Name collisions collapse dict entries, so compare lengths to detect them.
    check.invariant(len(self._input_defs) == len(self._input_dict), 'Duplicate input def names')

    self._output_defs = frozenlist(output_defs)
    self._output_dict = frozendict({out.name: out for out in output_defs})
    check.invariant(len(self._output_defs) == len(self._output_dict), 'Duplicate output def names')
def __init__(
    self,
    name,
    input_defs,
    output_defs,
    description=None,
    tags=None,
    positional_inputs=None,
):
    """Validate and store definition data, tags, and positional input order.

    Args:
        name: Definition name; validated by check_valid_name.
        input_defs: Input definitions; names must be unique.
        output_defs: Output definitions; names must be unique.
        description (Optional[str]): Human-readable description.
        tags (Optional[dict]): Tags, normalized by validate_tags.
        positional_inputs (Optional[List[str]]): Explicit positional ordering
            of inputs; defaults to the order of input_defs.
    """
    self._name = check_valid_name(name)
    self._description = check.opt_str_param(description, "description")
    self._tags = validate_tags(tags)

    self._input_defs = frozenlist(input_defs)
    self._input_dict = frozendict({inp.name: inp for inp in input_defs})
    # A shorter dict than list means two inputs shared a name.
    check.invariant(len(self._input_defs) == len(self._input_dict), "Duplicate input def names")

    self._output_defs = frozenlist(output_defs)
    self._output_dict = frozendict({out.name: out for out in output_defs})
    check.invariant(
        len(self._output_defs) == len(self._output_dict), "Duplicate output def names"
    )

    check.opt_list_param(positional_inputs, "positional_inputs", str)
    if positional_inputs is None:
        self._positional_inputs = [inp.name for inp in input_defs]
    else:
        self._positional_inputs = positional_inputs
def __init__(self, name, input_dict, output_dict, description=None, metadata=None):
    """Validate and store name/description/metadata plus frozen copies of the
    pre-built input and output dicts (public attributes, no uniqueness check).
    """
    self.name = check_valid_name(name)
    self.description = check.opt_str_param(description, 'description')
    self.metadata = check.opt_dict_param(metadata, 'metadata', key_type=str)
    # Freeze the caller-supplied dicts to guard against downstream mutation.
    self.input_dict = frozendict(input_dict)
    self.output_dict = frozendict(output_dict)
def _recurse_in_to_map(context: TraversalContext, config_value: Any) -> EvaluateValueResult:
    """Post-process a validated MAP config value, recursing into each entry.

    Returns an EvaluateValueResult wrapping a frozendict of the processed
    entries, or the accumulated errors from any failing entry.
    """
    check.invariant(
        context.config_type.kind == ConfigTypeKind.MAP,
        "Unexpected non map type",
    )

    # Empty or absent map: nothing to recurse into.
    if not config_value:
        return EvaluateValueResult.for_value({})

    config_value = cast(Dict[object, object], config_value)

    # Null keys/members should already have been rejected during validation;
    # finding one here is a framework error, not a user error.
    if any((ck is None for ck in config_value.keys())):
        check.failed("Null map key not caught in validation")
    if context.config_type.inner_type.kind != ConfigTypeKind.NONEABLE:  # type: ignore
        if any((cv is None for cv in config_value.values())):
            check.failed("Null map member not caught in validation")

    # Recurse into each entry with a per-key context so error paths are precise.
    results = {
        key: _recursively_process_config(context.for_map(key), item)
        for key, item in config_value.items()
    }

    errors = []
    for result in results.values():
        if not result.success:
            errors += cast(List[EvaluationError], result.errors)

    if errors:
        return EvaluateValueResult.for_errors(errors)

    return EvaluateValueResult.for_value(
        frozendict({key: result.value for key, result in results.items()})
    )
def validate_map_config(
    context: ValidationContext, config_value: object
) -> EvaluateValueResult[Dict[str, object]]:
    """Validate a user-provided value against a MAP config type.

    Every key and every value is validated independently; all errors are
    collected rather than stopping at the first failure.
    """
    check.inst_param(context, "context", ValidationContext)
    check.invariant(context.config_type_snap.kind == ConfigTypeKind.MAP)
    check.not_none_param(config_value, "config_value")

    # The top-level value for a map must be a dict.
    if not isinstance(config_value, dict):
        return EvaluateValueResult.for_error(create_map_error(context, config_value))

    config_value = cast(Dict[object, object], config_value)

    # Validate all keys, then all values, each under its own child context.
    evaluation_results = [
        _validate_config(context.for_map_key(key), key) for key in config_value.keys()
    ] + [
        _validate_config(context.for_map_value(key), config_item)
        for key, config_item in config_value.items()
    ]

    errors = []
    for result in evaluation_results:
        if not result.success:
            errors += cast(List, result.errors)

    # Success iff no child validation produced errors; freeze the value to
    # prevent downstream mutation.
    return EvaluateValueResult(not bool(errors), frozendict(config_value), errors)  # type: ignore
def test_opt_nullable_dict_param():
    """opt_nullable_dict_param: None passes through, dict-likes are accepted,
    subclass values satisfy value_class, and non-dicts raise."""
    # None is allowed and preserved (the "nullable" part of the contract).
    assert check.opt_nullable_dict_param(None, 'opt_nullable_dict_param') is None
    # Empty dict and frozendict both compare equal to {}.
    assert check.opt_nullable_dict_param({}, 'opt_nullable_dict_param') == {}
    assert check.opt_nullable_dict_param(frozendict(), 'opt_nullable_dict_param') == {}

    payload = {'a': 2}
    assert check.opt_nullable_dict_param(payload, 'opt_nullable_dict_param') == payload

    class Foo(object):
        pass

    class Bar(Foo):
        pass

    # A value that is a subclass of value_class is accepted.
    subclass_dict = {'a': Bar}
    assert (
        check.opt_nullable_dict_param(subclass_dict, 'opt_nullable_dict_param', value_class=Foo)
        == subclass_dict
    )

    with pytest.raises(ParameterCheckError):
        check.opt_nullable_dict_param(1, 'opt_nullable_dict_param')

    with pytest.raises(ParameterCheckError):
        check.opt_nullable_dict_param('foo', 'opt_nullable_dict_param')
def add_run_tags(self, run_id, new_tags):
    """Merge new_tags into an existing run's tags, updating both the stored
    run and the run-tags index. Raises KeyError if run_id is unknown."""
    check.str_param(run_id, "run_id")
    check.dict_param(new_tags, "new_tags", key_type=str, value_type=str)

    existing_run = self._runs[run_id]
    # New tags win over existing ones on key collisions.
    merged_tags = merge_dicts(existing_run.tags if existing_run.tags else {}, new_tags)

    self._runs[run_id] = existing_run.with_tags(merged_tags)
    self._run_tags[run_id] = frozendict(merged_tags)
def test_opt_nullable_dict_param():
    """Exercise opt_nullable_dict_param: pass-through of None, acceptance of
    dict-likes and subclass values, rejection of non-dicts."""
    assert check.opt_nullable_dict_param(None, "opt_nullable_dict_param") is None
    assert check.opt_nullable_dict_param({}, "opt_nullable_dict_param") == {}
    assert check.opt_nullable_dict_param(frozendict(), "opt_nullable_dict_param") == {}

    simple = {"a": 2}
    assert check.opt_nullable_dict_param(simple, "opt_nullable_dict_param") == simple

    class Foo(object):
        pass

    class Bar(Foo):
        pass

    # Subclass instances/classes satisfy the value_class constraint.
    with_subclass = {"a": Bar}
    assert (
        check.opt_nullable_dict_param(with_subclass, "opt_nullable_dict_param", value_class=Foo)
        == with_subclass
    )

    for bad_value in (1, "foo"):
        with pytest.raises(ParameterCheckError):
            check.opt_nullable_dict_param(bad_value, "opt_nullable_dict_param")
def _recurse_in_to_selector(context: TraversalContext, config_value: Any) -> EvaluateValueResult:
    """Post-process a validated SELECTOR config value.

    A selector has exactly one active field. If no value was provided, the
    single defined field's default is used (validation has already ensured
    that case is legal). Returns a frozendict {field_name: processed_value}
    on success, or the child's failed result.
    """
    check.invariant(
        context.config_type.kind == ConfigTypeKind.SELECTOR,
        "Non-selector not caught in validation",
    )

    if config_value:
        # Validation guarantees exactly one selected field at this point.
        check.invariant(config_value and len(config_value) == 1)
        field_name, incoming_field_value = ensure_single_item(config_value)
    else:
        # No value provided: fall back to the sole defined field and its
        # default, if one was declared.
        field_name, field_def = ensure_single_item(context.config_type.fields)
        incoming_field_value = field_def.default_value if field_def.default_provided else None

    field_def = context.config_type.fields[field_name]

    field_evr = _recursively_process_config(
        context.for_field(field_def, field_name),
        # A bare selector key whose child is dict-shaped recurses with {} so
        # the child's field defaults can fill in.
        {}
        if incoming_field_value is None and ConfigTypeKind.has_fields(field_def.config_type.kind)
        else incoming_field_value,
    )
    if field_evr.success:
        return EvaluateValueResult.for_value(frozendict({field_name: field_evr.value}))

    return field_evr
def validate_selector_config(
    context: ValidationContext, config_value: object
) -> EvaluateValueResult[Dict[str, object]]:
    """Validate a user-provided value against a SELECTOR config type.

    Exactly one field of the selector may be provided; that field's value is
    then validated recursively. Returns {field_name: validated_value} on
    success.
    """
    check.inst_param(context, "context", ValidationContext)
    check.param_invariant(context.config_type_snap.kind == ConfigTypeKind.SELECTOR, "selector_type")
    check.not_none_param(config_value, "config_value")

    # Special case the empty dictionary, meaning no values provided for the
    # value of the selector. E.g. {'logging': {}}
    # If there is a single field defined on the selector and if it is optional
    # it passes validation. (e.g. a single logger "console")
    if config_value == {}:
        return _validate_empty_selector_config(context)  # type: ignore

    # Now we ensure that the user-provided config has only a single entry
    # and then continue the validation pass

    if not isinstance(config_value, dict):
        return EvaluateValueResult.for_error(create_selector_type_error(context, config_value))

    if len(config_value) > 1:
        return EvaluateValueResult.for_error(
            create_selector_multiple_fields_error(context, config_value)
        )

    field_name, field_value = ensure_single_item(config_value)

    if not context.config_type_snap.has_field(field_name):
        return EvaluateValueResult.for_error(create_field_not_defined_error(context, field_name))

    field_snap = context.config_type_snap.get_field(field_name)

    child_evaluate_value_result = _validate_config(
        context.for_field_snap(field_snap),
        # This is a very particular special case where we want someone
        # to be able to select a selector key *without* a value
        #
        # e.g.
        # storage:
        #   filesystem:
        #
        # And we want the default values of the child elements of filesystem:
        # to "fill in"
        {}
        if field_value is None
        and ConfigTypeKind.has_fields(
            context.config_schema_snapshot.get_config_snap(field_snap.type_key).kind
        )
        else field_value,
    )

    if child_evaluate_value_result.success:
        return EvaluateValueResult.for_value(  # type: ignore
            frozendict({field_name: child_evaluate_value_result.value})
        )
    else:
        return child_evaluate_value_result  # type: ignore
def __init__(self, name, input_defs, output_defs, description=None, metadata=None):
    """Validate and store definition data with public attributes.

    Unlike sibling variants, this one performs no duplicate-name invariant
    check, so colliding input/output names would silently collapse in the
    name-keyed dicts.
    """
    self.name = check_valid_name(name)
    self.description = check.opt_str_param(description, 'description')
    self.metadata = check.opt_dict_param(metadata, 'metadata', key_type=str)

    self.input_defs = frozenlist(input_defs)
    self.input_dict = frozendict({inp.name: inp for inp in input_defs})

    self.output_defs = frozenlist(output_defs)
    self.output_dict = frozendict({out.name: out for out in output_defs})
def add_run(self, pipeline_run):
    """Register a run, indexing its tags; duplicate run_ids trip an invariant."""
    check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)

    run_id = pipeline_run.run_id
    check.invariant(
        not self._runs.get(run_id),
        'Can not add same run twice for run_id {run_id}'.format(run_id=run_id),
    )

    self._runs[run_id] = pipeline_run
    # Only index tags when the run actually has some.
    tags = pipeline_run.tags
    if tags and len(tags) > 0:
        self._run_tags[run_id] = frozendict(tags)

    return pipeline_run
def _recurse_in_to_shape(context: TraversalContext, config_value: Any) -> EvaluateValueResult:
    """Post-process a validated shape (dict) config value.

    Applies defaults for omitted fields that declare one and recurses into
    each provided field. For permissive shapes, unknown fields pass through
    unchanged. Returns a frozendict of processed fields or accumulated errors.
    """
    check.invariant(ConfigTypeKind.is_shape(context.config_type.kind), "Unexpected non shape type")
    config_value = check.opt_dict_param(config_value, "config_value", key_type=str)

    fields = context.config_type.fields

    incoming_fields = config_value.keys()

    processed_fields = {}

    for expected_field, field_def in fields.items():
        if expected_field in incoming_fields:
            processed_fields[expected_field] = _recursively_process_config(
                context.for_field(field_def, expected_field), config_value[expected_field]
            )
        elif field_def.default_provided:
            # Omitted field with a declared default: recurse on the default.
            processed_fields[expected_field] = _recursively_process_config(
                context.for_field(field_def, expected_field), field_def.default_value
            )
        elif field_def.is_required:
            # Validation should have rejected this already; framework error.
            check.failed("Missing required composite member not caught in validation")

    # For permissive composite fields, we skip applying defaults because these fields are unknown
    # to us
    if context.config_type.kind == ConfigTypeKind.PERMISSIVE_SHAPE:
        defined_fields = fields.keys()
        extra_fields = [field for field in incoming_fields if field not in defined_fields]
        for extra_field in extra_fields:
            processed_fields[extra_field] = EvaluateValueResult.for_value(
                config_value[extra_field]
            )

    errors = []
    for result in processed_fields.values():
        if not result.success:
            for error in result.errors:
                errors.append(error)
    if errors:
        return EvaluateValueResult.for_errors(errors)

    return EvaluateValueResult.for_value(
        frozendict({key: result.value for key, result in processed_fields.items()})
    )
def _validate_shape_config(context, config_value, check_for_extra_incoming_fields):
    """Validate a dict config value against a shape type's field snapshots.

    Structural errors (wrong type, extra fields when strict, missing required
    fields) are collected first; then each provided field is validated
    recursively and all errors are reported together.
    """
    check.inst_param(context, "context", ValidationContext)
    check.not_none_param(config_value, "config_value")
    check.bool_param(check_for_extra_incoming_fields, "check_for_extra_incoming_fields")

    if config_value and not isinstance(config_value, dict):
        return EvaluateValueResult.for_error(create_dict_type_mismatch_error(context, config_value))

    field_snaps = context.config_type_snap.fields
    defined_field_names = {fs.name for fs in field_snaps}
    incoming_field_names = set(config_value.keys())

    errors = []

    # Strict shapes reject fields that are not part of the schema.
    if check_for_extra_incoming_fields:
        _append_if_error(
            errors,
            _check_for_extra_incoming_fields(context, defined_field_names, incoming_field_names),
        )

    _append_if_error(
        errors, _compute_missing_fields_error(context, field_snaps, incoming_field_names)
    )

    # dict is well-formed. now recursively validate all incoming fields
    field_errors = []
    for field_snap in context.config_type_snap.fields:
        name = field_snap.name
        if name in config_value:
            field_evr = _validate_config(context.for_field_snap(field_snap), config_value[name])
            if field_evr.errors:
                field_errors += field_evr.errors

    if field_errors:
        errors += field_errors

    if errors:
        return EvaluateValueResult.for_errors(errors)
    else:
        return EvaluateValueResult.for_value(frozendict(config_value))
def test_opt_nullable_dict_param():
    """opt_nullable_dict_param passes None through, normalizes dict-likes,
    and rejects non-dict values."""
    assert check.opt_nullable_dict_param(None, "opt_nullable_dict_param") is None
    assert check.opt_nullable_dict_param({}, "opt_nullable_dict_param") == {}
    assert check.opt_nullable_dict_param(frozendict(), "opt_nullable_dict_param") == {}

    payload = {"a": 2}
    assert check.opt_nullable_dict_param(payload, "opt_nullable_dict_param") == payload

    for bad_value in (1, "foo"):
        with pytest.raises(ParameterCheckError):
            check.opt_nullable_dict_param(bad_value, "opt_nullable_dict_param")
def _recurse_in_to_shape(context, config_value):
    """Post-process a validated shape (dict) config value, applying defaults
    for omitted optional fields and recursing into each provided field.

    Permissive shapes pass unknown fields through unchanged.
    """
    check.invariant(ConfigTypeKind.is_shape(context.config_type.kind), 'Unexpected non shape type')
    config_value = check.opt_dict_param(config_value, 'config_value', key_type=str)

    fields = context.config_type.fields

    incoming_fields = set(config_value.keys())

    processed_fields = {}

    for expected_field, field_def in fields.items():
        if expected_field in incoming_fields:
            processed_fields[expected_field] = _recursively_process_config(
                context.for_field(field_def, expected_field), config_value[expected_field]
            )
        elif field_def.default_provided:
            # Omitted field with a declared default: recurse on the default.
            processed_fields[expected_field] = _recursively_process_config(
                context.for_field(field_def, expected_field), field_def.default_value
            )
        elif not field_def.is_optional:
            # Validation should have rejected this already; framework error.
            check.failed('Missing non-optional composite member not caught in validation')

    # For permissive composite fields, we skip applying defaults because these fields are unknown
    # to us
    if context.config_type.kind == ConfigTypeKind.PERMISSIVE_SHAPE:
        defined_fields = set(fields.keys())
        extra_fields = incoming_fields - defined_fields
        for extra_field in extra_fields:
            processed_fields[extra_field] = EvaluateValueResult.for_value(
                config_value[extra_field]
            )

    # NOTE(review): this reads result.error (singular) while sibling versions
    # iterate result.errors (plural) -- confirm EvaluateValueResult exposes a
    # singular .error accessor in this revision.
    errors = [result.error for result in processed_fields.values() if not result.success]
    if errors:
        return EvaluateValueResult.for_errors(errors)

    return EvaluateValueResult.for_value(
        frozendict({key: result.value for key, result in processed_fields.items()})
    )
def _validate_shape_config(context, config_value, check_for_extra_incoming_fields):
    """Validate a dict config value against a shape type's field definitions.

    Structural errors (wrong type, extra fields when strict, missing fields)
    are collected first; then each provided field is validated recursively and
    all errors are reported together.
    """
    check.inst_param(context, 'context', ValidationContext)
    check.not_none_param(config_value, 'config_value')
    check.bool_param(check_for_extra_incoming_fields, 'check_for_extra_incoming_fields')

    if config_value and not isinstance(config_value, dict):
        return EvaluateValueResult.for_error(create_dict_type_mismatch_error(context, config_value))

    fields = context.config_type.fields
    defined_field_names = set(fields.keys())
    incoming_field_names = set(config_value.keys())

    errors = []

    # Strict shapes reject fields that are not part of the schema.
    if check_for_extra_incoming_fields:
        _append_if_error(
            errors,
            _check_for_extra_incoming_fields(context, defined_field_names, incoming_field_names),
        )

    _append_if_error(
        errors, _compute_missing_fields_error(context, fields, incoming_field_names)
    )

    # dict is well-formed. now recursively validate all incoming fields
    field_errors = []
    for name, field_def in context.config_type.fields.items():
        if name in config_value:
            field_evr = _validate_config(context.for_field(field_def, name), config_value[name])
            if field_evr.errors:
                field_errors += field_evr.errors

    if field_errors:
        errors += field_errors

    if errors:
        return EvaluateValueResult.for_errors(errors)
    else:
        return EvaluateValueResult.for_value(frozendict(config_value))
def add_run(self, pipeline_run):
    """Register a run after duplicate and snapshot-existence checks.

    Raises:
        DagsterRunAlreadyExists: if a run with this run_id is already stored.
        DagsterSnapshotDoesNotExist: if the run references a pipeline
            snapshot id that is not in storage.
    """
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)

    run_id = pipeline_run.run_id
    if self._runs.get(run_id):
        raise DagsterRunAlreadyExists(
            "Can not add same run twice for run_id {run_id}".format(run_id=run_id),
        )

    snapshot_id = pipeline_run.pipeline_snapshot_id
    if snapshot_id and not self.has_pipeline_snapshot(snapshot_id):
        raise DagsterSnapshotDoesNotExist(
            "pipeline_snapshot_id {ss_id} does not exist in run storage.".format(ss_id=snapshot_id)
        )

    self._runs[run_id] = pipeline_run
    # Only index tags when the run actually has some.
    tags = pipeline_run.tags
    if tags and len(tags) > 0:
        self._run_tags[run_id] = frozendict(tags)

    return pipeline_run
def test_opt_dict_param():
    """opt_dict_param coerces None to {}, accepts dict-likes, rejects non-dicts."""
    # None is normalized to an empty dict (not passed through).
    assert check.opt_dict_param(None, 'opt_dict_param') == {}
    assert check.opt_dict_param({}, 'opt_dict_param') == {}
    assert check.opt_dict_param(frozendict(), 'opt_dict_param') == {}

    payload = {'a': 2}
    assert check.opt_dict_param(payload, 'opt_dict_param') == payload

    # Every non-dict, non-None value is rejected -- including empty lists.
    for bad_value in (0, 1, 'foo', ['foo'], []):
        with pytest.raises(ParameterCheckError):
            check.opt_dict_param(bad_value, 'opt_dict_param')
def test_dict_param():
    """dict_param accepts dict-likes only; None and all other types raise."""
    assert check.dict_param({}, "dict_param") == {}
    assert check.dict_param(frozendict(), "dict_param") == {}

    payload = {"a": 2}
    assert check.dict_param(payload, "dict_param") == payload

    # Unlike opt_dict_param, None is rejected here too.
    for bad_value in (None, 0, 1, "foo", ["foo"], []):
        with pytest.raises(ParameterCheckError):
            check.dict_param(bad_value, "dict_param")
import uuid
import warnings
from collections import OrderedDict
from typing import Union

import toposort as toposort_

from dagster import check
from dagster.utils import frozendict
from dagster.version import __version__

# NOTE(review): presumably the truncated length of a backfill id used in run
# tags -- confirm against usages elsewhere in the package.
BACKFILL_TAG_LENGTH = 8

# Standard Python logging level names mapped to their numeric values.
PYTHON_LOGGING_LEVELS_MAPPING = frozendict(
    OrderedDict({"CRITICAL": 50, "ERROR": 40, "WARNING": 30, "INFO": 20, "DEBUG": 10})
)

# Alternate spellings accepted for two of the canonical level names above.
PYTHON_LOGGING_LEVELS_ALIASES = frozendict(OrderedDict({"FATAL": "CRITICAL", "WARN": "WARNING"}))

# Every accepted level name (canonical names plus aliases), lower-cased.
PYTHON_LOGGING_LEVELS_NAMES = frozenset(
    [
        level_name.lower()
        for level_name in sorted(
            list(PYTHON_LOGGING_LEVELS_MAPPING.keys()) + list(PYTHON_LOGGING_LEVELS_ALIASES.keys())
        )
    ]
)
with pytest.raises(CheckError): check.opt_list_param([None], "list_param", Foo) class Wrong: pass class AlsoWrong: pass DICT_TEST_CASES = [ (dict(obj={}), True), (dict(obj=frozendict()), True), (dict(obj={"a": 2}), True), (dict(obj=None), False), (dict(obj=0), False), (dict(obj=1), False), (dict(obj="foo"), False), (dict(obj=["foo"]), False), (dict(obj=[]), False), (dict(obj={"str": 1}, key_type=str, value_type=int), True), (dict(obj={"str": 1}, value_type=int), True), (dict(obj={"str": 1}, key_type=str), True), (dict(obj={"str": 1}), True), (dict(obj={}, key_type=str, value_type=int), True), (dict(obj={}, value_type=int), True), (dict(obj={}, key_type=str), True), (dict(obj={}), True),
def test_frozendict():
    """frozendict rejects item assignment with RuntimeError."""
    frozen = frozendict({'foo': 'bar'})
    with pytest.raises(RuntimeError):
        frozen['zip'] = 'zowie'
) schema = table_metadata_value.entry_data.schema assert isinstance(schema, TableSchema) assert schema.columns == [ TableColumn(name="name", type="string"), TableColumn(name="status", type="bool"), ] bad_values = frozendict( { "table_schema": {"columns": False, "constraints": False}, "table_column": {"name": False, "type": False, "description": False, "constraints": False}, "table_constraints": {"other": False}, "table_column_constraints": { "nullable": "foo", "unique": "foo", "other": False, }, } ) def test_table_column_keys(): with pytest.raises(TypeError): TableColumn(bad_key="foo", description="bar", type="string") # type: ignore @pytest.mark.parametrize("key,value", list(bad_values["table_column"].items())) def test_table_column_values(key, value): kwargs = {
def _validate_shape_config(
    context: ValidationContext, config_value: object, check_for_extra_incoming_fields: bool
) -> EvaluateValueResult[Dict[str, object]]:
    """Validate a dict config value against a shape type, honoring field aliases.

    A field may be supplied under its canonical name or its alias, but not
    both; supplying both is a collision error. Structural errors (wrong type,
    extra fields when strict, missing fields) are collected first, then each
    provided field is validated recursively.
    """
    check.inst_param(context, "context", ValidationContext)
    check.not_none_param(config_value, "config_value")
    check.bool_param(check_for_extra_incoming_fields, "check_for_extra_incoming_fields")

    # Map of canonical field name -> accepted alias for this shape.
    field_aliases = check.opt_dict_param(
        cast(Dict[str, str], context.config_type_snap.field_aliases),
        "field_aliases",
        key_type=str,
        value_type=str,
    )

    if not isinstance(config_value, dict):
        return EvaluateValueResult.for_error(create_dict_type_mismatch_error(context, config_value))
    config_value = cast(Dict[str, object], config_value)

    field_snaps = check.not_none(context.config_type_snap.fields)
    defined_field_names = {cast(str, fs.name) for fs in field_snaps}
    # Aliases count as defined names for the extra-field check.
    defined_field_names = defined_field_names.union(set(field_aliases.values()))

    incoming_field_names = set(config_value.keys())

    errors: List[EvaluationError] = []

    # Strict shapes reject fields that are not part of the schema.
    if check_for_extra_incoming_fields:
        _append_if_error(
            errors,
            _check_for_extra_incoming_fields(
                context,
                defined_field_names,
                incoming_field_names,
            ),
        )

    _append_if_error(
        errors,
        _compute_missing_fields_error(context, field_snaps, incoming_field_names, field_aliases),
    )

    # dict is well-formed. now recursively validate all incoming fields
    field_errors = []
    field_snaps = check.not_none(context.config_type_snap.fields)
    for field_snap in field_snaps:
        name = field_snap.name
        aliased_name = field_aliases.get(name)
        if aliased_name is not None and aliased_name in config_value and name in config_value:
            # Both the canonical name and its alias were supplied.
            field_errors.append(
                create_field_substitution_collision_error(
                    context.for_field_snap(field_snap), name=name, aliased_name=aliased_name
                )
            )
        elif name in config_value:
            field_evr = _validate_config(context.for_field_snap(field_snap), config_value[name])
            if field_evr.errors:
                field_errors += field_evr.errors
        elif aliased_name is not None and aliased_name in config_value:
            # Validate the value supplied under the alias against the field.
            field_evr = _validate_config(
                context.for_field_snap(field_snap), config_value[aliased_name]
            )
            if field_evr.errors:
                field_errors += field_evr.errors

    if field_errors:
        errors += field_errors

    if errors:
        return EvaluateValueResult.for_errors(errors)
    else:
        return EvaluateValueResult.for_value(frozendict(config_value))  # type: ignore
def _evaluate_composite_solid_config(context):
    """Evaluate config for a composite solid with a config mapping function.

    Validates the provided config against the composite's schema, invokes the
    user-supplied config mapping function on the result, and then re-validates
    the mapped output against the child-solid config schema. Returns an empty
    result when no mapping applies; otherwise the merged config or errors.
    """
    # Support config mapping override functions
    if not is_solid_container_config(context.config_type):
        return EvaluateValueResult.empty()

    handle = context.config_type.handle
    # If we've already seen this handle, skip -- we've already run the block of code below
    if not handle or handle in context.seen_handles:
        return EvaluateValueResult.empty()

    solid_def = context.pipeline.get_solid(context.config_type.handle).definition
    solid_def_name = context.pipeline.get_solid(handle).definition.name
    has_mapping = isinstance(solid_def, CompositeSolidDefinition) and solid_def.has_config_mapping

    # If there's no config mapping function provided for this composite solid, bail
    if not has_mapping:
        return EvaluateValueResult.empty()

    # We first validate the provided environment config as normal against the composite solid config
    # schema. This will perform a full traversal rooted at the SolidContainerConfigDict and thread
    # errors up to the root
    config_context = context.new_context_with_handle(handle)
    evaluate_value_result = _evaluate_config(config_context)
    if not evaluate_value_result.success:
        return evaluate_value_result

    try:
        mapped_config_value = solid_def.config_mapping.config_fn(
            ConfigMappingContext(run_config=context.run_config),
            # ensure we don't mutate the source environment dict
            frozendict(evaluate_value_result.value.get('config') or {}),
        )
    except Exception:  # pylint: disable=W0703
        # User code raised: surface it as a config error with the traceback.
        return EvaluateValueResult.for_error(
            create_bad_user_config_fn_error(
                context,
                solid_def.config_mapping.config_fn.__name__,
                str(handle),
                solid_def_name,
                traceback.format_exc(),
            ))

    if not mapped_config_value:
        return EvaluateValueResult.empty()

    # Perform basic validation on the mapped config value; remaining validation will happen via the
    # evaluate_config call below
    if not isinstance(mapped_config_value, dict):
        return EvaluateValueResult.for_error(
            create_bad_mapping_error(
                context,
                solid_def.config_mapping.config_fn.__name__,
                solid_def_name,
                str(handle),
                mapped_config_value,
            ))

    # A mapping function may not coexist with direct child-solid config.
    if 'solids' in context.config_value:
        return EvaluateValueResult.for_error(
            create_bad_mapping_solids_key_error(context, solid_def_name, str(handle)))

    # We've validated the composite solid config; now validate the mapping fn overrides against the
    # config schema subtree for child solids
    evaluate_value_result = _evaluate_config(
        context.for_mapped_composite_config(handle, mapped_config_value))

    if evaluate_value_result.errors:
        # Prefix child errors so the user can attribute them to the mapping fn.
        prefix = (
            'Config override mapping function defined by solid {handle_name} from '
            'definition {solid_def_name} {path_msg} caused error: '.format(
                path_msg=get_friendly_path_msg(context.stack),
                handle_name=str(handle),
                solid_def_name=solid_def_name,
            ))
        errors = [e._replace(message=prefix + e.message) for e in evaluate_value_result.errors]
        return EvaluateValueResult.for_errors(errors)

    return EvaluateValueResult.for_value(
        dict_merge(context.config_value, {'solids': evaluate_value_result.value}))
def test_frozendict():
    """Assigning a new key on a frozendict raises RuntimeError."""
    immutable = frozendict({"foo": "bar"})
    with pytest.raises(RuntimeError):
        immutable["zip"] = "zowie"
import datetime
import itertools
import logging
import uuid
from collections import OrderedDict, namedtuple

from dagster import check, seven
from dagster.utils import frozendict

# Key under which dagster attaches its structured metadata to log records.
DAGSTER_META_KEY = 'dagster_meta'

# Standard Python logging level names mapped to their numeric values.
PYTHON_LOGGING_LEVELS_MAPPING = frozendict(
    OrderedDict({'CRITICAL': 50, 'ERROR': 40, 'WARNING': 30, 'INFO': 20, 'DEBUG': 10})
)

# Alternate spellings accepted for two of the canonical level names above.
PYTHON_LOGGING_LEVELS_ALIASES = frozendict(OrderedDict({'FATAL': 'CRITICAL', 'WARN': 'WARNING'}))

# Every accepted level name (canonical names plus aliases), lower-cased.
PYTHON_LOGGING_LEVELS_NAMES = frozenset(
    [
        level_name.lower()
        for level_name in sorted(
            list(PYTHON_LOGGING_LEVELS_MAPPING.keys()) + list(PYTHON_LOGGING_LEVELS_ALIASES.keys())
        )
    ]
)