def test_merge_dicts():
    """Exercise all four dict-merge variants.

    Covers the pure (returning) and in-place flavors, each in both the
    overwrite and no-overwrite modes, and checks that the pure variants do
    not mutate either input while the in-place variants mutate only their
    first argument. Nested dicts are merged recursively.
    """
    base = {"k1": "v1", "k2": "v2", "k3": {"k1": "v1", "k2": "v2"}}
    other = {"k1": "V1", "k4": "V4", "k3": {"k1": "V1", "k4": "V4"}}
    want_overwrite = {
        "k1": "V1",
        "k2": "v2",
        "k4": "V4",
        "k3": {"k1": "V1", "k2": "v2", "k4": "V4"},
    }
    want_no_overwrite = {
        "k1": "v1",
        "k2": "v2",
        "k4": "V4",
        "k3": {"k1": "v1", "k2": "v2", "k4": "V4"},
    }

    # Pure overwrite: later dict wins; inputs untouched.
    assert util.merge_dicts_overwrite(base, other) == want_overwrite
    assert base != want_overwrite
    assert other != want_overwrite

    # Pure no-overwrite: earlier dict wins; inputs untouched.
    assert util.merge_dicts_no_overwrite(base, other) == want_no_overwrite
    assert base != want_no_overwrite
    assert other != want_no_overwrite

    # In-place overwrite mutates its first argument only.
    scratch = deepcopy(base)
    util.merge_dicts_in_place_overwrite(scratch, other)
    assert scratch == want_overwrite
    assert base != scratch

    # In-place no-overwrite mutates its first argument only.
    scratch = deepcopy(base)
    util.merge_dicts_in_place_no_overwrite(scratch, other)
    assert scratch == want_no_overwrite
    assert base != scratch
def _deserialize_raw_ctx(raw_ctx):
    """Normalize a freshly deserialized raw context dict in place and return it.

    When an environment is present: collapses the per-resource "raw_columns"
    dicts into a single flat mapping, and promotes exactly one of
    environment_data's csv_data / parquet_data into environment["data"].

    Args:
        raw_ctx: the msgpack-decoded context dict.

    Returns:
        The same dict, mutated.

    Raises:
        CortexException: if neither or both of csv_data / parquet_data are set.
    """
    if raw_ctx.get("environment") is not None:
        raw_columns = raw_ctx["raw_columns"]
        raw_ctx["raw_columns"] = util.merge_dicts_overwrite(*raw_columns.values())

        data_split = raw_ctx["environment_data"]
        if data_split["csv_data"] is not None and data_split["parquet_data"] is None:
            raw_ctx["environment"]["data"] = data_split["csv_data"]
        elif data_split["parquet_data"] is not None and data_split["csv_data"] is None:
            raw_ctx["environment"]["data"] = data_split["parquet_data"]
        else:
            # Bug fix: `str + dict` raised TypeError here, masking the real
            # error; stringify the payload before concatenating.
            raise CortexException(
                "expected csv_data or parquet_data but found " + str(data_split)
            )
    return raw_ctx
def __init__(self, **kwargs):
    """Load, deserialize, and validate a pipeline context.

    Exactly one context source must be supplied via kwargs: "local_path"
    (msgpack file on disk), "obj" (already-deserialized dict), "raw_obj"
    (raw dict needing deserialization), or "s3_path" (downloaded into the
    cache dir first). "cache_dir" is either given directly or inferred from
    "local_path". Optional kwargs: "workload_id", "local_storage_path".

    Raises:
        ValueError: if no cache dir or context source can be determined, or
            if the context's API version does not match this image.
    """
    if "cache_dir" in kwargs:
        self.cache_dir = kwargs["cache_dir"]
    elif "local_path" in kwargs:
        local_path_dir = os.path.dirname(os.path.abspath(kwargs["local_path"]))
        self.cache_dir = os.path.join(local_path_dir, "cache")
    else:
        raise ValueError("cache_dir must be specified (or inferred from local_path)")
    util.mkdir_p(self.cache_dir)

    if "local_path" in kwargs:
        ctx_raw = util.read_msgpack(kwargs["local_path"])
        self.ctx = _deserialize_raw_ctx(ctx_raw)
    elif "obj" in kwargs:
        self.ctx = kwargs["obj"]
    elif "raw_obj" in kwargs:
        ctx_raw = kwargs["raw_obj"]
        self.ctx = _deserialize_raw_ctx(ctx_raw)
    elif "s3_path" in kwargs:
        # Bug fix: was `elif "s3_path":` — a non-empty string literal is
        # always truthy, so the else-branch below was unreachable and a
        # missing s3_path raised KeyError instead of ValueError.
        local_ctx_path = os.path.join(self.cache_dir, "context.msgpack")
        bucket, key = S3.deconstruct_s3_path(kwargs["s3_path"])
        S3(bucket, client_config={}).download_file(key, local_ctx_path)
        ctx_raw = util.read_msgpack(local_ctx_path)
        self.ctx = _deserialize_raw_ctx(ctx_raw)
    else:
        # Bug fix: `str + dict` raised TypeError; stringify kwargs instead.
        raise ValueError("invalid context args: " + str(kwargs))

    self.workload_id = kwargs.get("workload_id")

    self.id = self.ctx["id"]
    self.key = self.ctx["key"]
    self.cortex_config = self.ctx["cortex_config"]
    self.dataset_version = self.ctx["dataset_version"]
    self.root = self.ctx["root"]
    self.raw_dataset = self.ctx["raw_dataset"]
    self.status_prefix = self.ctx["status_prefix"]
    self.app = self.ctx["app"]
    self.environment = self.ctx["environment"]
    # Resource maps may be serialized as None; normalize to empty dicts.
    self.python_packages = self.ctx["python_packages"] or {}
    self.raw_columns = self.ctx["raw_columns"] or {}
    self.transformed_columns = self.ctx["transformed_columns"] or {}
    self.transformers = self.ctx["transformers"] or {}
    self.aggregators = self.ctx["aggregators"] or {}
    self.aggregates = self.ctx["aggregates"] or {}
    self.constants = self.ctx["constants"] or {}
    self.models = self.ctx["models"] or {}
    self.estimators = self.ctx["estimators"] or {}
    self.apis = self.ctx["apis"] or {}

    self.training_datasets = {k: v["dataset"] for k, v in self.models.items()}
    self.api_version = self.cortex_config["api_version"]

    if "local_storage_path" in kwargs:
        self.storage = LocalStorage(base_dir=kwargs["local_storage_path"])
    else:
        self.storage = S3(
            bucket=self.cortex_config["bucket"],
            region=self.cortex_config["region"],
            client_config={},
        )

    if self.api_version != consts.CORTEX_VERSION:
        raise ValueError(
            "API version mismatch (Context: {}, Image: {})".format(
                self.api_version, consts.CORTEX_VERSION
            )
        )

    # Transformed columns take precedence over raw columns of the same name.
    self.columns = util.merge_dicts_overwrite(self.raw_columns, self.transformed_columns)
    self.raw_column_names = list(self.raw_columns.keys())
    self.transformed_column_names = list(self.transformed_columns.keys())
    self.column_names = list(self.columns.keys())

    # Internal caches
    self._transformer_impls = {}
    self._aggregator_impls = {}
    self._estimator_impls = {}
    self._metadatas = {}
    self._obj_cache = {}
    self.spark_uploaded_impls = {}

    # This affects Tensorflow S3 access
    os.environ["AWS_REGION"] = self.cortex_config.get("region", "")

    # Id map: per-resource-kind maps keyed by resource id, merged at the end.
    self.pp_id_map = ResourceMap(self.python_packages) if self.python_packages else None
    self.rf_id_map = ResourceMap(self.raw_columns) if self.raw_columns else None
    self.ag_id_map = ResourceMap(self.aggregates) if self.aggregates else None
    self.tf_id_map = ResourceMap(self.transformed_columns) if self.transformed_columns else None
    self.td_id_map = ResourceMap(self.training_datasets) if self.training_datasets else None
    self.models_id_map = ResourceMap(self.models) if self.models else None
    self.apis_id_map = ResourceMap(self.apis) if self.apis else None
    self.constants_id_map = ResourceMap(self.constants) if self.constants else None

    self.id_map = util.merge_dicts_overwrite(
        self.pp_id_map,
        self.rf_id_map,
        self.ag_id_map,
        self.tf_id_map,
        self.td_id_map,
        self.models_id_map,
        self.apis_id_map,
        self.constants_id_map,
    )