Example #1
0
def _deserialize_raw_ctx(raw_ctx):
    if raw_ctx.get("environment") is not None:
        raw_columns = raw_ctx["raw_columns"]
        raw_ctx["raw_columns"] = util.merge_dicts_overwrite(*raw_columns.values())

        data_split = raw_ctx["environment_data"]

        if data_split["csv_data"] is not None and data_split["parquet_data"] is None:
            raw_ctx["environment"]["data"] = data_split["csv_data"]
        elif data_split["parquet_data"] is not None and data_split["csv_data"] is None:
            raw_ctx["environment"]["data"] = data_split["parquet_data"]
        else:
            raise CortexException("expected csv_data or parquet_data but found " + data_split)

    return raw_ctx
Example #2
0
def _deserialize_raw_ctx(raw_ctx):
    """Post-process a freshly deserialized context dict into its runtime form.

    Merges the three per-type raw column dicts (int/float/string) into a
    single ``raw_columns`` mapping, and collapses the csv/parquet
    ``environment_data`` split back into ``environment["data"]``.

    Args:
        raw_ctx: context dict as read from storage; mutated in place.

    Returns:
        The same ``raw_ctx`` dict.

    Raises:
        CortexException: if ``environment_data`` has neither or both of
            ``csv_data`` / ``parquet_data`` set.
    """
    raw_columns = raw_ctx["raw_columns"]
    raw_ctx["raw_columns"] = util.merge_dicts_overwrite(
        raw_columns["raw_int_columns"],
        raw_columns["raw_float_columns"],
        raw_columns["raw_string_columns"],
    )

    data_split = raw_ctx["environment_data"]

    if data_split["csv_data"] is not None and data_split["parquet_data"] is None:
        raw_ctx["environment"]["data"] = data_split["csv_data"]
    elif data_split["parquet_data"] is not None and data_split["csv_data"] is None:
        raw_ctx["environment"]["data"] = data_split["parquet_data"]
    else:
        # str() is required: concatenating a dict onto a str raises
        # TypeError, which would mask the intended CortexException.
        raise CortexException("expected csv_data or parquet_data but found " + str(data_split))
    return raw_ctx
Example #3
0
def test_merge_dicts():
    """Exercise all four merge_dicts variants: overwrite vs. no-overwrite,
    and copying vs. in-place — verifying recursive merging of nested dicts."""
    base = {"k1": "v1", "k2": "v2", "k3": {"k1": "v1", "k2": "v2"}}
    overlay = {"k1": "V1", "k4": "V4", "k3": {"k1": "V1", "k4": "V4"}}

    # On key collisions the later dict wins, recursively.
    want_overwrite = {
        "k1": "V1",
        "k2": "v2",
        "k4": "V4",
        "k3": {"k1": "V1", "k2": "v2", "k4": "V4"},
    }
    # On key collisions the earlier dict wins, recursively.
    want_no_overwrite = {
        "k1": "v1",
        "k2": "v2",
        "k4": "V4",
        "k3": {"k1": "v1", "k2": "v2", "k4": "V4"},
    }

    # Copying variants: result matches expectation, inputs are untouched.
    result = util.merge_dicts_overwrite(base, overlay)
    assert result == want_overwrite
    assert want_overwrite != base
    assert want_overwrite != overlay

    result = util.merge_dicts_no_overwrite(base, overlay)
    assert result == want_no_overwrite
    assert want_no_overwrite != base
    assert want_no_overwrite != overlay

    # In-place variants: the first argument is mutated into the result.
    scratch = deepcopy(base)
    util.merge_dicts_in_place_overwrite(scratch, overlay)
    assert scratch == want_overwrite
    assert scratch != base

    scratch = deepcopy(base)
    util.merge_dicts_in_place_no_overwrite(scratch, overlay)
    assert scratch == want_no_overwrite
    assert scratch != base
Example #4
0
    def __init__(self, **kwargs):
        """Build the runtime context from one of several sources.

        Keyword Args:
            cache_dir: directory for cached artifacts; if omitted it is
                inferred as "<dir of local_path>/cache".
            local_path: path to a local msgpack-serialized context.
            obj: an already-deserialized context dict.
            raw_obj: a serialized-form context dict still needing
                _deserialize_raw_ctx.
            s3_path: s3://bucket/key of a msgpack-serialized context.
            workload_id: optional workload identifier.
            local_storage_path: if given, use local storage instead of S3.

        Raises:
            ValueError: if no context source / cache_dir can be determined,
                or on an API version mismatch.
        """
        if "cache_dir" in kwargs:
            self.cache_dir = kwargs["cache_dir"]
        elif "local_path" in kwargs:
            local_path_dir = os.path.dirname(
                os.path.abspath(kwargs["local_path"]))
            self.cache_dir = os.path.join(local_path_dir, "cache")
        else:
            raise ValueError(
                "cache_dir must be specified (or inferred from local_path)")
        util.mkdir_p(self.cache_dir)

        if "local_path" in kwargs:
            ctx_raw = util.read_msgpack(kwargs["local_path"])
            self.ctx = _deserialize_raw_ctx(ctx_raw)
        elif "obj" in kwargs:
            self.ctx = kwargs["obj"]
        elif "raw_obj" in kwargs:
            ctx_raw = kwargs["raw_obj"]
            self.ctx = _deserialize_raw_ctx(ctx_raw)
        # BUG FIX: was `elif "s3_path":` — a non-empty string literal is
        # always truthy, so the else branch was unreachable and a missing
        # "s3_path" key raised a confusing KeyError instead of ValueError.
        elif "s3_path" in kwargs:
            local_ctx_path = os.path.join(self.cache_dir, "context.msgpack")
            bucket, key = S3.deconstruct_s3_path(kwargs["s3_path"])
            S3(bucket, client_config={}).download_file(key, local_ctx_path)
            ctx_raw = util.read_msgpack(local_ctx_path)
            self.ctx = _deserialize_raw_ctx(ctx_raw)
        else:
            # str() is required: concatenating a dict onto a str raises
            # TypeError, masking the intended ValueError.
            raise ValueError("invalid context args: " + str(kwargs))

        self.workload_id = kwargs.get("workload_id")

        # Hoist the commonly used context fields onto the instance.
        self.id = self.ctx["id"]
        self.key = self.ctx["key"]
        self.cortex_config = self.ctx["cortex_config"]
        self.dataset_version = self.ctx["dataset_version"]
        self.root = self.ctx["root"]
        self.raw_dataset = self.ctx["raw_dataset"]
        self.status_prefix = self.ctx["status_prefix"]
        self.app = self.ctx["app"]
        self.environment = self.ctx["environment"]
        self.python_packages = self.ctx["python_packages"]
        self.raw_columns = self.ctx["raw_columns"]
        self.transformed_columns = self.ctx["transformed_columns"]
        self.transformers = self.ctx["transformers"]
        self.aggregators = self.ctx["aggregators"]
        self.aggregates = self.ctx["aggregates"]
        self.constants = self.ctx["constants"]
        self.models = self.ctx["models"]
        self.apis = self.ctx["apis"]
        self.training_datasets = {
            k: v["dataset"]
            for k, v in self.models.items()
        }

        self.api_version = self.cortex_config["api_version"]

        if "local_storage_path" in kwargs:
            self.storage = LocalStorage(base_dir=kwargs["local_storage_path"])
        else:
            self.storage = S3(
                bucket=self.cortex_config["bucket"],
                region=self.cortex_config["region"],
                client_config={},
            )

        if self.api_version != consts.CORTEX_VERSION:
            raise ValueError(
                "API version mismatch (Context: {}, Image: {})".format(
                    self.api_version, consts.CORTEX_VERSION))

        self.columns = util.merge_dicts_overwrite(
            self.raw_columns,
            self.transformed_columns  # self.aggregates
        )

        self.values = util.merge_dicts_overwrite(self.aggregates,
                                                 self.constants)

        self.raw_column_names = list(self.raw_columns.keys())
        self.transformed_column_names = list(self.transformed_columns.keys())
        self.column_names = list(self.columns.keys())

        # Internal caches
        self._transformer_impls = {}
        self._aggregator_impls = {}
        self._model_impls = {}

        # This affects Tensorflow S3 access
        os.environ["AWS_REGION"] = self.cortex_config.get("region", "")

        # Id map
        self.pp_id_map = ResourceMap(self.python_packages)
        self.rf_id_map = ResourceMap(self.raw_columns)
        self.ag_id_map = ResourceMap(self.aggregates)
        self.tf_id_map = ResourceMap(self.transformed_columns)
        self.td_id_map = ResourceMap(self.training_datasets)
        self.models_id_map = ResourceMap(self.models)
        self.apis_id_map = ResourceMap(self.apis)
        self.constants_id_map = ResourceMap(self.constants)

        self.id_map = util.merge_dicts_overwrite(
            self.pp_id_map,
            self.rf_id_map,
            self.ag_id_map,
            self.tf_id_map,
            self.td_id_map,
            self.models_id_map,
            self.apis_id_map,
            self.constants_id_map,
        )