Example #1
    def __init__(
        self,
        input_features_def,
        combiner_def,
        output_features_def,
        random_seed=None,
    ):
        # Deep copy to keep a pristine, JSON-serializable copy of the config dicts,
        # in case the framework later wraps or mutates them (e.g. TensorFlow's
        # non-JSON-serializable _DictWrapper tracking).
        self._input_features_def = copy.deepcopy(input_features_def)
        self._combiner_def = copy.deepcopy(combiner_def)
        self._output_features_def = copy.deepcopy(output_features_def)

        self._random_seed = random_seed

        if random_seed is not None:
            torch.random.manual_seed(random_seed)

        super().__init__()

        # ================ Inputs ================
        self.input_features = torch.nn.ModuleDict()
        self.input_features.update(build_inputs(input_features_def))

        # ================ Combiner ================
        logger.debug(f"Combiner {combiner_def[TYPE]}")
        combiner_class = get_combiner_class(combiner_def[TYPE])
        config, kwargs = load_config_with_kwargs(
            combiner_class.get_schema_cls(),
            combiner_def,
        )
        self.combiner = combiner_class(input_features=self.input_features, config=config, **kwargs)

        # ================ Outputs ================
        self.output_features = torch.nn.ModuleDict()
        self.output_features.update(build_outputs(self._output_features_def, self.combiner))
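
A minimal sketch of how a constructor like this might be invoked. The class name ECD and the feature/combiner definitions are illustrative assumptions in Ludwig's config-dict style, not taken from the snippets themselves:

# Hypothetical usage sketch; ECD and the definitions below are assumptions
# for illustration only.
input_features_def = [{"name": "review", "type": "text"}]
combiner_def = {"type": "concat"}
output_features_def = [{"name": "sentiment", "type": "category"}]

model = ECD(
    input_features_def,
    combiner_def,
    output_features_def,
    random_seed=42,
)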
Example #2
    def __init__(
            self,
            input_features_def,
            combiner_def,
            output_features_def,
            **kwargs
    ):
        super().__init__()

        # ================ Inputs ================
        self.input_features = build_inputs(
            input_features_def
        )

        # ================ Combiner ================
        logger.debug('Combiner {}'.format(combiner_def[TYPE]))
        combiner_class = get_combiner_class(combiner_def[TYPE])
        self.combiner = combiner_class(
            input_features=self.input_features,
            **combiner_def,
            **kwargs
        )

        # ================ Outputs ================
        self.output_features = build_outputs(
            output_features_def,
            self.combiner
        )

        # ================ Combined loss metric ================
        self.eval_loss_metric = tf.keras.metrics.Mean()

        # After constructing all layers, clear the cache to free up memory
        clear_data_cache()
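
The eval_loss_metric above is a running mean over batches. A standalone sketch of that accumulation pattern, using only the public tf.keras metrics API (the loss values are stand-ins):

import tensorflow as tf

# Running-mean pattern behind eval_loss_metric; batch losses are stand-ins.
mean_loss = tf.keras.metrics.Mean()
for batch_loss in [0.9, 0.7, 0.5]:
    mean_loss.update_state(batch_loss)   # accumulate one batch's loss
print(float(mean_loss.result()))         # 0.7, the mean so far
mean_loss.reset_state()                  # clear between evaluation runs (TF 2.5+; older versions use reset_states())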
Example #3
    def __init__(
        self,
        input_features_def,
        combiner_def,
        output_features_def,
        random_seed=None,
    ):
        self._input_features_def = copy.deepcopy(input_features_def)
        self._combiner_def = copy.deepcopy(combiner_def)
        self._output_features_def = copy.deepcopy(output_features_def)

        self._random_seed = random_seed

        if random_seed is not None:
            torch.random.manual_seed(random_seed)

        super().__init__()

        # ================ Inputs ================
        self.input_features = LudwigFeatureDict()
        try:
            self.input_features.update(build_inputs(self._input_features_def))
        except KeyError as e:
            raise KeyError(
                f"An input feature has a name that conflicts with a class attribute of torch's ModuleDict: {e}"
            ) from e

        # ================ Combiner ================
        logger.debug(f"Combiner {combiner_def[TYPE]}")
        combiner_class = get_combiner_class(combiner_def[TYPE])
        config, kwargs = load_config_with_kwargs(
            combiner_class.get_schema_cls(),
            combiner_def,
        )
        self.combiner = combiner_class(input_features=self.input_features, config=config, **kwargs)

        # ================ Outputs ================
        self.output_features = LudwigFeatureDict()
        self.output_features.update(build_outputs(self._output_features_def, self.combiner))

        # ================ Combined loss metric ================
        self.eval_loss_metric = torchmetrics.MeanMetric()
        self.eval_additional_losses_metrics = torchmetrics.MeanMetric()

        # After constructing all layers, clear the cache to free up memory
        clear_data_cache()
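
Example #3 swaps torch.nn.ModuleDict for LudwigFeatureDict because ModuleDict rejects keys that shadow its own attributes, which is exactly what the KeyError handler above guards against. A standalone demonstration of the collision ("pop" is an arbitrary illustrative feature name):

import torch

features = torch.nn.ModuleDict()
try:
    # "pop" shadows the dict-like method ModuleDict.pop, so add_module refuses it
    features.update({"pop": torch.nn.Linear(4, 2)})
except KeyError as e:
    print(f"KeyError: {e}")  # attribute 'pop' already exists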
Example #4
    def __init__(
            self,
            input_features_def,
            combiner_def,
            output_features_def,
            random_seed=None,
    ):
        # Deep copy to prevent TensorFlow from hijacking the dicts within the config and
        # transforming them into _DictWrapper classes, which are not JSON serializable.
        self._input_features_def = copy.deepcopy(input_features_def)
        self._combiner_def = copy.deepcopy(combiner_def)
        self._output_features_def = copy.deepcopy(output_features_def)

        self._random_seed = random_seed

        if random_seed is not None:
            tf.random.set_seed(random_seed)

        super().__init__()

        # ================ Inputs ================
        self.input_features = build_inputs(
            input_features_def
        )

        # ================ Combiner ================
        logger.debug('Combiner {}'.format(combiner_def[TYPE]))
        combiner_class = get_combiner_class(combiner_def[TYPE])
        self.combiner = combiner_class(
            input_features=self.input_features,
            **combiner_def,
        )

        # ================ Outputs ================
        self.output_features = build_outputs(
            output_features_def,
            self.combiner
        )

        # ================ Combined loss metric ================
        self.eval_loss_metric = tf.keras.metrics.Mean()

        # After constructing all layers, clear the cache to free up memory
        clear_data_cache()
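
The PyTorch variants (Examples #1 and #3) seed with torch.random.manual_seed, while this TensorFlow variant uses tf.random.set_seed; both make subsequent random ops in their framework deterministic. A minimal side-by-side sketch:

import tensorflow as tf
import torch

tf.random.set_seed(42)        # TensorFlow variant (Example #4)
torch.random.manual_seed(42)  # PyTorch variants (Examples #1 and #3)

print(tf.random.uniform([2]))  # reproducible across runs once seeded
print(torch.rand(2))           # likewise for torch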