Example #1
    def _fit_single(self, model, X, y, masked_data=None):
        Validation.check_dataset(X, y)

        if len(X) != 0:
            # Pre-compute target outputs if none are passed.
            if masked_data is None:
                output_dim = Validation.get_output_dimension(y)
                masked_data = self.masking_operation.get_predictions_after_masking(
                    self.explained_model, X, y,
                    batch_size=self.model_builder.batch_size,
                    downsample_factors=self.downsample_factors,
                    flatten=self.flatten_for_explained_model)

                masked_data = TensorflowCXPlain._clean_output_dims(output_dim, masked_data)

            self.last_masked_data = masked_data

            if self.model_filepath is None:
                from tempfile import NamedTemporaryFile
                model_filepath = NamedTemporaryFile(delete=False).name
            else:
                model_filepath = self.model_filepath

            self.last_history = self.model_builder.fit(model, masked_data, y, model_filepath)
        return self
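
Two details in _fit_single are easy to miss: the masked targets are computed only when the caller has not supplied them, and the checkpoint path handed to model_builder.fit falls back to a throwaway temporary file when model_filepath is unset. A minimal, CXPlain-independent sketch of that fallback; the resolve_checkpoint_path helper is a hypothetical name used only for illustration:

from tempfile import NamedTemporaryFile


def resolve_checkpoint_path(configured_path=None):
    # Hypothetical helper mirroring the fallback above: prefer an explicitly
    # configured path, otherwise create a named temporary file whose path can
    # be handed to the training routine as a checkpoint target.
    if configured_path is not None:
        return configured_path
    # delete=False keeps the file on disk after the handle is closed, so the
    # checkpoint written later is not lost.
    return NamedTemporaryFile(delete=False).name


print(resolve_checkpoint_path("explainer.h5"))  # explainer.h5
print(resolve_checkpoint_path())                # e.g. /tmp/tmpq1w2e3r4
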
Example #2
    def get_predictions_after_masking(self, explained_model, X, y, downsample_factors=(1,), batch_size=64,
                                      flatten=False):
        Validation.check_dataset(X, y)

        num_batches = int(np.ceil(len(X) / float(batch_size)))

        all_outputs = []
        for batch_idx in range(num_batches):
            x_batch = X[batch_idx*batch_size:(batch_idx+1)*batch_size]
            y_pred = MaskingUtil.get_prediction(explained_model, x_batch, flatten=flatten)

            x_imputed = []
            for x_i in x_batch:
                x_curr = []
                for j in range(len(x_i)):
                    x_i_imputed_j = np.concatenate([x_i[:j], x_i[j+1:]], axis=0)  # Drop entry at each index j.
                    x_curr.append(x_i_imputed_j)
                x_imputed.append(x_curr)

            all_y_pred_imputed = []
            for j, x_imputed_curr in enumerate(x_imputed):
                if len(x_imputed_curr) == 0:
                    # Empty sample: there is nothing to drop, so fall back to the
                    # original prediction for that sample.
                    y_pred_imputed = y_pred[j].reshape((1, -1))
                else:
                    y_pred_imputed = MaskingUtil.get_prediction(explained_model, x_imputed_curr, flatten=flatten)
                all_y_pred_imputed.append(y_pred_imputed)

            all_outputs.append((x_batch, y_pred, all_y_pred_imputed))

        # Merge the per-batch (x_batch, y_pred, all_y_pred_imputed) tuples into
        # one concatenated array per tuple position.
        all_outputs = [np.concatenate([batch_output[dim] for batch_output in all_outputs])
                       for dim in range(len(all_outputs[0]))]

        return all_outputs
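
This masking operation handles variable-length inputs (for example token sequences) by physically removing one position at a time and re-querying the explained model, rather than zero-masking a fixed-size input. The idea reduces to the following self-contained sketch; toy_predict and its weights are illustrative stand-ins, not part of CXPlain:

import numpy as np

# Toy stand-in for the explained model: scores a token sequence by summing
# one fixed weight per token.
WEIGHTS = {"good": 2.0, "bad": -2.0, "movie": 0.1, "a": 0.0}


def toy_predict(tokens):
    return sum(WEIGHTS.get(token, 0.0) for token in tokens)


def leave_one_out_deltas(tokens):
    # Same idea as the imputation loop above: drop each position j in turn,
    # re-run the model, and record how much the prediction changes.
    full_score = toy_predict(tokens)
    deltas = []
    for j in range(len(tokens)):
        dropped = tokens[:j] + tokens[j + 1:]
        deltas.append(full_score - toy_predict(dropped))
    return np.array(deltas)


print(leave_one_out_deltas(["a", "good", "movie"]))  # approximately [0.  2.  0.1]
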
Example #3
    def _build_model(self, X, y):
        Validation.check_dataset(X, y)

        if Validation.is_variable_length(X):
            raise ValueError("Variable length inputs to CXPlain are currently not supported.")

        n, p = Validation.get_input_dimension(X)
        output_dim = Validation.get_output_dimension(y)

        if self.model is None:
            if self.num_models == 1:
                build_fun = self._build_single
            else:
                build_fun = self._build_ensemble

            self.model, self.prediction_model = build_fun(input_dim=p, output_dim=output_dim)
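
The last two lines are a small dispatch pattern: because _build_single and _build_ensemble share the same (input_dim, output_dim) signature, the surrounding code never needs to branch on the ensemble size again. A standalone sketch of the same idea, with hypothetical placeholder builders:

def build_single(input_dim, output_dim):
    # Placeholder for building one explanation model.
    return {"kind": "single", "input_dim": input_dim, "output_dim": output_dim}


def build_ensemble(input_dim, output_dim):
    # Placeholder for building a fixed-size ensemble of explanation models.
    return {"kind": "ensemble",
            "members": [build_single(input_dim, output_dim) for _ in range(3)]}


def build_model(input_dim, output_dim, num_models=1):
    # Pick the builder once, then call it through the shared signature.
    build_fun = build_single if num_models == 1 else build_ensemble
    return build_fun(input_dim=input_dim, output_dim=output_dim)


print(build_model(10, 2)["kind"])                # single
print(build_model(10, 2, num_models=5)["kind"])  # ensemble
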
Example #4
    def get_predictions_after_masking(self,
                                      explained_model,
                                      X,
                                      y,
                                      downsample_factors=(1, ),
                                      batch_size=64,
                                      flatten=False):
        Validation.check_dataset(X, y)

        num_batches = int(np.ceil(len(X) / float(batch_size)))

        all_outputs = []
        for batch_idx in range(num_batches):
            x = X[batch_idx * batch_size:(batch_idx + 1) * batch_size]
            y_pred = MaskingUtil.get_prediction(explained_model,
                                                x,
                                                flatten=flatten)
            x_imputed = MaskingUtil.get_x_imputed(x,
                                                  downsample_factors,
                                                  math_ops=NumpyInterface)

            all_y_pred_imputed = []
            for x_imputed_curr in x_imputed:
                y_pred_imputed = MaskingUtil.get_prediction(explained_model,
                                                            x_imputed_curr,
                                                            flatten=flatten)
                all_y_pred_imputed.append(y_pred_imputed)

            # np.stack yields shape (num_masked_positions, batch, ...);
            # swapaxes(0, 1) reorders it to (batch, num_masked_positions, ...).
            all_y_pred_imputed = np.stack(all_y_pred_imputed).swapaxes(0, 1)

            all_outputs.append((x, y_pred, all_y_pred_imputed))

        # Merge the per-batch (x, y_pred, all_y_pred_imputed) tuples into one
        # concatenated array per tuple position.
        all_outputs = [
            np.concatenate([batch_output[dim] for batch_output in all_outputs])
            for dim in range(len(all_outputs[0]))
        ]
        return all_outputs
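
The closing list comprehension transposes a list of per-batch tuples (x, y_pred, all_y_pred_imputed) into one concatenated array per field, so downstream code receives arrays indexed by sample rather than a list of tuples indexed by batch. A small standalone demonstration of that merge step, with shapes chosen purely for illustration:

import numpy as np

# Two fake batches, each a tuple of (inputs, predictions, imputed predictions).
batch_a = (np.zeros((4, 8)), np.zeros((4, 2)), np.zeros((4, 8, 2)))
batch_b = (np.ones((3, 8)), np.ones((3, 2)), np.ones((3, 8, 2)))
all_outputs = [batch_a, batch_b]

# For each tuple position, concatenate that field across all batches along
# the sample axis.
merged = [np.concatenate([batch[dim] for batch in all_outputs])
          for dim in range(len(all_outputs[0]))]

print([field.shape for field in merged])  # [(7, 8), (7, 2), (7, 8, 2)]
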