Example #1
    def load(self, filename):
        log.info("Reading %s\n" % filename)
        # map to CPU when CUDA is disabled so GPU checkpoints still load
        state_dict = torch.load(
            filename, map_location=None if self.cuda else "cpu"
        )
        self.epoch = state_dict["epoch"]
        self.lr = state_dict["lr"]
        self.model = state_dict["model"]
        self.optimizer = state_dict["optimizer"]
        log.info("restoring epoch: %d, lr: %f" % (self.epoch, self.lr))
        return self.epoch
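Since save (Example #3) pickles the entire model and optimizer objects, load restores them in a single step. A hypothetical resume call, where net is an instance of the surrounding class and the checkpoint path is made up:

start_epoch = net.load("checkpoint_epoch10.pt")
log.info("resuming training from epoch %d" % start_epoch)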
Example #2
    def create_net(self):
        # fix the seeds so weight initialization is reproducible
        torch.manual_seed(42)
        torch.cuda.manual_seed(42)
        self.model = VAE_gauss_cat(self.input_dim_vec, self.width, self.depth,
                                   self.latent_dim, self.pred_sig)
        if self.cuda:
            self.model = self.model.cuda()
            # let cuDNN benchmark kernels for the fixed input shapes
            cudnn.benchmark = True
        log.info("Total params: %.2fM" %
                 (self.get_nb_parameters() / 1000000.0))
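get_nb_parameters is referenced but not defined in these snippets; a minimal sketch of the standard PyTorch idiom it presumably wraps:

    def get_nb_parameters(self):
        # count every scalar weight in the wrapped model
        return sum(p.numel() for p in self.model.parameters())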
Example #3
    def save(self, filename):
        log.info("Writing %s\n" % filename)
        torch.save(
            {
                "epoch": self.epoch,
                "lr": self.lr,
                "model": self.model,
                "optimizer": self.optimizer,
            },
            filename,
        )
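Note that this checkpoint stores the full model and optimizer objects, which ties the file to the exact class definitions used when saving. A sketch of the state_dict variant PyTorch recommends instead (not what the snippet above does):

        torch.save(
            {
                "epoch": self.epoch,
                "lr": self.lr,
                "model": self.model.state_dict(),
                "optimizer": self.optimizer.state_dict(),
            },
            filename,
        )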
Example #4
    def __init__(
        self,
        input_dim_vec,
        width,
        depth,
        latent_dim,
        pred_sig=False,
        lr=1e-3,
        cuda=True,
        flatten=True,
    ):
        super(VAE_gauss_cat_net, self).__init__()
        log.info("VAE_gauss_net")

        self.cuda = cuda
        self.input_dim_vec = input_dim_vec
        # total flat input width is the sum of the per-feature dimensions
        self.input_dim = sum(self.input_dim_vec)
        self.flatten = flatten

        self.width = width
        self.depth = depth
        self.latent_dim = latent_dim
        self.lr = lr
        self.pred_sig = pred_sig

        self.create_net()
        self.create_opt()
        self.epoch = 0
        self.schedule = None

        if self.cuda:
            self.prior = Normal(
                loc=torch.zeros(latent_dim).cuda(),
                scale=torch.ones(latent_dim).cuda())
        else:
            self.prior = Normal(loc=torch.zeros(latent_dim),
                                scale=torch.ones(latent_dim))
        self.vlb_scale = 1 / len(
            self.input_dim_vec
        )  # scale the VLB by the number of input features so the same LR works across datasets
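A hypothetical instantiation, assuming each entry of input_dim_vec is the encoded width of one feature (1 for a Gaussian feature, the number of categories for a categorical one); all values below are made up:

net = VAE_gauss_cat_net(
    input_dim_vec=[1, 1, 3],  # two Gaussian features, one 3-way categorical
    width=300,
    depth=3,
    latent_dim=2,
)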
Example #5
def wachter_recourse(
    torch_model,
    x: np.ndarray,
    cat_feature_indices: List[int],
    binary_cat_features: bool,
    feature_costs: Optional[List[float]],
    lr: float,
    lambda_param: float,
    y_target: List[int],
    n_iter: int,
    t_max_min: float,
    norm: int,
    clamp: bool,
    loss_type: str,
) -> np.ndarray:
    """
    Generates a counterfactual example according to Wachter et al. for input instance x.

    Parameters
    ----------
    torch_model:
        Black-box model to generate the counterfactual for.
    x:
        Factual instance to explain.
    cat_feature_indices:
        List of positions of categorical features in x.
    binary_cat_features:
        If true, the encoding of x is done by drop_if_binary.
    feature_costs:
        List with costs per feature.
    lr:
        Learning rate for gradient descent.
    lambda_param:
        Weight factor for feature_cost.
    y_target:
        Tuple of class probabilities (BCE loss) or [Float] for logit score (MSE loss).
    n_iter:
        Maximum number of iterations.
    t_max_min:
        Maximum time amount of search.
    norm:
        L-norm to calculate cost.
    clamp:
        If true, feature values will be clamped to the interval [0, 1].
    loss_type:
        String for loss function ("MSE" or "BCE").

    Returns
    -------
    Counterfactual example as np.ndarray
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # returns counterfactual instance
    torch.manual_seed(0)

    if feature_costs is not None:
        # feature_costs arrives as a plain list, so build the tensor directly
        feature_costs = torch.tensor(feature_costs).float().to(device)

    x = torch.from_numpy(x).float().to(device)
    y_target = torch.tensor(y_target).float().to(device)
    lamb = torch.tensor(lambda_param).float().to(device)
    # x_new is the candidate counterfactual updated by gradient descent
    x_new = x.clone().detach().requires_grad_(True)
    # x_new_enc is a copy of x_new with reconstructed encoding constraints of x_new
    # such that categorical data is either 0 or 1
    x_new_enc = reconstruct_encoding_constraints(x_new, cat_feature_indices,
                                                 binary_cat_features)

    optimizer = optim.Adam([x_new], lr, amsgrad=True)

    if loss_type == "MSE":
        if len(y_target) != 1:
            raise ValueError(
                f"y_target {y_target} is not a single logit score")

        # If logit is above 0.0 we want class 1, else class 0
        target_class = int(y_target[0] > 0.0)
        loss_fn = torch.nn.MSELoss()
    elif loss_type == "BCE":
        if y_target[0] + y_target[1] != 1.0:
            raise ValueError(
                f"y_target {y_target} does not contain 2 valid class probabilities"
            )

        # [0, 1] for class 1, [1, 0] for class 0
        # target is the class probability of class 1
        # target_class is the class with the highest probability
        target_class = torch.round(y_target[1]).int()
        loss_fn = torch.nn.BCELoss()
    else:
        raise ValueError(f"loss_type {loss_type} not supported")

    # get the probability of the target class
    f_x_new = torch_model(x_new)[:, target_class]

    t0 = datetime.datetime.now()
    t_max = datetime.timedelta(minutes=t_max_min)
    while f_x_new <= DECISION_THRESHOLD:
        it = 0
        while f_x_new <= DECISION_THRESHOLD and it < n_iter:
            optimizer.zero_grad()
            x_new_enc = reconstruct_encoding_constraints(
                x_new, cat_feature_indices, binary_cat_features)
            # use x_new_enc for prediction results to ensure constraints
            # get the probability of the target class
            f_x_new = torch_model(x_new_enc)[:, target_class]

            if loss_type == "MSE":
                # single logit score for the target class for MSE loss
                f_x_loss = torch.log(f_x_new / (1 - f_x_new))
            elif loss_type == "BCE":
                # tuple output for BCE loss
                f_x_loss = torch_model(x_new_enc).squeeze(axis=0)
            else:
                raise ValueError(f"loss_type {loss_type} not supported")

            cost = (torch.dist(x_new_enc, x, norm) if feature_costs is None
                    else torch.norm(feature_costs * (x_new_enc - x), norm))

            loss = loss_fn(f_x_loss, y_target) + lamb * cost
            loss.backward()
            optimizer.step()
            # project the candidate counterfactual back into [0, 1]
            if clamp:
                with torch.no_grad():
                    x_new.clamp_(0, 1)
            it += 1
        # relax the distance penalty before retrying
        lamb -= 0.05

        if datetime.datetime.now() - t0 > t_max:
            log.info("Timeout - No Counterfactual Explanation Found")
            break
        elif f_x_new >= DECISION_THRESHOLD:
            log.info("Counterfactual Explanation Found")
    return x_new_enc.cpu().detach().numpy().squeeze(axis=0)
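A hypothetical call, with made-up data and hyperparameters chosen only to illustrate the argument types (model stands in for any PyTorch classifier that returns per-class probabilities):

x_fact = np.array([[0.2, 0.7, 1.0, 0.0]])  # last two columns: one-hot categorical
cf = wachter_recourse(
    torch_model=model,
    x=x_fact,
    cat_feature_indices=[2, 3],
    binary_cat_features=True,
    feature_costs=None,
    lr=0.01,
    lambda_param=1.0,
    y_target=[0.0, 1.0],  # target class 1 under BCE loss
    n_iter=1000,
    t_max_min=0.5,
    norm=1,
    clamp=True,
    loss_type="BCE",
)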
Example #6
    def get_counterfactuals(self, factuals: pd.DataFrame) -> pd.DataFrame:
        cfs = []
        coeffs = self._coeffs
        intercepts = self._intercepts
        action_set = self.action_set

        # to keep matching indexes for iterrows and coeffs
        factuals = factuals.reset_index()
        factuals = self._mlmodel.get_ordered_features(factuals)

        # Check if we need lime to build coefficients
        if (coeffs is None) and (intercepts is None):
            log.info("Start generating LIME coefficients")
            coeffs, intercepts = self._get_lime_coefficients(factuals)
            log.info("Finished generating LIME coefficients")
        else:
            # LIME produces coeffs and intercepts per instance; globally supplied
            # coefficients must be tiled to shape [num_of_instances, num_of_features]
            coeffs = np.vstack([self._coeffs] * factuals.shape[0])
            intercepts = np.vstack([self._intercepts] * factuals.shape[0]).squeeze(
                axis=1
            )

        # generate counterfactuals
        for index, row in factuals.iterrows():
            # asserts are essential for mypy typechecking
            assert coeffs is not None
            assert intercepts is not None
            factual_enc_norm = row.values
            coeff = coeffs[index]
            intercept = intercepts[index]

            # Default counterfactual value if no action flips the prediction
            counterfactual = np.full(factual_enc_norm.shape[0], np.nan)

            # Align action set to coefficients
            action_set.set_alignment(coefficients=coeff)

            # Build AR flipset
            fs = rs.Flipset(
                x=factual_enc_norm,
                action_set=action_set,
                coefficients=coeff,
                intercept=intercept,
            )
            try:
                fs_pop = fs.populate(total_items=self._fs_size)
            except (ValueError, KeyError):
                log.warning(
                    "Actionable Recourse is not able to produce a counterfactual explanation for instance {}".format(
                        index
                    )
                )
                log.warning(row.values)
                cfs.append(counterfactual)
                continue

            # Get actions to flip predictions
            actions = fs_pop.actions

            for action in actions:
                candidate_cf = (factual_enc_norm + action).reshape(
                    (1, -1)
                )  # Reshape to keep two-dim. input
                # Check if the candidate counterfactual really flips the ML model's prediction
                pred_cf = np.argmax(self._mlmodel.predict_proba(candidate_cf))
                pred_f = np.argmax(
                    self._mlmodel.predict_proba(factual_enc_norm.reshape((1, -1)))
                )
                if pred_cf != pred_f:
                    counterfactual = candidate_cf.squeeze()
                    break

            cfs.append(counterfactual)

        # Convert output into correct format
        cfs = np.array(cfs)
        df_cfs = pd.DataFrame(cfs, columns=self._mlmodel.feature_input_order)
        df_cfs[self._mlmodel.data.target] = np.argmax(
            self._mlmodel.predict_proba(cfs), axis=1
        )

        df_cfs = check_counterfactuals(self._mlmodel, df_cfs, factuals.index)
        df_cfs = self._mlmodel.get_ordered_features(df_cfs)
        return df_cfs
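The tiling of globally supplied coefficients above is easy to misread; a standalone sketch of the shape manipulation with made-up numbers:

import numpy as np

coeffs_global = np.array([0.5, -1.2, 0.3])  # one coefficient per feature
intercept_global = np.array([0.1])
n_instances = 4

coeffs = np.vstack([coeffs_global] * n_instances)  # shape (4, 3)
intercepts = np.vstack([intercept_global] * n_instances).squeeze(axis=1)  # shape (4,)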
Example #7
    def _counterfactual_optimization(self, cat_features_indices, device, df_fact):
        # prepare data for optimization steps
        test_loader = torch.utils.data.DataLoader(
            df_fact.values, batch_size=1, shuffle=False
        )

        list_cfs = []
        for query_instance in test_loader:
            query_instance = query_instance.float()

            target = torch.FloatTensor(self._target_class).to(device)
            target_prediction = np.argmax(np.array(self._target_class))

            # encode the mutable features
            z = self.vae.encode(query_instance[:, self.vae.mutable_mask])[0]
            # add the immutable features to the latents
            z = torch.cat([z, query_instance[:, ~self.vae.mutable_mask]], dim=-1)
            z = z.clone().detach().requires_grad_(True)

            if self._optimizer == "adam":
                optim = torch.optim.Adam([z], self._lr)
                # z.requires_grad = True
            else:
                optim = torch.optim.RMSprop([z], self._lr)

            candidate_counterfactuals = []  # all counterfactual candidates found so far
            # distance of each candidate from the factual instance, using the
            # loss function as the distance measure (any other metric could be substituted)
            candidate_distances = []
            all_loss = []

            for idx in range(self._max_iter):
                cf = self.vae.decode(z)

                # add the immutable features to the reconstruction
                temp = query_instance.clone()
                temp[:, self.vae.mutable_mask] = cf
                cf = temp

                cf = reconstruct_encoding_constraints(
                    cf, cat_features_indices, self._params["binary_cat_features"]
                )
                output = self._mlmodel.predict_proba(cf)[0]
                _, predicted = torch.max(output, 0)

                loss = self._compute_loss(cf, query_instance, target)
                all_loss.append(loss)

                if predicted == target_prediction:
                    candidate_counterfactuals.append(
                        cf.cpu().detach().numpy().squeeze(axis=0)
                    )
                    candidate_distances.append(loss.cpu().detach().numpy())

                loss.backward()
                optim.step()
                optim.zero_grad()
                cf.detach_()

            # Choose the nearest counterfactual
            if len(candidate_counterfactuals):
                log.info("Counterfactual found!")
                array_counterfactuals = np.array(candidate_counterfactuals)
                array_distances = np.array(candidate_distances)

                index = np.argmin(array_distances)
                list_cfs.append(array_counterfactuals[index])
            else:
                log.info("No counterfactual found")
                list_cfs.append(query_instance.cpu().detach().numpy().squeeze(axis=0))
        return list_cfs
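Both this method and wachter_recourse in Example #5 rely on reconstruct_encoding_constraints to keep one-hot columns valid during gradient steps. Its implementation is not shown in these examples; a minimal sketch of the binary_cat_features=True case, assuming each listed index holds a single 0/1-encoded category:

import torch

def reconstruct_encoding_constraints(x, cat_feature_indices, binary_cat_features):
    # round relaxed categorical entries back to {0, 1}; a sketch, not the
    # library's exact logic
    x_enc = x.clone()
    if binary_cat_features:
        for idx in cat_feature_indices:
            x_enc[:, idx] = torch.round(x_enc[:, idx])
    return x_enc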
Example #8
setup = load_setup()

results = pd.DataFrame()

path = args.path

session_models = ["cem", "cem-vae"]
torch_methods = ["cchvae", "clue", "cruds", "wachter", "revise"]
for rm in args.recourse_method:
    backend = "tensorflow"
    if rm in torch_methods:
        backend = "pytorch"
    for data_name in args.dataset:
        dataset = OnlineCatalog(data_name)
        for model_type in args.type:
            log.info("=====================================")
            log.info("Recourse method: {}".format(rm))
            log.info("Dataset: {}".format(data_name))
            log.info("Model type: {}".format(model_type))

            if rm in session_models:
                graph = Graph()
                with graph.as_default():
                    ann_sess = Session()
                    with ann_sess.as_default():
                        mlmodel_sess = MLModelCatalog(dataset, model_type,
                                                      backend)

                        factuals_sess = predict_negative_instances(
                            mlmodel_sess, dataset)
                        factuals_sess = factuals_sess.iloc[:args.
Example #9
    def __init__(self):
        log.info("\nNet:")