Example No. 1
# Imports required by this snippet
import pickle
from time import time

import numpy as np
import tensorflow as tf
from alibi.explainers import CounterFactual


def run(i):
    msg = "CF_FAKE.run(%s): OK." % i
    print("python: " + msg)
    # sys.stdout.flush()

    model_nt3 = tf.keras.models.load_model('/gpfs/alpine/med106/scratch/jain/Scratch/explainers/nt3.autosave.model')
    with open('/gpfs/alpine/med106/scratch/jain/Scratch/explainers/nt3.autosave.data.pkl', 'rb') as pickle_file:
        X_train, Y_train, X_test, Y_test = pickle.load(pickle_file)

    print("opened files")
    shape_cf = (1,) + X_train.shape[1:]
    print(shape_cf)
    target_proba = 0.9
    tol = 0.1  # want counterfactuals with p(class) > 0.80
    target_class = 'other'  # any class other than the predicted one will do
    max_iter = 1000
    lam_init = 1e-1
    max_lam_steps = 20
    learning_rate_init = 0.1
    feature_range = (0, 1)


    cf = CounterFactual(model_nt3, shape=shape_cf, target_proba=target_proba, tol=tol,
                        target_class=target_class, max_iter=max_iter, lam_init=lam_init,
                        max_lam_steps=max_lam_steps, learning_rate_init=learning_rate_init,
                        feature_range=feature_range)
        

    bunch = 10
    results = []
    X = np.concatenate([X_train, X_test])
    for j in range(bunch * i, bunch * (i + 1)):
        x_sample = X[j + 1:j + 2]
        print(x_sample.shape)
        start = time()
        explanation = cf.explain(x_sample)
        # print('Counterfactual prediction: {}, {}'.format(explanation.cf['class'], explanation.cf['proba']))
        # print("Actual prediction: {}".format(model_nt3.predict(x_sample)))
        results.append([explanation.cf['X'], explanation.cf['class'], explanation.cf['proba']])
        print("DONE sample=", j)

    filename = "save.p" + str(i)
    pickle.dump(results, open(filename, "wb"))
    results = []


    return msg
Example No. 2
def setup_cf(args):
    (x_train, y_train), (x_test, y_test) = mnist_data()
    model = load_model(args.model)
    X = x_test[0].reshape((1, ) + x_test[0].shape)

    shape = X.shape
    target_proba = 1.0
    tol = 0.01  # want counterfactuals with p(class)>0.99
    target_class = 'other'  # any class other than 7 will do
    max_iter = 1000
    lam_init = 1e-1
    max_lam_steps = 10
    learning_rate_init = 0.1
    feature_range = (x_train.min(), x_train.max())

    cf = CounterFactual(model,
                        shape=shape,
                        target_proba=target_proba,
                        tol=tol,
                        target_class=target_class,
                        max_iter=max_iter,
                        lam_init=lam_init,
                        max_lam_steps=max_lam_steps,
                        learning_rate_init=learning_rate_init,
                        feature_range=feature_range)
    return cf, X
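
A minimal usage sketch for the explainer and instance returned above; the explanation fields mirror those read in Example No. 1, and the variable names are illustrative only:

# Run the counterfactual search on the single MNIST instance prepared by setup_cf.
cf, X = setup_cf(args)       # args.model is assumed to point at a saved classifier
explanation = cf.explain(X)

# The explanation carries the counterfactual instance, its class and its probability.
print("Counterfactual class:", explanation.cf['class'])
print("Counterfactual probability:", explanation.cf['proba'])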
Example No. 3
def keras_mnist_cf_explainer(request, models):
    cf_explainer = CounterFactual(predict_fn=models[0], shape=(1, 28, 28, 1),
                                  target_class=request.param, lam_init=1e-1, max_iter=1000,
                                  max_lam_steps=10)
    yield models[0], cf_explainer
    keras.backend.clear_session()
    tf.keras.backend.clear_session()
Example No. 4
def keras_mnist_cf_explainer(request, keras_logistic_mnist):
    X, y, model = keras_logistic_mnist
    cf_explainer = CounterFactual(predict_fn=model,
                                  shape=(1, 784),
                                  target_class=request.param,
                                  lam_init=1e-1,
                                  max_iter=1000,
                                  max_lam_steps=10)
    return X, y, model, cf_explainer
Example No. 5
def cf_iris_explainer(request, logistic_iris):
    X, y, lr = logistic_iris
    predict_fn = lr.predict_proba
    cf_explainer = CounterFactual(predict_fn=predict_fn, shape=(1, 4),
                                  target_class=request.param, lam_init=1e-1, max_iter=1000,
                                  max_lam_steps=10)

    yield X, y, lr, cf_explainer
    keras.backend.clear_session()
    tf.keras.backend.clear_session()
Example No. 6
def cf_iris_explainer(request, logistic_iris):
    X, y, lr = logistic_iris
    predict_fn = lr.predict_proba
    cf_explainer = CounterFactual(predict_fn=predict_fn,
                                  shape=(1, 4),
                                  target_class=request.param,
                                  lam_init=1e-1,
                                  max_iter=1000,
                                  max_lam_steps=10)

    return X, y, lr, cf_explainer
Example No. 7
def keras_mnist_cf_explainer(request, keras_logistic_mnist):
    X, y, model = keras_logistic_mnist
    cf_explainer = CounterFactual(predict_fn=model,
                                  shape=(1, 784),
                                  target_class=request.param,
                                  lam_init=1e-1,
                                  max_iter=1000,
                                  max_lam_steps=10)
    yield X, y, model, cf_explainer
    keras.backend.clear_session()
    tf.keras.backend.clear_session()
Example No. 8
def tf_keras_mnist_explainer(request, tf_keras_logistic_mnist):
    X, y, model = tf_keras_logistic_mnist
    sess = K.get_session()

    cf_explainer = CounterFactual(sess=sess,
                                  predict_fn=model,
                                  shape=(1, 784),
                                  target_class=request.param,
                                  lam_init=1e-1,
                                  max_iter=1000,
                                  max_lam_steps=10)
    yield X, y, model, cf_explainer
Example No. 9
def iris_explainer(request, logistic_iris):
    X, y, lr = logistic_iris
    predict_fn = lr.predict_proba
    sess = tf.Session()
    cf_explainer = CounterFactual(sess=sess,
                                  predict_fn=predict_fn,
                                  shape=(1, 4),
                                  target_class=request.param,
                                  lam_init=1e-1,
                                  max_iter=1000,
                                  max_lam_steps=10)

    yield X, y, lr, cf_explainer
    tf.reset_default_graph()
    sess.close()
Example No. 10
    def __init__(
        self,
        model,
        shape,
        distance_fn='l1',
        target_proba=1.0,
        target_class='other',
        max_iter=1000,
        early_stop=50,
        lam_init=1e-1,
        max_lam_steps=10,
        tol=0.05,
        learning_rate_init=0.1,
        feature_range=(-1e10, 1e10),
        eps=0.01,  # feature-wise epsilons
        init='identity',
        decay=True,
        write_dir=None,
        debug=False,
        sess=None
    ):
        """Constructor.

        References:
            Counterfactual explanation implementation, parameter docstrings and defaults adapted from:
            https://github.com/SeldonIO/alibi/blob/14804f07457da881a5f70ccff2dcbfed2378b860/alibi/explainers/counterfactual.py#L85  # noqa

        The major difference in the constructor signature is that model and
        ae_model should be instances of the depictions BaseModel
        instead of type Union[Callable, tf.keras.Model, 'keras.Model'].

        Args:
            model (BaseModel): Instance implementing predict method returning
                class probabilities that is passed to the explainer
                implementation.
            shape (tuple): Shape of input data starting with batch size.
            distance_fn (str, optional): Distance function to use in the loss term. Defaults to 'l1'.
            target_proba (float, optional): Target probability for the counterfactual to reach. Defaults to 1.0.
            target_class (Union[str, int], optional): Target class for the counterfactual to reach, one of 'other',
                'same' or an integer denoting desired class membership for the counterfactual instance. Defaults to 'other'.
            max_iter (int, optional): Maximum number of iterations to run the gradient descent for (inner loop).
                Defaults to 1000.
            early_stop (int, optional): Number of steps after which to terminate gradient descent if all or none of found
                instances are solutions. Defaults to 50.
            lam_init (float, optional): Initial regularization constant for the prediction part of the Wachter loss.
                Defaults to 1e-1.
            max_lam_steps (int, optional): Maximum number of times to adjust the regularization constant (outer loop)
                before terminating the search. Defaults to 10.
            tol (float, optional): Tolerance for the counterfactual target probability. Defaults to 0.05.
            learning_rate_init (float, optional): Initial learning rate for each outer loop of lambda. Defaults to 0.1.
            feature_range (Union[Tuple, str], optional): Tuple with min and max ranges to allow for perturbed instances.
                Min and max ranges can be floats or numpy arrays with dimension (1 x nb of features)
                for feature-wise ranges. Defaults to (-1e10, 1e10).
            eps (Union[float, np.ndarray], optional): Gradient step sizes used in calculating numerical gradients,
                defaults to a single value for all features, but can be passed an array for
                feature-wise step sizes. Defaults to 0.01.
            init (str, optional): Initialization method for the search of counterfactuals; currently must be 'identity'.
            decay (bool, optional): Flag to decay learning rate to zero for each outer loop over lambda.
                Defaults to True.
            write_dir (str, optional): Directory to write TensorBoard files to. Defaults to None.
            debug (bool, optional): Flag to write TensorBoard summaries for debugging. Defaults to False.
            sess (tf.compat.v1.Session, optional): Optional TensorFlow session that will be used if passed
                instead of creating or inferring one internally. Defaults to None.
        """
        super().__init__(model)

        self.explainer = CounterFactual(
            model.predict, shape, distance_fn, target_proba, target_class,
            max_iter, early_stop, lam_init, max_lam_steps, tol,
            learning_rate_init, feature_range, eps, init, decay, write_dir,
            debug, sess
        )
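
A hedged sketch of how this wrapper might be driven once constructed; KerasModel, CounterfactualExplainer and x_instance are hypothetical names standing in for a depictions BaseModel subclass, the class owning the __init__ above, and a single input of shape (1, 784):

# Hypothetical names: 'KerasModel' wraps a trained classifier as a depictions
# BaseModel; 'CounterfactualExplainer' is the class the __init__ above belongs to.
wrapped = KerasModel(trained_classifier)  # must expose predict() returning class probabilities
explainer = CounterfactualExplainer(
    model=wrapped,
    shape=(1, 784),          # batch dimension first
    target_class='other',    # any class but the predicted one
    max_lam_steps=10,
)
# The constructor stores an alibi CounterFactual in self.explainer,
# so the search itself can be delegated to it.
explanation = explainer.explainer.explain(x_instance)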
Example No. 11
shape_cf = (1, ) + X_train.shape[1:]
print(shape_cf)
target_proba = 0.9
tol = 0.1  # want counterfactuals with p(class) > 0.80
target_class = 'other'  # any class other than the predicted one will do
max_iter = 1000
lam_init = 1e-1
max_lam_steps = 20
learning_rate_init = 0.1
feature_range = (0, 1)

cf = CounterFactual(model_nt3,
                    shape=shape_cf,
                    target_proba=target_proba,
                    tol=tol,
                    target_class=target_class,
                    max_iter=max_iter,
                    lam_init=lam_init,
                    max_lam_steps=max_lam_steps,
                    learning_rate_init=learning_rate_init,
                    feature_range=feature_range)

shape = X_train[0].shape[0]
results = []

X = np.concatenate([X_train, X_test])
print(X_train.shape[0], X_test.shape[0], X.shape[0], "-x shape 0")
for i in np.arange(0, X.shape[0]):
    x_sample = X[i:i + 1]
    print(x_sample.shape)
    start = time()
    explanation = cf.explain(x_sample)