Example #1
File: vgg16.py  Project: slaclab/psmlearn
    def gbprop_op_pool5(self):
        if self._gbprop_pool5_op is None:
            relus = list(self.after_relus)
            relus.pop()  # drop the fc2 relu
            relus.pop()  # drop the fc1 relu

            ## check that it is the relu before the max pooling for pool5
            assert relus[-1].get_shape().as_list() == [
                None, 14, 14, 512
            ], "whoops! relus[-1]=%s does not have shape [None,14,14,512]" % relus[
                -1]

            yy = self.pool5
            grad_ys = self._pl_pool5

            # walk backwards through the relus, rectifying the gradient at each hop
            while len(relus):
                xx = relus.pop()
                dyy_xx = tf.gradients(ys=yy, xs=xx, grad_ys=grad_ys)[0]
                grad_ys = tf.nn.relu(dyy_xx)  # guided backprop: keep positive gradients only
                yy = xx
            # final hop: from the first relu back to the input images
            self._gbprop_pool5_op = tf.gradients(ys=yy,
                                                 xs=self.imgs,
                                                 grad_ys=grad_ys)[0]
        return self._gbprop_pool5_op, self._pl_pool5
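
A hedged usage sketch: it assumes `vgg` is an instance of this project's VGG16 wrapper, `sess` is a live session, `batch` is a preprocessed image batch, and that `_pl_pool5` is a placeholder matching pool5's [None, 7, 7, 512] shape; none of these names beyond the method itself come from the example.

import numpy as np

# `vgg`, `sess`, and `batch` are assumed context, not part of the example
op, pl_pool5 = vgg.gbprop_op_pool5()
seed = np.ones([1, 7, 7, 512], dtype=np.float32)  # relevance seed at pool5
heatmap = sess.run(op, feed_dict={vgg.imgs: batch, pl_pool5: seed})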
Example #2
def gbprop_op(relus, imgs_pl, logits, logits_pl):
    # guided backpropagation: propagate a seed from the logits back to the
    # input, rectifying the gradient after every relu
    yy = logits
    grad_ys = logits_pl

    for xx in reversed(relus):
        dyy_xx = tf.gradients(ys=yy, xs=xx, grad_ys=grad_ys)[0]
        grad_ys = tf.nn.relu(dyy_xx)  # keep positive gradient signal only
        yy = xx

    # final hop from the first relu back to the input placeholder
    gbprop_op = tf.gradients(ys=yy, xs=imgs_pl, grad_ys=grad_ys)[0]
    return gbprop_op
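
A self-contained sketch of how this might be wired up, using a toy two-layer network; all names below are illustrative, not from the project.

import numpy as np
import tensorflow as tf

imgs_pl = tf.placeholder(tf.float32, [None, 8])
h1 = tf.nn.relu(tf.layers.dense(imgs_pl, 16))
h2 = tf.nn.relu(tf.layers.dense(h1, 16))
logits = tf.layers.dense(h2, 3)
logits_pl = tf.placeholder(tf.float32, [None, 3])

op = gbprop_op([h1, h2], imgs_pl, logits, logits_pl)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x = np.random.rand(1, 8).astype(np.float32)
    seed = np.eye(3, dtype=np.float32)[:1]  # relevance seed: one-hot on logit 0
    print(sess.run(op, feed_dict={imgs_pl: x, logits_pl: seed}))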
Example #3
def relprop_from_conv(X,
                      Y,
                      R,
                      rule,
                      K,
                      strides,
                      padding='SAME',
                      data_format="NHWC",
                      name=''):
    # gradient trick: d(conv(0.5*X^2, K))/dX = X * conv_transpose(K, .),
    # so backpropagating R/Y through X2conv redistributes relevance by the z-rule
    X2 = 0.5 * tf.multiply(X, X)
    X2conv = tf.nn.conv2d(X2, K, strides, padding, data_format=data_format)
    if Y is None:
        Y = tf.nn.conv2d(X, K, strides, padding, data_format=data_format)
    if rule.isBeta():
        raise Exception("relprop_from_conv not implemented for beta rule")
    if rule.isNaive():
        grad_ys = R / Y
    elif rule.isEps():
        # epsilon rule: stabilize the denominator away from zero
        Ynorm = Y + rule.eps * tf.sign(Y)
        grad_ys = R / Ynorm
    else:
        raise Exception("relprop_from_conv: unknown rule %s" % rule)
    res = tf.gradients(X2conv, X, grad_ys)[0][0]
    print("nm=%s relprop_from_conv(X=%s,R=%s)=%s" %
          (name, X.get_shape(), R.get_shape(), res.get_shape()))
    return res
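
A sketch of how this could be exercised. The EpsRule stub below is hypothetical, standing in for whatever rule object the project defines; only the attributes this function actually touches are stubbed, and the shapes match the conv5 sizes used elsewhere on this page.

class EpsRule(object):
    # hypothetical stand-in for the project's rule object
    eps = 1e-6
    def isBeta(self):
        return False
    def isNaive(self):
        return False
    def isEps(self):
        return True

X = tf.placeholder(tf.float32, [None, 14, 14, 512])
K = tf.get_variable("K", [3, 3, 512, 512])
R = tf.placeholder(tf.float32, [1, 14, 14, 512])
Rprev = relprop_from_conv(X, None, R, EpsRule(), K,
                          strides=[1, 1, 1, 1], name='conv5_3')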
Example #4
File: vgg16.py  Project: slaclab/psmlearn
    def saliency_op_pool5(self):
        if self._saliency_pool5_op is None:
            # plain (unguided) gradient of pool5 with respect to the input images
            self._saliency_pool5_op = tf.gradients(ys=self.pool5,
                                                   xs=self.imgs,
                                                   grad_ys=self._pl_pool5)[0]
        return self._saliency_pool5_op, self._pl_pool5
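
Usage mirrors `gbprop_op_pool5` above: feed images into `imgs` and a seed into the returned `_pl_pool5` placeholder. Since no relu rectification is applied along the way, this yields the plain gradient (saliency) map rather than the guided one.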
Example #5
def relprop_from_max(max_input, max_output, R, name=''):
    assert max_input.get_shape()[0].value is None, "expect batch ops"
    assert max_output.get_shape()[0].value is None, "expect batch ops"
    #    assert max_output.get_shape()[1:]==R.get_shape(), "max_output.shape=%r != R.shape=%r" % (max_output.get_shape()[1:], R.get_shape())
    assert R.get_shape()[0] == 1, "expected R batch dim of 1, but R=%s" % R
    # winner-take-all: the gradient of max pooling routes R entirely to the max inputs
    res = tf.gradients(max_output, max_input, R)[0][0]
    print("nm=%s relprop_from_max(%s,%s,%s)=%s" %
          (name, max_output.get_shape(), max_input.get_shape(), R.get_shape(),
           res.get_shape()))
    return res
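
A sketch pairing it with tf.nn.max_pool; the placeholder shapes are chosen to match the pool5 sizes used elsewhere on this page.

inp = tf.placeholder(tf.float32, [None, 14, 14, 512])
out = tf.nn.max_pool(inp, ksize=[1, 2, 2, 1],
                     strides=[1, 2, 2, 1], padding='SAME')
R = tf.placeholder(tf.float32, [1, 7, 7, 512])
Rprev = relprop_from_max(inp, out, R, name='pool5')  # shape [14, 14, 512]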
Example #6
    def __init__(self, model):
        self.model = model
        self.sess = model.sess
        self.img_processed = model.X_processed
        self.img_raw = model.X_placeholder
        self.logit2bprop = {}
        self.logit2gbprop = {}
        X = self.img_processed
        # build one plain-gradient op and one guided-backprop op per logit
        for logit in range(model.nnet.logits.get_shape()[1].value):
            fn = model.nnet.logits[:, logit]
            self.logit2bprop[logit] = tf.gradients(fn, self.img_processed)[0]
            self.logit2gbprop[logit] = guided_backprop_op(
                fn, model.nnet.after_relus, X)
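
A hedged usage sketch, assuming `viz` is an instance of this class and `img` a raw input batch that feeds `X_placeholder`; both names are illustrative, not from the example.

# run the guided-backprop op for logit 0; `viz` and `img` are assumed context
gb_map = viz.sess.run(viz.logit2gbprop[0],
                      feed_dict={viz.img_raw: img})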
Example #7
def guided_backprop_op(fn, relus, X):
    assert len(relus) > 0, "no relus"
    # walk back from fn through each relu to the input X, rectifying at each hop
    oplist = [X] + list(relus)
    next_relu = oplist.pop()
    Dafter = tf.gradients(fn, next_relu)[0]
    Dafter_thresh = tf.to_float(Dafter > 0.0) * Dafter  # keep positive gradients only
    while len(oplist):
        last_relu = next_relu
        next_relu = oplist.pop()
        try:
            Dafter = tf.gradients(last_relu, next_relu,
                                  grad_ys=Dafter_thresh)[0]
        except Exception:
            print("tf.gradients failed for:\n  last_relu=%s\n  next_relu=%s" %
                  (last_relu, next_relu))
            raise
        Dafter_thresh = tf.to_float(Dafter > 0.0) * Dafter
        if Dafter_thresh.get_shape()[0] == 1:
            Dafter_thresh = tf.squeeze(Dafter_thresh, [0])
    return Dafter
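
A self-contained sketch on a toy network, analogous to the `gbprop_op` example above; the layer and variable names are illustrative only.

import numpy as np
import tensorflow as tf

imgs = tf.placeholder(tf.float32, [None, 8])
h1 = tf.nn.relu(tf.layers.dense(imgs, 16))
h2 = tf.nn.relu(tf.layers.dense(h1, 16))
logits = tf.layers.dense(h2, 3)

# guided backprop of logit 0 with respect to the input
gb = guided_backprop_op(logits[:, 0], [h1, h2], imgs)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x = np.random.rand(1, 8).astype(np.float32)
    print(sess.run(gb, feed_dict={imgs: x}))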