# Example 1
def create_kern(ps):
    """Build a ``dkern.DeepKernel`` from a hyper-parameter dict.

    Parameters
    ----------
    ps : dict
        Hyper-parameters. Keys read: ``'seed'``, ``'nlin'``,
        ``'filter_sizes'``, ``'n_layers'``, ``'var_weight'``, ``'var_bias'``,
        ``'padding'``, ``'strides'`` and optionally ``'skip_freq'``
        (defaults to -1, i.e. no skip connections).

    Returns
    -------
    dkern.DeepKernel
    """
    # Seed 1234 pins a fixed reference architecture (LeNet-like conv/pool
    # stack on 28x28 single-channel inputs) regardless of the other params.
    if ps['seed'] == 1234:
        return dkern.DeepKernel(
            [1, 28, 28],
            filter_sizes=[[5, 5], [2, 2], [5, 5], [2, 2]],
            recurse_kern=dkern.ExReLU(multiply_by_sqrt2=True),
            var_weight=1.,
            var_bias=1.,
            padding=["VALID", "SAME", "VALID", "SAME"],
            strides=[[1, 1]] * 4,
            data_format="NCHW",
            skip_freq=-1,
        )

    # Read the optional key with a default instead of mutating the caller's
    # dict (the original wrote ps['skip_freq'] = -1, a hidden side effect).
    skip_freq = ps.get('skip_freq', -1)
    if ps['nlin'] == 'ExReLU':
        recurse_kern = dkern.ExReLU(multiply_by_sqrt2=True)
    else:
        recurse_kern = dkern.ExErf()
    return dkern.DeepKernel(
        [1, 28, 28],
        filter_sizes=[[ps['filter_sizes'], ps['filter_sizes']]] * ps['n_layers'],
        recurse_kern=recurse_kern,
        var_weight=ps['var_weight'],
        var_bias=ps['var_bias'],
        padding=ps['padding'],
        strides=[[ps['strides'], ps['strides']]] * ps['n_layers'],
        data_format="NCHW",
        skip_freq=skip_freq,
    )
# Example 2
    def test_matches_old_dkern(self, D=7):
        """ck.DeepKernel must agree with the legacy DeepKernelTesting
        implementation for several depths."""
        with self.test_context() as sess:
            shape = [3, D, D]
            for n_layers in [0, 1, 3, 6]:
                new_kern = ck.DeepKernel(
                    shape,
                    [[3, 3]] * n_layers,
                    ck.ExReLU(),
                    var_weight=1.2,
                    var_bias=0.8,
                    padding="SAME",
                    data_format="NCHW")
                old_kern = ck.dkern.DeepKernelTesting(
                    shape,
                    block_sizes=[-1] * n_layers,
                    block_strides=[-1] * n_layers,
                    kernel_size=3,
                    recurse_kern=ck.ExReLU(),
                    var_weight=1.2,
                    var_bias=0.8)

                xa = tf.constant(np.random.randn(12, *shape),
                                 dtype=settings.float_type)
                xb = tf.constant(np.random.randn(15, *shape),
                                 dtype=settings.float_type)

                # Build all three comparison pairs, then evaluate each:
                # full Gram, cross-Gram and diagonal.
                pairs = [[new_kern.K(xa), old_kern.K(xa)],
                         [new_kern.K(xa, xb), old_kern.K(xa, xb)],
                         [new_kern.Kdiag(xa), old_kern.Kdiag(xa)]]
                for pair in pairs:
                    self.assertAllClose(*sess.run(pair))
# Example 3
def kernel_matrix(X,
                  X2=None,
                  image_size=28,
                  number_channels=1,
                  filter_sizes=None,
                  padding=None,
                  strides=None,
                  sigmaw=1.0,
                  sigmab=1.0,
                  n_gpus=1):
    """Compute the deep-kernel Gram matrix K(X, X2).

    Parameters
    ----------
    X, X2
        Input batches; when ``X2`` is None the symmetric Gram K(X, X)
        is computed.
    image_size, number_channels
        Spatial size and channel count of each (square) input image.
    filter_sizes, padding, strides
        Per-layer convolution settings; defaults reproduce the original
        4-layer conv/pool architecture.
    sigmaw, sigmab
        Weight / bias standard deviations; squared to obtain variances.
    n_gpus
        If > 0 use NCHW layout (GPU-friendly), otherwise NHWC.

    Returns
    -------
    The kernel matrix produced by ``compute_big_K``.
    """
    # Defaults are built here rather than in the signature to avoid
    # mutable default arguments shared across calls.
    if filter_sizes is None:
        filter_sizes = [[5, 5], [2, 2], [5, 5], [2, 2]]
    if padding is None:
        padding = ["VALID", "SAME", "VALID", "SAME"]
    if strides is None:
        strides = [[1, 1]] * 4

    with tf.device("cpu:0"):
        kern = dkern.DeepKernel(
            # Channel-first on GPU, channel-last on CPU; DeepKernel
            # transposes the inputs itself, so X need not change layout.
            ([number_channels, image_size, image_size]
             if n_gpus > 0 else [image_size, image_size, number_channels]),
            filter_sizes=filter_sizes,
            recurse_kern=dkern.ExReLU(multiply_by_sqrt2=False),
            var_weight=sigmaw**2,
            var_bias=sigmab**2,
            padding=padding,
            strides=strides,
            data_format=("NCHW" if n_gpus > 0 else "NHWC"),
            skip_freq=-1,  # no residual connections
        )

    sess = gpflow.get_default_session()
    # 400 is the batch size used to tile the Gram computation.
    K = compute_big_K(sess, kern, 400, X, X2, n_gpus=n_gpus)
    sess.close()
    return K
# Example 4
    def test_equivalent_BNN(self, L=1, n_random_tests=4):
        """Each sample drawn by ``equivalent_BNN`` must equal a CNN built
        explicitly from the same sampled weights."""
        s = settings.get_settings()
        s.dtypes.float_type = 'float32'
        with self.test_context() as sess, settings.temp_settings(s):
            shape = [3, 12, 10]
            X = tf.ones([1] + shape, dtype=settings.float_type)
            kern = ck.DeepKernel(shape, [[3, 3]] * L,
                                 ck.ExReLU(),
                                 var_weight=1.2,
                                 var_bias=0.8,
                                 data_format="NCHW")
            tf_y_bnn = kern.equivalent_BNN(X, n_samples=2, n_filters=7)
            # Slice out the two sampled weight/bias sets and rebuild the
            # corresponding CNNs by hand.
            W0 = [w[0] for w in kern._W]
            b0 = [b[0] for b in kern._b]
            W1 = [w[1] for w in kern._W]
            b1 = [b[1] for b in kern._b]
            tf_y0 = cnn_from_params(W0, b0, kern, X)
            tf_y1 = cnn_from_params(W1, b1, kern, X)

            for _ in range(n_random_tests):
                y_bnn, y0, y1 = sess.run([tf_y_bnn, tf_y0, tf_y1])
                self.assertAllClose(y_bnn[0:1], y0)
                self.assertAllClose(y_bnn[1:2], y1)
# Example 5
    def test_matches_relu_fc(self, D=7):
        """A DeepKernel with empty filter shapes (fully-connected layers)
        must match DeepArcCosine at several depths."""
        with self.test_context() as sess:
            for depth in [0, 1, 3, 6]:
                arccos = DeepArcCosine(D, depth,
                                       variance=1.2, bias_variance=0.8)
                deep = ck.DeepKernel([D], [[]] * depth,
                                     ck.ExReLU(),
                                     var_weight=1.2,
                                     var_bias=0.8,
                                     data_format="NC")
                xa = tf.constant(np.random.randn(12, D),
                                 dtype=settings.float_type)
                xb = tf.constant(np.random.randn(15, D),
                                 dtype=settings.float_type)

                # Compare full Gram, cross-Gram and diagonal in turn.
                pairs = [[arccos.K(xa), deep.K(xa)],
                         [arccos.K(xa, xb), deep.K(xa, xb)],
                         [arccos.Kdiag(xa), deep.Kdiag(xa)]]
                for pair in pairs:
                    self.assertAllClose(*sess.run(pair))
# Example 6
    @gpflow.decors.params_as_tensors
    def _build_likelihood(self):
        """Sparse softmax cross-entropy of the 1-sample BNN logits vs. Y."""
        # fast_1sample_equivalent_BNN cannot pull tensors out of gpflow
        # Params itself, so index every weight and bias into a plain list.
        weights = [self.Ws[i] for i in range(len(self.Ws))]
        biases = [self.bs[i] for i in range(len(self.bs))]
        flat_inputs = tf.reshape(self.X, [-1] + self._kern.input_shape)
        logits = self._kern.fast_1sample_equivalent_BNN(
            flat_inputs, Ws=weights, bs=biases)
        return tf.losses.sparse_softmax_cross_entropy(labels=self.Y,
                                                      logits=logits)

    @gpflow.decors.autoflow((settings.float_type, [None, None]))
    def predict_y(self, Xnew):
        """Return (class probabilities, dummy zero variance) for Xnew."""
        probs = self._build_predict_y(Xnew)
        # GPflow's predict_y contract returns a variance; this model has
        # none, so a scalar zero stands in.
        zero_var = tf.constant(0.0, dtype=settings.float_type)
        return probs, zero_var

    @gpflow.decors.params_as_tensors
    def _build_predict_y(self, Xnew):
        """Softmax class probabilities of the 1-sample BNN at Xnew."""
        # Materialise gpflow Params as plain tensors before handing them
        # to fast_1sample_equivalent_BNN.
        weights = [self.Ws[i] for i in range(len(self.Ws))]
        biases = [self.bs[i] for i in range(len(self.bs))]
        reshaped = tf.reshape(Xnew, [-1] + self._kern.input_shape)
        logits = self._kern.fast_1sample_equivalent_BNN(
            reshaped, Ws=weights, bs=biases)
        return tf.nn.softmax(logits)


if __name__ == '__main__':
    # Smoke test: build a small 5-layer deep kernel and evaluate its Gram
    # matrix on random inputs.
    import os

    import numpy as np

    import deep_ckern as dk

    # os was used without a visible import in the original; import it here
    # so the script block is self-contained.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # NOTE(review): the bare positional argument [1, 3] lands on the
    # parameter after recurse_kern (var_weight in the other DeepKernel
    # calls in this file) — confirm it is intentional.
    k = dk.DeepKernel([1, 16, 16], [[3, 3]] * 5, dk.ExReLU(), [1, 3])
    X = np.random.randn(3, 16 ** 2)
    k.compute_K(X, X)