Example #1
def test_categorical_likelihood(make_data):
    """Test aboleth with a tf.distributions.Categorical likelihood.

    This is a bit of an odd, half-multivariate case.
    """
    x, y, _ = make_data
    N, K = x.shape

    # Make two classes (K = 2)
    Y = np.zeros(len(y), dtype=np.int32)
    Y[y[:, 0] > 0] = 1

    layers = ab.stack(
        ab.InputLayer(name='X', n_samples=10),
        lambda X: (X, 0.0)   # Mock a sampling layer, with 2-class output
    )

    nn, reg = layers(X=x.astype(np.float32))
    like = tf.distributions.Categorical(logits=nn)

    ELBO = ab.elbo(like, Y, N, reg)
    MAP = ab.max_posterior(like, Y, reg)

    tc = tf.test.TestCase()
    with tc.test_session():
        tf.global_variables_initializer().run()

        assert like.probs.eval().shape == (10, N, K)
        assert like.prob(Y).eval().shape == (10, N)

        L = ELBO.eval()
        assert np.isscalar(L)

        L = MAP.eval()
        assert np.isscalar(L)
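
The shape assertions above rely on how tf.distributions.Categorical handles its axes: the last axis of the logits is the class axis, and prob(labels) broadcasts the labels over the leading sample axis. A standalone sketch of just that behaviour, assuming TensorFlow 1.x and purely illustrative sizes:

import numpy as np
import tensorflow as tf

# 10 samples of logits for N = 5 observations and K = 2 classes.
logits = tf.constant(np.random.randn(10, 5, 2).astype(np.float32))
labels = tf.constant(np.random.randint(0, 2, size=5).astype(np.int32))
cat = tf.distributions.Categorical(logits=logits)

with tf.Session() as sess:
    assert sess.run(cat.probs).shape == (10, 5, 2)      # class axis is kept
    assert sess.run(cat.prob(labels)).shape == (10, 5)  # labels broadcast over samples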
Example #2
def test_stack_multilayer():
    """Test implementation of stack."""
    f = StringMultiLayer("f", ["x", "y"], 10.0)
    g = StringLayer("g", 20.0)
    h = StringLayer("h", 30.0)
    r = ab.stack(f, g, h)
    tc = tf.test.TestCase()
    with tc.test_session():
        phi, loss = r(x="x", y="y")
        assert phi == "h(g(f(x,y)))"
        assert loss.eval() == 60.0
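
What this test pins down is the contract of stacking: outputs are composed right-over-left and the per-layer losses are summed (10 + 20 + 30 = 60). A minimal sketch of that contract with a hypothetical stack_like helper, which stands in for ab.stack rather than reproducing its implementation:

def stack_like(*layers):
    """Hypothetical stand-in for ab.stack: compose (output, loss) callables."""
    def stacked(**kwargs):
        phi, loss = layers[0](**kwargs)
        for layer in layers[1:]:
            phi, extra = layer(phi)
            loss += extra
        return phi, loss
    return stacked

f = lambda x, y: ("f({},{})".format(x, y), 10.0)
g = lambda phi: ("g({})".format(phi), 20.0)
h = lambda phi: ("h({})".format(phi), 30.0)

phi, loss = stack_like(f, g, h)(x="x", y="y")
assert phi == "h(g(f(x,y)))" and loss == 60.0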
Example #3
def make_graph():
    """Make the requirements for making a simple tf graph."""
    x, Y, X = data()

    layers = ab.stack(
        ab.InputLayer(name='X', n_samples=10),
        lambda X: (X[:, :, 0:1], 0.0)  # Mock a sampling layer
    )
    N = len(x)

    X_ = tf.placeholder(tf.float32, x.shape)
    Y_ = tf.placeholder(tf.float32, Y.shape)
    N_ = tf.placeholder(tf.float32)

    return x, Y, N, X_, Y_, N_, layers
Example #4
def test_categorical_likelihood(make_data, likelihood):
    """Test aboleth with discrete likelihoods.

    These are something of a corner case.
    """
    x, y, _ = make_data
    like, K = likelihood
    N, _ = x.shape

    # Make two classes (K = 2)
    Y = np.zeros(len(y), dtype=np.int32)
    Y[y[:, 0] > 0] = 1

    if K == 1:
        Y = Y[:, np.newaxis]

    X_ = tf.placeholder(tf.float32, x.shape)
    Y_ = tf.placeholder(tf.int32, Y.shape)
    n_samples_ = tf.placeholder(tf.int32)

    layers = ab.stack(
        ab.InputLayer(name='X', n_samples=n_samples_),
        ab.Dense(output_dim=K)
    )

    nn, reg = layers(X=X_)
    like = like(logits=nn)
    log_like = like.log_prob(Y_)
    prob = like.prob(Y_)

    ELBO = ab.elbo(log_like, reg, N)
    MAP = ab.max_posterior(log_like, reg)

    fd = {X_: x, Y_: Y, n_samples_: 10}
    tc = tf.test.TestCase()
    with tc.test_session():
        tf.global_variables_initializer().run()

        assert like.probs.eval(feed_dict=fd).shape == (10, N, K)
        assert prob.eval(feed_dict=fd).shape == (10,) + Y.shape

        L = ELBO.eval(feed_dict=fd)
        assert np.isscalar(L)

        L = MAP.eval(feed_dict=fd)
        assert np.isscalar(L)
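
Because n_samples is fed through a placeholder, the same graph can be queried with more samples at prediction time without being rebuilt. A short, hypothetical continuation of the session above (the value 50 is purely illustrative):

        # Hypothetical continuation inside the session above: only the leading
        # sample axis changes when more samples are requested.
        fd_pred = {X_: x, Y_: Y, n_samples_: 50}
        assert prob.eval(feed_dict=fd_pred).shape == (50,) + Y.shape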
Example #5
reg = 0.1

l_samples = 5
p_samples = 5

# Network architecture
net = ab.stack(
    ab.InputLayer(name='X', n_samples=l_samples),  # LSAMPLES, BATCH_SIZE, 28, 28, 1
    ab.Conv2D(filters=32, kernel_size=(5, 5),
              l2_reg=reg),  # LSAMPLES, BATCH_SIZE, 28, 28, 32
    ab.Activation(h=tf.nn.relu),
    ab.MaxPool2D(pool_size=(2, 2),
                 strides=(2, 2)),  # LSAMPLES, BATCH_SIZE, 14, 14, 32
    ab.Conv2D(filters=64, kernel_size=(5, 5),
              l2_reg=reg),  # LSAMPLES, BATCH_SIZE, 14, 14, 64
    ab.Activation(h=tf.nn.relu),
    ab.MaxPool2D(pool_size=(2, 2),
                 strides=(2, 2)),  # LSAMPLES, BATCH_SIZE, 7, 7, 64
    ab.Flatten(),  # LSAMPLES, BATCH_SIZE, 7*7*64
    ab.Dense(output_dim=1024, l2_reg=reg),  # LSAMPLES, BATCH_SIZE, 1024
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.Dense(output_dim=10, l2_reg=reg),  # LSAMPLES, BATCH_SIZE, 10
)


def main():

    # Dataset
    mnist_data = tf.contrib.learn.datasets.mnist.read_data_sets('./mnist_demo',
                                                                reshape=False)
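
The example is cut off after loading the data. A hedged sketch of how this net would typically be wired to a loss, mirroring the other examples on this page (the placeholder names are made up, and the elbo signature follows the one used in Example #8):

    # Hypothetical continuation of main(): 10-class softmax likelihood over
    # the logits produced by the net, trained with an ELBO-style loss.
    N = mnist_data.train.images.shape[0]
    X_ = tf.placeholder(tf.float32, [None, 28, 28, 1])
    Y_ = tf.placeholder(tf.int32, [None])

    nn, kl = net(X=X_)  # LSAMPLES, BATCH_SIZE, 10
    likelihood = tf.distributions.Categorical(logits=nn)
    loss = ab.elbo(likelihood, Y_, N, kl)
    train = tf.train.AdamOptimizer().minimize(loss)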
Example #6
BSIZE = 10  # mini-batch size
CONFIG = tf.ConfigProto(device_count={'GPU': 0})  # CPU only; set 'GPU': 1 to use a GPU
LSAMPLES = 1  # Use a single dropout "sample" during learning, so training
# behaves more like a MAP network
PSAMPLES = 50  # Number of samples for prediction
REG = 0.001  # weight regularizer

# Network structure
n_samples_ = tf.placeholder_with_default(LSAMPLES, [])
net = ab.stack(
    ab.InputLayer(name='X', n_samples=n_samples_),
    ab.DropOut(0.95, alpha=True),
    ab.Dense(output_dim=128, l2_reg=REG, init_fn="autonorm"),
    ab.Activation(h=tf.nn.selu),
    ab.DropOut(0.9, alpha=True),
    ab.Dense(output_dim=64, l2_reg=REG, init_fn="autonorm"),
    ab.Activation(h=tf.nn.selu),
    ab.DropOut(0.9, alpha=True),
    ab.Dense(output_dim=32, l2_reg=REG, init_fn="autonorm"),
    ab.Activation(h=tf.nn.selu),
    ab.DropOut(0.9, alpha=True),
    ab.Dense(output_dim=1, l2_reg=REG, init_fn="autonorm"),
)


def main():
    """Run the demo."""
    data = load_breast_cancer()
    X = data.data.astype(np.float32)
    y = data.target.astype(np.int32)[:, np.newaxis]
    X = StandardScaler().fit_transform(X).astype(np.float32)
    N, D = X.shape
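
This example is also truncated. A hedged sketch of the objective such an alpha-dropout network would usually feed into, reusing the Bernoulli/elbo pattern from Example #8 (placeholders are illustrative); at prediction time the same graph can be fed n_samples_: PSAMPLES to draw more dropout samples:

    # Hypothetical continuation of main(): binary classification loss.
    X_ = tf.placeholder(tf.float32, [None, D])
    Y_ = tf.placeholder(tf.float32, [None, 1])

    nn, kl = net(X=X_)
    likelihood = tf.distributions.Bernoulli(logits=nn)
    loss = ab.elbo(likelihood, Y_, N, kl)
    train = tf.train.AdamOptimizer().minimize(loss)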
Example #7
NFEATURES = 1500  # Number of random features/bases to use in the approximation
NOISE = 3.0  # Initial estimate of the observation noise

# Random Fourier Features: this sets up an anisotropic length scale, i.e. one
# length scale per dimension
LENSCALE = tf.Variable(5 * np.ones((21, 1), dtype=np.float32))
KERNEL = ab.RBF(ab.pos(LENSCALE))

# Variational Fourier Features -- length-scale setting here is the "prior"
# LENSCALE = 10.
# KERNEL = ab.RBFVariational(lenscale=LENSCALE, lenscale_posterior=LENSCALE)

# Build the approximate GP
net = ab.stack(
    ab.InputLayer(name='X', n_samples=NSAMPLES),
    ab.RandomFourier(n_features=NFEATURES, kernel=KERNEL),
    ab.DenseVariational(output_dim=1, full=True)
)

# Learning and prediction settings
BATCH_SIZE = 50  # number of observations per mini batch
NEPOCHS = 100  # Number of times to iterate through the dataset
NPREDICTSAMPLES = 10  # results in NSAMPLES * NPREDICTSAMPLES samples

CONFIG = tf.ConfigProto(device_count={'GPU': 1})  # Allow one GPU; set 'GPU': 0 for CPU only


def main():
    """Run the demo."""
    data = fetch_gpml_sarcos_data()
    Xr = data.train.data.astype(np.float32)
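
Example #7 stops during data loading. A hedged sketch of the remaining GP-regression plumbing, assuming a Normal likelihood whose scale is the learned NOISE variable (kept positive with ab.pos, as for LENSCALE above) and the elbo signature used in Example #8:

    # Hypothetical continuation of main(): Gaussian likelihood for regression.
    N, D = Xr.shape
    X_ = tf.placeholder(tf.float32, [None, D])
    Y_ = tf.placeholder(tf.float32, [None, 1])

    nn, kl = net(X=X_)
    noise = ab.pos(tf.Variable(NOISE))  # observation noise, constrained positive
    likelihood = tf.distributions.Normal(loc=nn, scale=noise)
    loss = ab.elbo(likelihood, Y_, N, kl)
    train = tf.train.AdamOptimizer().minimize(loss)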
Example #8
def main():
    """Run the demo."""
    # Get Continuous and categorical data
    df_train, df_test = fetch_data()
    df = pd.concat((df_train, df_test))
    X_con, X_cat, n_cats, Y = input_fn(df)

    # Define the continuous layers
    con_layer = (ab.InputLayer(name='con', n_samples=T_SAMPLES) >>
                 ab.DenseVariational(output_dim=5, full=True))

    # Now define the categorical layers, which we embed
    # Note every EmbedVariational call can be different; this is just "lazy"
    cat_layer_list = [ab.EmbedVariational(EMBED_DIMS, i) for i in n_cats]
    cat_layer = (
        ab.InputLayer(name='cat', n_samples=T_SAMPLES) >> ab.PerFeature(
            *cat_layer_list)  # Assign each column to an embedding layer
    )

    # Now we can feed the initial continuous and categorical layers to further
    # "joint" layers after we concatenate them
    net = ab.stack(ab.Concat(con_layer, cat_layer),
                   ab.RandomArcCosine(100, 1.),
                   ab.DenseVariational(output_dim=1, full=True))

    # Split data into training and testing
    Xt_con, Xs_con = np.split(X_con, [len(df_train)], axis=0)
    Xt_cat, Xs_cat = np.split(X_cat, [len(df_train)], axis=0)
    Yt, Ys = np.split(Y, [len(df_train)], axis=0)

    # Graph placeholders
    X_con_ = tf.placeholder(tf.float32, [None, Xt_con.shape[1]])
    X_cat_ = tf.placeholder(tf.int32, [None, Xt_cat.shape[1]])
    Y_ = tf.placeholder(tf.float32, [None, 1])

    # Feed dicts
    train_dict = {X_con_: Xt_con, X_cat_: Xt_cat, Y_: Yt}
    test_dict = {X_con_: Xs_con, X_cat_: Xs_cat}

    # Make model
    N = len(Xt_con)
    nn, kl = net(con=X_con_, cat=X_cat_)
    likelihood = tf.distributions.Bernoulli(logits=nn)

    loss = ab.elbo(likelihood, Y_, N, kl)
    optimizer = tf.train.AdamOptimizer()
    train = optimizer.minimize(loss)
    init = tf.global_variables_initializer()

    with tf.Session(config=CONFIG):
        init.run()

        # We're going to just use a feed_dict to feed in batches, which we
        # generate here
        batches = ab.batch(train_dict, batch_size=BSIZE, n_iter=NITER)

        for i, data in enumerate(batches):
            train.run(feed_dict=data)
            if i % 1000 == 0:
                loss_val = loss.eval(feed_dict=data)
                print("Iteration {}, loss = {}".format(i, loss_val))

        # Predict
        Ep = ab.predict_expected(nn, test_dict, P_SAMPLES)

    Ey = Ep > 0.5  # Max probability assignment

    acc = accuracy_score(Ys.flatten(), Ey.flatten())
    logloss = log_loss(Ys.flatten(), np.hstack((1 - Ep, Ep)))

    print("Accuracy = {}, log loss = {}".format(acc, logloss))
Example #9
NITER = 20000  # Training iterations per fold
BSIZE = 10  # mini-batch size
CONFIG = tf.ConfigProto(device_count={'GPU': 0})  # CPU only; set 'GPU': 1 to use a GPU
LSAMPLES = 1  # Use a single dropout "sample" during learning, so training
# behaves more like a MAP network
PSAMPLES = 50  # Number of samples for prediction
REG = 0.001  # weight regularizer

# Network structure
n_samples_ = tf.placeholder_with_default(LSAMPLES, [])
net = ab.stack(
    ab.InputLayer(name='X', n_samples=n_samples_),
    ab.DropOut(0.95),
    ab.DenseMAP(output_dim=64, l1_reg=0., l2_reg=REG),
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.DenseMAP(output_dim=64, l1_reg=0., l2_reg=REG),
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.DenseMAP(output_dim=1, l1_reg=0., l2_reg=REG),
)


def main():
    """Run the demo."""
    data = load_breast_cancer()
    X = data.data.astype(np.float32)
    y = data.target.astype(np.int32)[:, np.newaxis]
    X = StandardScaler().fit_transform(X).astype(np.float32)
    N, D = X.shape
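
Example #9 also ends after standardising the data. Since this network is built from DenseMAP layers, the natural objective is the MAP one from Example #4; a hedged sketch (placeholder names are illustrative, and the max_posterior signature follows that example):

    # Hypothetical continuation of main(): MAP objective for the DenseMAP net.
    X_ = tf.placeholder(tf.float32, [None, D])
    Y_ = tf.placeholder(tf.float32, [None, 1])

    nn, reg = net(X=X_)
    likelihood = tf.distributions.Bernoulli(logits=nn)
    loss = ab.max_posterior(likelihood.log_prob(Y_), reg)
    train = tf.train.AdamOptimizer().minimize(loss)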