import tensorflow as tf
import aboleth as ab


def nnet_dropout(X, Y):
    """Neural net with dropout."""
    reg = 0.001  # Weight prior
    noise = .5  # Likelihood st. dev.

    net = (
        # `n_samples` is a placeholder defined elsewhere in this demo; it
        # controls how many stochastic (dropout) forward passes are drawn.
        ab.InputLayer(name="X", n_samples=n_samples) >>
        ab.DenseMAP(output_dim=30, l2_reg=reg, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DropOut(keep_prob=0.95) >>
        ab.DenseMAP(output_dim=20, l2_reg=reg, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DropOut(keep_prob=0.95) >>
        ab.DenseMAP(output_dim=10, l2_reg=reg, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DropOut(keep_prob=0.95) >>
        ab.DenseMAP(output_dim=5, l2_reg=reg, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DenseMAP(output_dim=1, l2_reg=reg, l1_reg=0.)
    )

    phi, reg = net(X=X)  # `reg` is re-bound to the regularisation penalty
    lkhood = tf.distributions.Normal(loc=phi, scale=noise)
    loss = ab.max_posterior(lkhood, Y, reg)
    return phi, loss
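# These demo functions only build the graph; nothing above trains it. Below is
# a minimal, hedged training sketch: the placeholder and data names are
# assumptions, not part of the demo, and `n_samples` is the global placeholder
# that nnet_dropout expects to find in scope.
import numpy as np

n_samples = tf.placeholder_with_default(1, [])  # dropout samples per pass

X_train = np.random.randn(100, 5).astype(np.float32)  # toy data
Y_train = np.random.randn(100, 1).astype(np.float32)
X_ = tf.placeholder(tf.float32, [None, 5])
Y_ = tf.placeholder(tf.float32, [None, 1])

phi, loss = nnet_dropout(X_, Y_)
train = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train, feed_dict={X_: X_train, Y_: Y_train})
    # MC-dropout prediction: average many stochastic forward passes.
    Ey = sess.run(phi, feed_dict={X_: X_train, n_samples: 50}).mean(axis=0)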
def nnet(X, Y):
    """Neural net with regularization."""
    lambda_ = 1e-4  # Weight regularizer
    noise = .5  # Likelihood st. dev.

    net = (
        ab.InputLayer(name="X", n_samples=1) >>
        ab.DenseMAP(output_dim=40, l2_reg=lambda_, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DenseMAP(output_dim=20, l2_reg=lambda_, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DenseMAP(output_dim=10, l2_reg=lambda_, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DenseMAP(output_dim=1, l2_reg=lambda_, l1_reg=0.)
    )

    f, reg = net(X=X)
    lkhood = tf.distributions.Normal(loc=f, scale=noise)
    loss = ab.max_posterior(lkhood, Y, reg)
    return f, loss
def linear(X, Y):
    """Linear regression with l2 regularization."""
    lambda_ = 1e-4  # Weight regularizer
    noise = 1.  # Likelihood st. dev.

    net = (
        ab.InputLayer(name="X") >>
        ab.DenseMAP(output_dim=1, l2_reg=lambda_, l1_reg=0.)
    )

    Xw, reg = net(X=X)
    lkhood = tf.distributions.Normal(loc=Xw, scale=noise)
    loss = ab.max_posterior(lkhood, Y, reg)
    # Equivalent (up to an additive constant) penalised least squares:
    # loss = 0.5 * tf.reduce_mean((Y - Xw)**2) + reg
    return Xw, loss
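# Why the commented-out loss above is (almost) the same thing: with a Gaussian
# likelihood of fixed scale, the negative log-likelihood is a scaled squared
# error plus a constant, so maximising the posterior is penalised least
# squares. A quick NumPy check (nothing here is from the demo):
import numpy as np

y = np.random.randn(10)
f = np.random.randn(10)
nll = 0.5 * (y - f) ** 2 + 0.5 * np.log(2 * np.pi)  # -log N(y | f, sigma=1)
assert np.isclose(nll.mean() - 0.5 * np.log(2 * np.pi),
                  0.5 * np.mean((y - f) ** 2))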
def linear(X, Y):
    """Linear regression with l2 regularization."""
    reg = .01  # Weight prior
    noise = .5  # Likelihood st. dev.

    net = (
        ab.InputLayer(name="X", n_samples=1) >>
        ab.DenseMAP(output_dim=1, l2_reg=reg, l1_reg=0.)
    )

    phi, reg = net(X=X)  # `reg` is re-bound to the regularisation penalty
    lkhood = tf.distributions.Normal(loc=phi, scale=noise)
    loss = ab.max_posterior(lkhood, Y, reg)
    return phi, loss
def svr(X, Y):
    """Support vector regressor."""
    reg = 0.1
    eps = 0.01
    lenscale = 1.

    kern = ab.RBF(lenscale=lenscale)  # keep the length scale positive
    net = (
        ab.InputLayer(name="X", n_samples=1) >>
        ab.RandomFourier(n_features=50, kernel=kern) >>
        ab.DenseMAP(output_dim=1, l2_reg=reg, l1_reg=0.)
    )

    phi, reg = net(X=X)
    # Epsilon-insensitive loss: errors inside the eps tube are not penalised.
    loss = tf.reduce_mean(tf.maximum(tf.abs(Y - phi) - eps, 0.)) + reg
    return phi, loss
def svr(X, Y):
    """Support vector regressor, kind of..."""
    lambda_ = 1e-4
    eps = 0.01
    lenscale = 1.

    # Specify which kernel to approximate with the random Fourier features
    kern = ab.RBF(lenscale=lenscale)

    net = (
        # ab.InputLayer(name="X", n_samples=n_samples_) >>
        ab.InputLayer(name="X", n_samples=1) >>
        ab.RandomFourier(n_features=50, kernel=kern) >>
        # ab.DropOut(keep_prob=0.9) >>
        ab.DenseMAP(output_dim=1, l2_reg=lambda_, l1_reg=0.)
    )

    f, reg = net(X=X)
    loss = tf.reduce_mean(tf.nn.relu(tf.abs(Y - f) - eps)) + reg
    return f, loss
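# Both svr variants implement the same epsilon-insensitive loss,
#     L_eps(y, f) = max(|y - f| - eps, 0),
# so errors inside the eps tube cost nothing, and tf.nn.relu(x) and
# tf.maximum(x, 0.) are interchangeable here. A plain-NumPy illustration:
import numpy as np

def eps_insensitive(y, f, eps=0.01):
    return np.maximum(np.abs(y - f) - eps, 0.)

print(eps_insensitive(1.0, 1.005))  # 0.0, inside the tube
print(eps_insensitive(1.0, 1.5))    # 0.49, linear outside the tube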
import numpy as np
import tensorflow as tf
import aboleth as ab
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import StandardScaler

# Optimization
NITER = 20000  # Training iterations per fold
BSIZE = 10  # mini-batch size
CONFIG = tf.ConfigProto(device_count={'GPU': 0})  # CPU only, no GPU devices
LSAMPLES = 1  # Use only 1 dropout "sample" for learning, to be more like a
              # MAP network
PSAMPLES = 50  # Number of samples for prediction
REG = 0.001  # weight regularizer

# Network structure
n_samples_ = tf.placeholder_with_default(LSAMPLES, [])
net = ab.stack(
    ab.InputLayer(name='X', n_samples=n_samples_),
    ab.DropOut(0.95),
    ab.DenseMAP(output_dim=64, l1_reg=0., l2_reg=REG),
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.DenseMAP(output_dim=64, l1_reg=0., l2_reg=REG),
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.DenseMAP(output_dim=1, l1_reg=0., l2_reg=REG),
)


def main():
    """Run the demo."""
    data = load_breast_cancer()
    X = data.data.astype(np.float32)
    y = data.target.astype(np.int32)[:, np.newaxis]
    X = StandardScaler().fit_transform(X).astype(np.float32)
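    # The excerpt stops just after loading the data. A hedged sketch of how
    # this classifier would plausibly be wired up, following the same
    # ab.max_posterior pattern as the regression demos; the placeholder names
    # and the Bernoulli choice are assumptions, not from this excerpt.
    X_ = tf.placeholder(tf.float32, [None, 30])  # breast cancer: 30 features
    Y_ = tf.placeholder(tf.int32, [None, 1])

    phi, reg_term = net(X=X_)  # logits and regularisation penalty
    lkhood = tf.distributions.Bernoulli(logits=phi)
    loss = ab.max_posterior(lkhood, Y_, reg_term)
    train = tf.train.AdamOptimizer().minimize(loss)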
# NOTE: the head of this excerpt was truncated. `net = ab.stack(`, the input
# layer, and `filters=32` for the first convolution are inferred from the
# shape comments and from the pattern of the previous demo; `reg` and
# `n_samples_` are module-level names defined above the excerpt.
net = ab.stack(
    ab.InputLayer(name='X', n_samples=n_samples_),
    ab.Conv2DMAP(filters=32, kernel_size=(5, 5),
                 l1_reg=0., l2_reg=reg),
    # LSAMPLES, BATCH_SIZE, 28, 28, 32
    ab.Activation(h=tf.nn.relu),
    ab.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
    # LSAMPLES, BATCH_SIZE, 14, 14, 32
    ab.Conv2DMAP(filters=64, kernel_size=(5, 5),
                 l1_reg=0., l2_reg=reg),
    # LSAMPLES, BATCH_SIZE, 14, 14, 64
    ab.Activation(h=tf.nn.relu),
    ab.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
    # LSAMPLES, BATCH_SIZE, 7, 7, 64
    ab.Reshape(target_shape=(7 * 7 * 64,)),
    # LSAMPLES, BATCH_SIZE, 7*7*64
    ab.DenseMAP(output_dim=1024, l1_reg=0., l2_reg=reg),
    # LSAMPLES, BATCH_SIZE, 1024
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.DenseMAP(output_dim=10, l1_reg=0., l2_reg=reg),
    # LSAMPLES, BATCH_SIZE, 10
)


def main():
    # Dataset
    mnist_data = tf.contrib.learn.datasets.mnist.read_data_sets(
        './mnist_demo', reshape=True)
    N, D = mnist_data.train.images.shape
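    # The excerpt is truncated here. Following the pattern of the other demos,
    # the 10-way output would plausibly feed a categorical likelihood; this is
    # a sketch under that assumption and none of these names are from the
    # excerpt. Note `reshape=True` flattens the images to 784 features, so
    # they must be reshaped back to (28, 28, 1) before the conv stack.
    X_ = tf.placeholder(tf.float32, [None, 28, 28, 1])
    Y_ = tf.placeholder(tf.int32, [None])  # digit labels 0-9

    phi, reg_term = net(X=X_)  # class logits and regularisation penalty
    lkhood = tf.distributions.Categorical(logits=phi)
    loss = ab.max_posterior(lkhood, Y_, reg_term)
    train = tf.train.AdamOptimizer().minimize(loss)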