Example #1
def nnet_dropout(X, Y):
    """Neural net with dropout."""
    reg = 0.001  # Weight prior
    noise = .5  # Likelihood st. dev.

    net = (
        ab.InputLayer(name="X", n_samples=n_samples) >>  # n_samples is module-level
        ab.DenseMAP(output_dim=30, l2_reg=reg, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DropOut(keep_prob=0.95) >>
        ab.DenseMAP(output_dim=20, l2_reg=reg, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DropOut(keep_prob=0.95) >>
        ab.DenseMAP(output_dim=10, l2_reg=reg, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DropOut(keep_prob=0.95) >>
        ab.DenseMAP(output_dim=5, l2_reg=reg, l1_reg=0.) >>
        ab.Activation(tf.tanh) >>
        ab.DenseMAP(output_dim=1, l2_reg=reg, l1_reg=0.)
    )

    phi, penalty = net(X=X)  # prediction and regularisation penalty tensors
    lkhood = tf.distributions.Normal(loc=phi, scale=noise)
    loss = ab.max_posterior(lkhood, Y, penalty)
    return phi, loss
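A minimal training sketch for the function above; the placeholder shapes, random data, and optimizer settings are illustrative assumptions, not part of the original example:

# Hypothetical usage sketch (X_train, Y_train and the optimizer are assumptions):
import numpy as np
import tensorflow as tf
import aboleth as ab

n_samples = 5  # module-level dropout sample count used by the InputLayer

X_ = tf.placeholder(tf.float32, [None, 10])
Y_ = tf.placeholder(tf.float32, [None, 1])
phi, loss = nnet_dropout(X_, Y_)
train = tf.train.AdamOptimizer().minimize(loss)

X_train = np.random.randn(100, 10).astype(np.float32)
Y_train = np.random.randn(100, 1).astype(np.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train, feed_dict={X_: X_train, Y_: Y_train})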
Example #2
def test_dropout(random, indep):
    """Test dropout layer."""
    samples, rows, cols = 3, 5, 1000
    keep_prob = 0.9
    X = (random.randn(samples, rows, cols) + 1).astype(np.float32)
    ab.set_hyperseed(666)
    drop = ab.DropOut(keep_prob, independent=indep)

    F, KL = drop(X)

    tc = tf.test.TestCase()
    with tc.test_session():
        f = F.eval()
        dropped = np.where(f == 0)

        # Check that whole columns are dropped out together
        if not indep:
            for s, _, c in zip(*dropped):
                assert np.allclose(f[s, :, c], 0.)

        # Check the dropout proportions are approximately correct
        active = 1 - np.sum(f[:, 0, :] == 0) / (samples * cols)

        assert f.shape == X.shape
        assert (active >= keep_prob - 0.05) and (active <= keep_prob + 0.05)
        assert KL == 0
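The indep flag exercises aboleth's two dropout modes. An illustrative numpy sketch of the distinction, using the same (n_samples, rows, cols) layout as the test; this is not aboleth's implementation, and the rescaling is the standard inverted-dropout convention:

# Illustrative numpy sketch of the two modes (not aboleth's implementation):
import numpy as np

rng = np.random.RandomState(666)
X = np.ones((3, 5, 10), dtype=np.float32)  # (n_samples, rows, cols)
keep_prob = 0.9

# independent=True: every element gets its own Bernoulli keep/drop draw
mask_indep = rng.binomial(1, keep_prob, size=X.shape)

# independent=False: one draw per (sample, column), broadcast down the rows,
# so whole columns are zeroed together -- exactly what the test checks
mask_joint = rng.binomial(1, keep_prob, size=(3, 1, 10))

F_indep = X * mask_indep / keep_prob  # inverted-dropout rescaling
F_joint = X * mask_joint / keep_prob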
Example #3
def nnet_dropout(X, Y):
    """Neural net with dropout."""
    lambda_ = 1e-3  # Weight prior
    noise = .5  # Likelihood st. dev.

    net = (
        ab.InputLayer(name="X", n_samples=n_samples_) >>
        ab.Dense(output_dim=32, l2_reg=lambda_) >>
        ab.Activation(tf.nn.selu) >>
        ab.DropOut(keep_prob=0.9, independent=True) >>
        ab.Dense(output_dim=16, l2_reg=lambda_) >>
        ab.Activation(tf.nn.selu) >>
        ab.DropOut(keep_prob=0.95, independent=True) >>
        ab.Dense(output_dim=8, l2_reg=lambda_) >>
        ab.Activation(tf.nn.selu) >>
        ab.Dense(output_dim=1, l2_reg=lambda_)
    )

    f, reg = net(X=X)
    lkhood = tf.distributions.Normal(loc=f, scale=noise).log_prob(Y)
    loss = ab.max_posterior(lkhood, reg)
    return f, loss
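This variant draws its number of dropout samples from a module-level n_samples_ tensor (later examples define it as a placeholder). A minimal sketch of feeding it differently at training and prediction time; the placeholder shapes, data, and sample counts are illustrative assumptions:

# Hypothetical train/predict sketch (shapes and data are assumptions):
import numpy as np
import tensorflow as tf

n_samples_ = tf.placeholder_with_default(1, [])  # 1 dropout sample by default

X_ = tf.placeholder(tf.float32, [None, 10])
Y_ = tf.placeholder(tf.float32, [None, 1])
f, loss = nnet_dropout(X_, Y_)
train = tf.train.AdamOptimizer().minimize(loss)

X_np = np.random.randn(100, 10).astype(np.float32)
Y_np = np.random.randn(100, 1).astype(np.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(500):  # training uses the 1-sample default
        sess.run(train, feed_dict={X_: X_np, Y_: Y_np})
    # prediction: average over many stochastic forward passes (MC dropout)
    fs = sess.run(f, feed_dict={X_: X_np, n_samples_: 50})
    f_mean, f_std = fs.mean(axis=0), fs.std(axis=0)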
Example #4
def test_dropout(make_data):
    """Test dropout layer."""
    x, _, X = make_data
    drop = ab.DropOut(0.5)

    F, KL = drop(X)

    tc = tf.test.TestCase()
    with tc.test_session():
        f = F.eval()
        prop_zero = np.sum(f == 0) / np.prod(f.shape)

        assert f.shape == X.eval().shape
        assert (prop_zero > 0.4) and (prop_zero < 0.6)
        assert KL == 0
Example #5
def test_dropout(random):
    """Test dropout layer."""
    X = np.repeat(random.randn(1, 30, 20), 3, axis=0)
    ab.set_hyperseed(666)
    drop = ab.DropOut(0.5)

    F, KL = drop(X)

    tc = tf.test.TestCase()
    with tc.test_session():
        f = F.eval()
        prop_zero = np.sum(f == 0) / np.prod(f.shape)

        assert f.shape == X.shape
        assert (prop_zero >= 0.4) and (prop_zero <= 0.6)
        assert KL == 0
Example #6
net = ab.stack(
    ab.InputLayer(name='X', n_samples=l_samples),  # LSAMPLES, BATCH_SIZE, 28, 28, 1
    ab.Conv2D(filters=32, kernel_size=(5, 5),
              l2_reg=reg),  # LSAMPLES, BATCH_SIZE, 28, 28, 32
    ab.Activation(h=tf.nn.relu),
    ab.MaxPool2D(pool_size=(2, 2),
                 strides=(2, 2)),  # LSAMPLES, BATCH_SIZE, 14, 14, 32
    ab.Conv2D(filters=64, kernel_size=(5, 5),
              l2_reg=reg),  # LSAMPLES, BATCH_SIZE, 14, 14, 64
    ab.Activation(h=tf.nn.relu),
    ab.MaxPool2D(pool_size=(2, 2),
                 strides=(2, 2)),  # LSAMPLES, BATCH_SIZE, 7, 7, 64
    ab.Flatten(),  # LSAMPLES, BATCH_SIZE, 7*7*64
    ab.Dense(output_dim=1024, l2_reg=reg),  # LSAMPLES, BATCH_SIZE, 1024
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.Dense(output_dim=10, l2_reg=reg),  # LSAMPLES, BATCH_SIZE, 10
)


def main():

    # Dataset
    mnist_data = tf.contrib.learn.datasets.mnist.read_data_sets('./mnist_demo',
                                                                reshape=False)

    N = mnist_data.train.images.shape[0]

    X, Y = tf.data.Dataset.from_tensor_slices(
        (np.asarray(mnist_data.train.images, dtype=np.float32),
         np.asarray(mnist_data.train.labels, dtype=np.int64))
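The snippet is truncated mid-statement here. A sketch of how a tf.data pipeline like this one typically continues, with hypothetical n_epochs and batch_size values; the actual continuation is not shown in the original:

# Hypothetical continuation (n_epochs and batch_size are assumptions):
n_epochs, batch_size = 10, 100
dataset = tf.data.Dataset.from_tensor_slices(
    (np.asarray(mnist_data.train.images, dtype=np.float32),
     np.asarray(mnist_data.train.labels, dtype=np.int64))
).repeat(n_epochs).shuffle(N).batch(batch_size)
X, Y = dataset.make_one_shot_iterator().get_next()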
Example #7
        Net = self.layer(X)
        # aggregate layer regularization terms
        KL = tf.reduce_sum(self.layer.losses)

        return Net, KL
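Only the tail of the wrapper's build method survives above. A minimal sketch of what the full class might look like, assuming aboleth's convention that a layer produces a (Net, KL) pair; subclassing ab.baselayers.Layer and overriding _build (which is what provides >> composition) is an assumption about aboleth's internals:

class WrapperLayer(ab.baselayers.Layer):
    """Hypothetical reconstruction: adapt a Keras layer class to aboleth's
    (Net, KL) layer contract."""

    def __init__(self, layer_cls, **kwargs):
        self.layer = layer_cls(**kwargs)  # e.g. tf.keras.layers.Dense

    def _build(self, X):
        Net = self.layer(X)
        # aggregate layer regularization terms
        KL = tf.reduce_sum(self.layer.losses)

        return Net, KL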


n_samples_ = tf.placeholder(tf.int32)

l1_l2_reg = tf.keras.regularizers.l1_l2(l1=0., l2=0.)
net = (
    ab.InputLayer(name="X", n_samples=n_samples_) >>
    WrapperLayer(tf.keras.layers.Dense, units=64, activation='tanh',
                 kernel_regularizer=l1_l2_reg, bias_regularizer=l1_l2_reg) >>
    ab.DropOut(keep_prob=.9) >>
    WrapperLayer(tf.keras.layers.Dense, units=32, activation='tanh',
                 kernel_regularizer=l1_l2_reg, bias_regularizer=l1_l2_reg) >>
    ab.DropOut(keep_prob=.9) >>
    WrapperLayer(tf.keras.layers.Dense, units=1,
                 kernel_regularizer=l1_l2_reg, bias_regularizer=l1_l2_reg)
)


def main():
    """Run the demo."""
    n_iters = int(round(n_epochs * N / batch_size))
    print("Iterations = {}".format(n_iters))
Example #8
ab.set_hyperseed(RSEED)

# Optimization
NITER = 20000  # Training iterations per fold
BSIZE = 10  # mini-batch size
CONFIG = tf.ConfigProto(device_count={'GPU': 0})  # CPU only (no GPU)
LSAMPLES = 1  # Use only 1 dropout "sample" while learning, so training is
              # more like fitting a MAP network
PSAMPLES = 50  # Number of samples for prediction
REG = 0.001  # weight regularizer

# Network structure
n_samples_ = tf.placeholder_with_default(LSAMPLES, [])
net = ab.stack(
    ab.InputLayer(name='X', n_samples=n_samples_),
    ab.DropOut(0.95, alpha=True),
    ab.Dense(output_dim=128, l2_reg=REG, init_fn="autonorm"),
    ab.Activation(h=tf.nn.selu),
    ab.DropOut(0.9, alpha=True),
    ab.Dense(output_dim=64, l2_reg=REG, init_fn="autonorm"),
    ab.Activation(h=tf.nn.selu),
    ab.DropOut(0.9, alpha=True),
    ab.Dense(output_dim=32, l2_reg=REG, init_fn="autonorm"),
    ab.Activation(h=tf.nn.selu),
    ab.DropOut(0.9, alpha=True),
    ab.Dense(output_dim=1, l2_reg=REG, init_fn="autonorm"),
)
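Since n_samples_ is a placeholder_with_default, training runs with the LSAMPLES = 1 default without any feed, and prediction can request more stochastic passes. A minimal prediction-side sketch, assuming a hypothetical query tensor X_query and omitting the training loop the demo would contain:

# Hypothetical prediction snippet (X_query is an assumption):
f, reg = net(X=X_query)

with tf.Session(config=CONFIG) as sess:
    sess.run(tf.global_variables_initializer())
    # ... training with the default single dropout sample goes here ...
    fs = sess.run(f, feed_dict={n_samples_: PSAMPLES})  # 50 dropout draws
    f_mean = fs.mean(axis=0)  # predictive mean
    f_std = fs.std(axis=0)    # predictive spread from MC dropout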


def main():
    """Run the demo."""
Example #9
ab.set_hyperseed(RSEED)

# Optimization
NITER = 20000  # Training iterations per fold
BSIZE = 10  # mini-batch size
CONFIG = tf.ConfigProto(device_count={'GPU': 0})  # CPU only (no GPU)
LSAMPLES = 1  # Use only 1 dropout "sample" while learning, so training is
              # more like fitting a MAP network
PSAMPLES = 50  # Number of samples for prediction
REG = 0.001  # weight regularizer

# Network structure
n_samples_ = tf.placeholder_with_default(LSAMPLES, [])
net = ab.stack(
    ab.InputLayer(name='X', n_samples=n_samples_),
    ab.DropOut(0.95),
    ab.DenseMAP(output_dim=64, l1_reg=0., l2_reg=REG),
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.DenseMAP(output_dim=64, l1_reg=0., l2_reg=REG),
    ab.Activation(h=tf.nn.relu),
    ab.DropOut(0.5),
    ab.DenseMAP(output_dim=1, l1_reg=0., l2_reg=REG),
)


def main():
    """Run the demo."""
    data = load_breast_cancer()
    X = data.data.astype(np.float32)
    y = data.target.astype(np.int32)[:, np.newaxis]
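The example is cut off at this point. A sketch of how a binary-classification demo like this might continue, assuming a Bernoulli likelihood over the single-unit output and the same max_posterior(lkhood, Y, reg) signature as Example #1; none of this appears in the original:

# Hypothetical continuation (placeholders and likelihood are assumptions):
X_ = tf.placeholder(tf.float32, [None, X.shape[1]])
Y_ = tf.placeholder(tf.int32, [None, 1])

phi, reg = net(X=X_)
lkhood = tf.distributions.Bernoulli(logits=phi)
loss = ab.max_posterior(lkhood, Y_, reg)
train = tf.train.AdamOptimizer().minimize(loss)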