Example #1
BATCH_SIZE = 1
network = Network(
    
    Network(
        DropoutLayer(rng, 0.25),    
        Conv1DLayer(rng, (64, 66, 25), (BATCH_SIZE, 66, 240)),
        Pool1DLayer(rng, (2,), (BATCH_SIZE, 64, 240)),
        ActivationLayer(rng),

        DropoutLayer(rng, 0.25),    
        Conv1DLayer(rng, (128, 64, 25), (BATCH_SIZE, 64, 120)),
        Pool1DLayer(rng, (2,), (BATCH_SIZE, 128, 120)),
        ActivationLayer(rng),
    ),
    
    Network(
        VariationalLayer(rng),
    ),
    
    Network(
        InverseNetwork(Pool1DLayer(rng, (2,), (BATCH_SIZE, 64, 120))),
        DropoutLayer(rng, 0.25),    
        Conv1DLayer(rng, (64, 64, 25), (BATCH_SIZE, 64, 120)),
        ActivationLayer(rng),

        InverseNetwork(Pool1DLayer(rng, (2,), (BATCH_SIZE, 64, 240))),
        DropoutLayer(rng, 0.25),    
        Conv1DLayer(rng, (66, 64, 25), (BATCH_SIZE, 64, 240)),
    )
    
)
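
The middle sub-network holds only a VariationalLayer, which is presumably what makes this autoencoder variational: it would split the 128 encoder channels into 64 means and 64 log-deviations and sample a 64-channel code, which is why the decoder's first convolution takes 64 input channels. A minimal NumPy sketch of that assumed reparameterization step (the internals of nn.VariationalLayer are not shown in these examples):

import numpy as np

def variational_sample(h, rng):
    # Assumed behaviour: split the channel axis into mean and log-sigma
    # halves, then sample with the reparameterization trick.
    mu, log_sigma = np.split(h, 2, axis=1)   # (batch, 64, 60) each
    eps = rng.normal(size=mu.shape)          # unit Gaussian noise
    return mu + np.exp(log_sigma) * eps      # z = mu + sigma * eps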
print "motion dataset shape = ", train_motion_dataset.shape

E = shared(train_motion_dataset)

BATCH_SIZE = 100

network = Network(
    Conv1DLayer(rng, (64, 66, 25), (BATCH_SIZE, 66, 240)),
    Pool1DLayer(rng, (2,), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),

    Conv1DLayer(rng, (128, 64, 25), (BATCH_SIZE, 64, 120)),
    Pool1DLayer(rng, (2,), (BATCH_SIZE, 128, 120)),
    ActivationLayer(rng, f='elu'),
    
    InverseNetwork(Pool1DLayer(rng, (2,), (BATCH_SIZE, 128, 120))),
    Conv1DLayer(rng, (64, 128, 25), (BATCH_SIZE, 128, 120)),
    ActivationLayer(rng, f='elu'),

    InverseNetwork(Pool1DLayer(rng, (2,), (BATCH_SIZE, 64, 240))),
    Conv1DLayer(rng, (66, 64, 25), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),
)
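
InverseNetwork wrapped around a Pool1DLayer appears to run that layer backwards, i.e. depooling, doubling the time axis (60 -> 120 -> 240) so the following convolutions can map back to the 66 input features. A toy sketch of the kind of deterministic depooling this implies (the library's actual depooler is configurable; see the depooler override further below):

import numpy as np

def depool_1d(x, factor=2):
    # Repeat each time step 'factor' times: (batch, ch, T) -> (batch, ch, T * factor)
    return np.repeat(x, factor, axis=2)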

network.load(['../models/locomotion/ae/ae_layer_0.npz', None, None,
                '../models/locomotion/ae/ae_layer_1.npz', None, None,
                None, '../models/locomotion/ae/ae_layer_2.npz', None,
                None, '../models/locomotion/ae/ae_layer_3.npz', None,])

func = theano.function([], network(E))
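
Because the compiled function takes no inputs and closes over the shared variable E, running the reconstruction is a plain call; a usage sketch:

result = func()       # evaluates the whole autoencoder on E
print(result.shape)   # expected (BATCH_SIZE, 66, 240), per the final Conv1DLayer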
Example #3
shared = lambda d: theano.shared(d, borrow=True)

train_set_x, train_set_y = map(shared, datasets[0])
valid_set_x, valid_set_y = map(shared, datasets[1])
test_set_x, test_set_y = map(shared, datasets[2])

batchsize = 100

train_set_x = train_set_x.reshape((50000, 1, 28, 28))
valid_set_x = valid_set_x.reshape((10000, 1, 28, 28))
test_set_x = test_set_x.reshape((10000, 1, 28, 28))

network = Network(Conv2DLayer(rng, (4, 1, 5, 5), (batchsize, 1, 28, 28)),
                  BatchNormLayer(rng, (batchsize, 4, 28, 28), axes=(0, 2, 3)),
                  ActivationLayer(rng, f='ReLU'),
                  Pool2DLayer(rng, (batchsize, 4, 28, 28)),
                  ReshapeLayer(rng, (batchsize, 4 * 14 * 14)),
                  HiddenLayer(rng, (4 * 14 * 14, 10)),
                  ActivationLayer(rng, f='softmax'))
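
The axes=(0, 2, 3) argument matches standard spatial batch normalization: statistics are pooled over the batch and both spatial axes, leaving one mean/variance pair per channel. A minimal sketch of that normalization (the layer's learnable scale and shift are omitted here):

import numpy as np

def batchnorm_2d(x, eps=1e-5):
    # x: (batch, channels, height, width); per-channel statistics
    mu = x.mean(axis=(0, 2, 3), keepdims=True)
    var = x.var(axis=(0, 2, 3), keepdims=True)
    return (x - mu) / np.sqrt(var + eps)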

trainer = AdamTrainer(rng=rng,
                      batchsize=batchsize,
                      epochs=15,
                      alpha=0.0001,
                      cost='cross_entropy')
trainer.train(network=network,
              train_input=train_set_x,
              train_output=train_set_y,
              valid_input=valid_set_x,
              valid_output=valid_set_y,
              test_input=test_set_x,
              test_output=test_set_y,
              filename=None)
encoder = HiddenLayer(rng, (128, H_SIZE))
encode_igate = HiddenLayer(rng, (128, H_SIZE))
encode_fgate = HiddenLayer(rng, (128, H_SIZE))

recoder = HiddenLayer(rng, (H_SIZE, H_SIZE))
recode_igate = HiddenLayer(rng, (H_SIZE, H_SIZE))
recode_fgate = HiddenLayer(rng, (H_SIZE, H_SIZE))

activation = ActivationLayer(rng, f='elu')
dropout = DropoutLayer(rng, 0.2)

encoder_network = Network(
    Conv1DLayer(rng, (64, 63, 25), (BATCH_SIZE, 63, 240)),
    Pool1DLayer(rng, (2, ), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),
    Conv1DLayer(rng, (256, 64, 25), (BATCH_SIZE, 64, 120)),
    Pool1DLayer(rng, (2, ), (BATCH_SIZE, 256, 120)),
    ActivationLayer(rng, f='elu'),
)

encoder_network.load([
    '../models/vae_lstm/3_vae_lstm_layer_0.npz',
    None,
    None,
    '../models/vae_lstm/3_vae_lstm_layer_1.npz',
    None,
    None,
])

ff_network = Network(
    Conv1DLayer(rng, (64, 3, 25), (BATCH_SIZE, 3, 240)),
Example #5
datasets = load_data(dataset)

shared = lambda d: theano.shared(d, borrow=True)

train_set_x, train_set_y = map(shared, datasets[0])
valid_set_x, valid_set_y = map(shared, datasets[1])
test_set_x, test_set_y   = map(shared, datasets[2])

batchsize = 1

train_set_x = train_set_x.reshape((50000, 1, 28, 28))
valid_set_x = valid_set_x.reshape((10000, 1, 28, 28))
test_set_x  = test_set_x.reshape((10000, 1, 28, 28))

network = Network(
	NoiseLayer(rng, 0.3),

	Conv2DLayer(rng, (4, 1, 5, 5), (batchsize, 1, 28, 28)),
	Pool2DLayer(rng, (batchsize, 4, 28, 28)),
	ActivationLayer(rng, f='ReLU'),
	ReshapeLayer(rng, (batchsize, 4*14*14)),

	HiddenLayer(rng, (4*14*14, 10)),
	ActivationLayer(rng, f='softmax')
)

trainer = AdamTrainer(rng=rng, batchsize=batchsize, epochs=5, alpha=0.00001, cost='cross_entropy')
trainer.train(network=network, train_input=train_set_x, train_output=train_set_y,
                               valid_input=valid_set_x, valid_output=valid_set_y,
                               test_input=test_set_x, test_output=test_set_y, filename=None)
Example #6
E = shared(train_motion_dataset)

BATCH_SIZE = 40

network = Network(
    DropoutLayer(rng, 0.2),
    Conv1DLayer(rng, (64, 66, 25), (BATCH_SIZE, 66, 240)),
    Pool1DLayer(rng, (2,), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),

    DropoutLayer(rng, 0.2),
    Conv1DLayer(rng, (128, 64, 25), (BATCH_SIZE, 64, 120)),
    Pool1DLayer(rng, (2,), (BATCH_SIZE, 128, 120)),
    ActivationLayer(rng, f='elu'),
    
    InverseNetwork(Pool1DLayer(rng, (2,), (BATCH_SIZE, 128, 120))),
    DropoutLayer(rng, 0.2),  
    Conv1DLayer(rng, (64, 128, 25), (BATCH_SIZE, 128, 120)),
    ActivationLayer(rng, f='elu'),

    InverseNetwork(Pool1DLayer(rng, (2,), (BATCH_SIZE, 64, 240))),
    DropoutLayer(rng, 0.2),    
    Conv1DLayer(rng, (66, 64, 25), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),
)

trainer = AdamTrainer(rng, batchsize=BATCH_SIZE, epochs=500, alpha=0.0005, cost='mse')
trainer.train(network, E, E, filename=[None, '../models/locomotion/ae/ae_layer_0.npz', None, None,
                                            None, '../models/locomotion/ae/ae_layer_1.npz', None, None,
                                            None, None, '../models/locomotion/ae/ae_layer_2.npz', None,
                                            None, None, '../models/locomotion/ae/ae_layer_3.npz', None])
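
The filename list here, like the network.load lists in the other examples, appears to be positional: one entry per layer of the Network, with a path only at the indices of layers that own parameters (the Conv1DLayers) and None everywhere else. A hypothetical check of that assumed convention:

filenames = [None, '../models/locomotion/ae/ae_layer_0.npz', None, None]  # first block only
for layer, fname in zip(network.layers, filenames):
    print(type(layer).__name__, '->', fname)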
from utils import load_data

rng = np.random.RandomState(23455)

dataset = '../data/mnist/mnist.pkl.gz'
datasets = load_data(dataset)

shared = lambda d: theano.shared(d)

train_set_x, train_set_y = map(shared, datasets[0])
valid_set_x, valid_set_y = map(shared, datasets[1])
test_set_x, test_set_y = map(shared, datasets[2])

network = Network(HiddenLayer(rng, (784, 500)), BatchNormLayer(rng, (784, 500)),
                  ActivationLayer(rng, f='ReLU'), HiddenLayer(rng, (500, 10)),
                  BatchNormLayer(rng, (500, 10)), ActivationLayer(rng, f='softmax'))

trainer = AdamTrainer(rng=rng,
                      batchsize=2,
                      epochs=1,
                      alpha=0.00001,
                      cost='cross_entropy')
trainer.train(network=network,
              train_input=train_set_x,
              train_output=train_set_y,
              valid_input=valid_set_x,
              valid_output=valid_set_y,
              test_input=test_set_x,
              test_output=test_set_y,
              filename=None)
Y_test = theano.shared(np.array(Y)[test_split:], borrow=True)

batchsize = 10

network = Network(
    Conv1DLayer(rng, (64, 66, 25), (batchsize, 66, 240)),
    # For stable computation with the batchnorm layer,
    # make sure the features of the data are normalized
    # (the 3rd axis for style-transfer data)
    BatchNormLayer(rng, (batchsize, 64, 240), axes=(0, 2)),
    ActivationLayer(rng, f='ReLU'),
    Pool1DLayer(rng, (2,), (batchsize, 64, 240)),

    Conv1DLayer(rng, (128, 64, 25), (batchsize, 64, 120)),
    BatchNormLayer(rng, (batchsize, 128, 120), axes=(0, 2)),
    ActivationLayer(rng, f='ReLU'),
    Pool1DLayer(rng, (2,), (batchsize, 128, 120)),
    
    Conv1DLayer(rng, (256, 128, 25), (batchsize, 128, 60)),
    BatchNormLayer(rng, (batchsize, 256, 60), axes=(0, 2)),
    ActivationLayer(rng, f='ReLU'),
    Pool1DLayer(rng, (2,), (batchsize, 256, 60)),

    # after the final pool: 256 * 30 = 7680
    ReshapeLayer(rng, (batchsize, 7680)),
    HiddenLayer(rng, (np.prod([256, 30]), 8)),
    ActivationLayer(rng, f='softmax'),
)

# Load the pre-trained conv-layers
network.load(['../models/conv_ae/layer_0.npz', None, None,
              '../models/conv_ae/layer_1.npz', None, None,
              '../models/conv_ae/layer_2.npz', None, None])
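
The 7680 in the ReshapeLayer follows from the pooling alone: each of the three Pool1DLayers halves the 240-frame window, leaving 256 channels over 30 frames. A quick check of the arithmetic (assuming length-preserving convolutions and non-overlapping pooling, as the declared layer shapes imply):

length = 240
for _ in range(3):    # three conv/pool blocks
    length //= 2      # 240 -> 120 -> 60 -> 30
assert 256 * length == 7680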
encoder = HiddenLayer(rng, (128, H_SIZE))
encode_igate = HiddenLayer(rng, (128, H_SIZE))
encode_fgate = HiddenLayer(rng, (128, H_SIZE))

recoder = HiddenLayer(rng, (H_SIZE, H_SIZE))
recode_igate = HiddenLayer(rng, (H_SIZE, H_SIZE))
recode_fgate = HiddenLayer(rng, (H_SIZE, H_SIZE))

activation = ActivationLayer(rng, f='elu')
dropout = DropoutLayer(rng, 0.2)

control_network = Network(
    Conv1DLayer(rng, (64, 3, 25), (BATCH_SIZE, 3, 240)),
    Pool1DLayer(rng, (2, ), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),
    Conv1DLayer(rng, (128, 64, 25), (BATCH_SIZE, 64, 120)),
    Pool1DLayer(rng, (2, ), (BATCH_SIZE, 128, 120)),
    ActivationLayer(rng, f='elu'),
    LSTM1DTestLayer(encoder, recoder, encode_igate, recode_igate, encode_fgate,
                    recode_fgate, activation, dropout, H),
)

control_network.load([
    '../models/cmu/vae_lstm/1_normalized_layer_0.npz',
    None,
    None,
    '../models/cmu/vae_lstm/1_normalized_layer_1.npz',
    None,
    None,
    '../models/cmu/vae_lstm/1_normalized_layer_2.npz',
])
network.load([
    None,
    '../models/conv_ae/layer_0.npz', None, None,
    '../models/conv_ae/layer_1.npz', None, None,
    '../models/conv_ae/layer_2.npz', None, None,
])
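
# Put the loaded network into deterministic inference mode: silence the
# NoiseLayer corruption and make the Pool1DLayer depooling a fixed halving
# (relevant when the pooling is inverted).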

for layer in network.layers:
    if isinstance(layer, NoiseLayer): layer.amount = 0.0
    if isinstance(layer, Pool1DLayer):  layer.depooler = lambda x, **kw: x/2

# Buffers for the hidden activations computed from the data
Xnout = np.empty([17924, 256, 30], dtype=theano.config.floatX)
Xoout = np.empty([17924, 256, 30], dtype=theano.config.floatX)

# Iterate over the inputs one batch at a time
for i in range(0, len(X), BATCH_SIZE):

    amount = 0.5

    # Take the next batch
    Xorig = X[i:i + BATCH_SIZE]
    # Corrupt it with a dropout-style binary mask
    Xnois = (Xorig * rng.binomial(size=Xorig.shape, n=1, p=(1 - amount)).astype(theano.config.floatX))
    # Activations for the noisy inputs
    Xnout[i:i + BATCH_SIZE] = np.array(Network(network)(Xnois).eval()).astype(theano.config.floatX)[:]
    # Activations for the clean inputs
    Xoout[i:i + BATCH_SIZE] = np.array(Network(network)(Xorig).eval()).astype(theano.config.floatX)[:]

# Save both the noisy and clean activations
np.savez_compressed('../data/Joe/HiddenActivations', Noisy=Xnout, Orig=Xoout)
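
np.savez_compressed appends the .npz extension, so the two arrays can be read back by key later:

data = np.load('../data/Joe/HiddenActivations.npz')
Xnout, Xoout = data['Noisy'], data['Orig']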
train_motion_dataset = dataset[0][0][:100]

print "motion dataset shape = ", train_motion_dataset.shape

E = shared(train_motion_dataset)

BATCH_SIZE = 100

network = Network(
    Conv1DLayer(rng, (64, 66, 25), (BATCH_SIZE, 66, 240)),
    Pool1DLayer(rng, (2, ), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),
    Conv1DLayer(rng, (128, 64, 25), (BATCH_SIZE, 64, 120)),
    Pool1DLayer(rng, (2, ), (BATCH_SIZE, 128, 120)),
    ActivationLayer(rng, f='elu'),
    InverseNetwork(Pool1DLayer(rng, (2, ), (BATCH_SIZE, 128, 120))),
    Conv1DLayer(rng, (64, 128, 25), (BATCH_SIZE, 128, 120)),
    ActivationLayer(rng, f='elu'),
    InverseNetwork(Pool1DLayer(rng, (2, ), (BATCH_SIZE, 64, 240))),
    Conv1DLayer(rng, (66, 64, 25), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),
)

network.load([
    '../models/locomotion/ae/ae_layer_0.npz',
    None,
    None,
    '../models/locomotion/ae/ae_layer_1.npz',
    None,
    None,
    None,
    '../models/locomotion/ae/ae_layer_2.npz',
    None,
    None,
    '../models/locomotion/ae/ae_layer_3.npz',
    None,
])
dataset, std, mean = load_locomotion(rng)
E = shared(dataset[0][0])

BATCH_SIZE = 40

FC_SIZE = 800

encoderNetwork = Network(
    Conv1DLayer(rng, (64, 66, 25), (BATCH_SIZE, 66, 240)),
    BatchNormLayer(rng, (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),
    Pool1DLayer(rng, (2,), (BATCH_SIZE, 64, 240)),

    DropoutLayer(rng, 0.25),
    Conv1DLayer(rng, (128, 64, 25), (BATCH_SIZE, 64, 120)),
    BatchNormLayer(rng, (BATCH_SIZE, 128, 120)),
    ActivationLayer(rng, f='elu'),
    Pool1DLayer(rng, (2,), (BATCH_SIZE, 128, 120)),

    ReshapeLayer(rng, (BATCH_SIZE, 128*60)),
    DropoutLayer(rng, 0.25),    
    HiddenLayer(rng, (128*60, FC_SIZE)),
    BatchNormLayer(rng, (128*60, FC_SIZE)),
    ActivationLayer(rng, f='elu'),
)

variationalNetwork = Network(
    VariationalLayer(rng),
)

decoderNetwork = Network(
    HiddenLayer(rng, (FC_SIZE // 2, 64 * 30)),  # FC_SIZE // 2: the VariationalLayer presumably splits the code into mean/sigma halves
Example #13
import numpy as np
import theano
import theano.tensor as T

from nn.ActivationLayer import ActivationLayer
from nn.NoiseLayer import NoiseLayer
from nn.Pool1DLayer import Pool1DLayer
from nn.Conv1DLayer import Conv1DLayer
from nn.Network import Network, AutoEncodingNetwork

rng = np.random.RandomState(23455)

BATCH_SIZE = 1
network = Network(NoiseLayer(rng, 0.3),
                  Conv1DLayer(rng, (64, 66, 25), (BATCH_SIZE, 66, 240)),
                  Pool1DLayer(rng, (2, ), (BATCH_SIZE, 64, 240)),
                  ActivationLayer(rng),
                  Conv1DLayer(rng, (128, 64, 25), (BATCH_SIZE, 64, 120)),
                  Pool1DLayer(rng, (2, ), (BATCH_SIZE, 128, 120)),
                  ActivationLayer(rng),
                  Conv1DLayer(rng, (256, 128, 25), (BATCH_SIZE, 128, 60)),
                  Pool1DLayer(rng, (2, ), (BATCH_SIZE, 256, 60)),
                  ActivationLayer(rng))
network = Network(
    Conv1DLayer(rng, (64, 66, 25), (batchsize, 66, 240)),
    # For stable computation with the batchnorm layer,
    # make sure the features of the data are normalized
    # (the 3rd axis for style-transfer data)
    BatchNormLayer(rng, (batchsize, 64, 240), axes=(0, 2)),
    ActivationLayer(rng, f='ReLU'),
    Pool1DLayer(rng, (2, ), (batchsize, 64, 240)),
    Conv1DLayer(rng, (128, 64, 25), (batchsize, 64, 120)),
    BatchNormLayer(rng, (batchsize, 128, 120), axes=(0, 2)),
    ActivationLayer(rng, f='ReLU'),
    Pool1DLayer(rng, (2, ), (batchsize, 128, 120)),
    Conv1DLayer(rng, (256, 128, 25), (batchsize, 128, 60)),
    BatchNormLayer(rng, (batchsize, 256, 60), axes=(0, 2)),
    ActivationLayer(rng, f='ReLU'),
    Pool1DLayer(rng, (2, ), (batchsize, 256, 60)),

    # after the final pool: 256 * 30 = 7680
    ReshapeLayer(rng, (batchsize, 7680)),
    HiddenLayer(rng, (np.prod([256, 30]), 8)),
    ActivationLayer(rng, f='softmax'),
)
encoder = HiddenLayer(rng, (128, H_SIZE))
encode_igate = HiddenLayer(rng, (128, H_SIZE))
encode_fgate = HiddenLayer(rng, (128, H_SIZE))

recoder = HiddenLayer(rng, (H_SIZE, H_SIZE))
recode_igate = HiddenLayer(rng, (H_SIZE, H_SIZE))
recode_fgate = HiddenLayer(rng, (H_SIZE, H_SIZE))

activation = ActivationLayer(rng, f='elu')
dropout = DropoutLayer(rng, 0.2)

control_network = Network(    
    Conv1DLayer(rng, (64, 3, 25), (BATCH_SIZE, 3, 240)),
    Pool1DLayer(rng, (2,), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),

    Conv1DLayer(rng, (128, 64, 25), (BATCH_SIZE, 64, 120)),
    Pool1DLayer(rng, (2,), (BATCH_SIZE, 128, 120)),
    ActivationLayer(rng, f='elu'),

    LSTM1DTestLayer(encoder, recoder, encode_igate, recode_igate, encode_fgate, recode_fgate, activation, dropout, H),
)

control_network.load(['../models/cmu/vae_lstm/1_normalized_layer_0.npz', None, None, 
                '../models/cmu/vae_lstm/1_normalized_layer_1.npz', None, None, 
                '../models/cmu/vae_lstm/1_normalized_layer_2.npz',])

control_func = theano.function([], control_network(E))
control_result = control_func()
print(control_result.shape)

C = shared(control_result)
Example #16
dataset, std, mean = load_locomotion(rng)
E = shared(dataset[0][0])

BATCH_SIZE = 40

generatorNetwork = Network(
    DropoutLayer(rng, 0.15),
    HiddenLayer(rng, (800, 64 * 30)),
    BatchNormLayer(rng, (800, 64 * 30)),
    ActivationLayer(rng, f='elu'),
    ReshapeLayer(rng, (BATCH_SIZE, 64, 30)),
    InverseNetwork(Pool1DLayer(rng, (2, ), (BATCH_SIZE, 64, 60))),
    DropoutLayer(rng, 0.15),
    Conv1DLayer(rng, (64, 64, 25), (BATCH_SIZE, 64, 60)),
    ActivationLayer(rng, f='elu'),
    InverseNetwork(Pool1DLayer(rng, (2, ), (BATCH_SIZE, 64, 120))),
    DropoutLayer(rng, 0.25),
    Conv1DLayer(rng, (64, 64, 25), (BATCH_SIZE, 64, 120)),
    ActivationLayer(rng, f='elu'),
    InverseNetwork(Pool1DLayer(rng, (2, ), (BATCH_SIZE, 64, 240))),
    DropoutLayer(rng, 0.25),
    Conv1DLayer(rng, (66, 64, 25), (BATCH_SIZE, 64, 240)),
    ActivationLayer(rng, f='elu'),
)

discriminatorNetwork = Network(
    DropoutLayer(rng, 0.15),
    Conv1DLayer(rng, (64, 66, 25), (BATCH_SIZE * 2, 66, 240)),
    ActivationLayer(rng, f='elu'),
    Pool1DLayer(rng, (2, ), (BATCH_SIZE * 2, 64, 240)),