'x'
)  # the data is presented as a vector of inputs with many exchangeable examples of this vector
# Fragment: two conv/pool layers (pooling on the frequency axis only), then
# flattened per time step for an LSTM and a Poisson regression readout.
x = clip_gradient(x, 1.0)  # presumably caps the gradient wrt x at 1.0 -- confirm clip_gradient semantics
y = T.matrix(
    'y'
)  # the data is presented as a vector of inputs with many exchangeable examples of this vector
trial_no = T.matrix('trial_no')  # per-trial covariate matrix -- TODO confirm layout against caller

rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init

# Architecture: input --> CNN (Pooling only on frequency) --> LSTM --> Poisson Regression on Neural data
layer0_input = x.reshape((1, 1, song_size - 1, 60))  # (batch=1, channel=1, time, freq)

layer0 = LeNetConvPoolLayer(rng,
                            input=layer0_input,
                            image_shape=(1, 1, song_size - 1, 60),
                            filter_shape=(filter_number_1, 1, 1, 3),
                            poolsize=(1, 3),  # frequency axis only: 60 -> 20
                            dim2=1)

layer1 = LeNetConvPoolLayer(rng,
                            input=layer0.output,
                            image_shape=(1, filter_number_1, song_size - 1,
                                         20),
                            filter_shape=(filter_number_2, filter_number_1, 1,
                                          2),
                            poolsize=(1, 2),  # frequency axis only: 20 -> 10
                            dim2=1)

# Collapse conv features to a (time steps, features) matrix for the LSTM.
lstm_input = layer1.output.reshape((song_size - 1, 10 * filter_number_2))

#May be worth splitting to different LSTMs...would require smaller filter size
# allocate symbolic variables for the data

index = T.lscalar()  # index to a [mini]batch
x = T.matrix(
    'x'
)  # the data is presented as a vector of inputs with many exchangeable examples of this vector
rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init

# Reshape matrix of rasterized inputs of shape (minibatch_size, 1000 * 60)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = x.reshape((minibatch_size, 1, 1000, 60))

layer0 = LeNetConvPoolLayer(rng,
                            input=layer0_input,
                            image_shape=(minibatch_size, 1, 1000, 60),
                            filter_shape=(layer0_filters, 1, 5, 5),
                            poolsize=(5, 1),  # pool along time only: 1000 -> 200
                            dim2=1)

layer1 = LeNetConvPoolLayer(rng,
                            input=layer0.output,
                            image_shape=(minibatch_size, layer0_filters, 200,
                                         60),
                            filter_shape=(layer1_filters, layer0_filters, 2,
                                          2),
                            poolsize=(2, 1),  # time only: 200 -> 100
                            dim2=1)

layer2 = LeNetConvPoolLayer(rng,
                            input=layer1.output,
                            image_shape=(minibatch_size, layer1_filters, 100,
# Fragment: symbolic graph for two conv/pool layers over (1000 x 60) inputs.
# Parenthesized print is valid on both Python 2 (single argument) and Python 3.
print('Building model...')

# allocate symbolic variables for the data
index = T.lscalar()  # index to a [mini]batch
x = T.matrix('x')  # the data is presented as a vector of inputs with many exchangeable examples of this vector
rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init

# Reshape matrix of rasterized inputs of shape (minibatch_size, 1000 * 60)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = x.reshape((minibatch_size, 1, 1000, 60))

layer0 = LeNetConvPoolLayer(
    rng,
    input=layer0_input,
    image_shape=(minibatch_size, 1, 1000, 60),
    filter_shape=(layer0_filters, 1, 5, 5),
    poolsize=(5, 1),  # pool along time only: 1000 -> 200
    dim2=1,
)

layer1 = LeNetConvPoolLayer(
    rng,
    input=layer0.output,
    image_shape=(minibatch_size, layer0_filters, 200, 60),
    filter_shape=(layer1_filters, layer0_filters, 2, 2),
    poolsize=(2, 1),  # time only: 200 -> 100
    dim2=1,
)

layer2 = LeNetConvPoolLayer(
    rng,
Beispiel #4
0
# Fragment: symbolic graph for two conv/pool layers over (2000 x 60) inputs.
# Parenthesized print is valid on both Python 2 (single argument) and Python 3.
print("Building model...")

# allocate symbolic variables for the data
index = T.lscalar()  # index to a [mini]batch
x = T.matrix("x")  # the data is presented as a vector of inputs with many exchangeable examples of this vector
rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init

# Reshape matrix of rasterized images of shape (batch_size, 2000 * 60)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = x.reshape((minibatch_size, 1, 2000, 60))

layer0 = LeNetConvPoolLayer(
    rng,
    input=layer0_input,
    image_shape=(minibatch_size, 1, 2000, 60),
    filter_shape=(layer0_filters, 1, 3, 3),
    poolsize=(2, 2),  # both axes halved: 2000x60 -> 1000x30
    dim2=1,
)

layer1 = LeNetConvPoolLayer(
    rng,
    input=layer0.output,
    image_shape=(minibatch_size, layer0_filters, 1000, 30),
    filter_shape=(layer1_filters, layer0_filters, 2, 2),
    poolsize=(2, 2),  # 1000x30 -> 500x15
    dim2=1,
)

layer2 = LeNetConvPoolLayer(
    rng,
# Auxiliary symbolic matrices T_mat0..T_mat4 -- purpose not visible here;
# presumably consumed by later layers (TODO confirm against the full file).
T_mat0 = T.matrix('T_mat0')
T_mat1 = T.matrix('T_mat1')
T_mat2 = T.matrix('T_mat2')
T_mat3 = T.matrix('T_mat3')
T_mat4 = T.matrix('T_mat4')
rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init
	
# Reshape matrix of rasterized inputs of shape (minibatch_size, 1000 * 60)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = x.reshape((minibatch_size, 1, 1000, 60))

layer0 = LeNetConvPoolLayer(
    rng,
    input=layer0_input,
    image_shape=(minibatch_size, 1, 1000, 60),
    filter_shape=( layer0_filters, 1, 5, 5),
    poolsize=(5, 1),
    dim2 = 1
)

layer1 = LeNetConvPoolLayer(
    rng,
    input=layer0.output,
    image_shape=(minibatch_size, layer0_filters, 200, 60),
    filter_shape=( layer1_filters, layer0_filters, 2, 2),
    poolsize=(2, 1),
    dim2 = 1
)

layer2 = LeNetConvPoolLayer(
    rng,
# Here x is a Theano shared variable built from `init` (defined upstream),
# rather than a symbolic input as in the sibling fragments.
x = shared(init, borrow=True)

T_mat0 = T.matrix('T_mat0')
T_mat1 = T.matrix('T_mat1')
T_mat2 = T.matrix('T_mat2')
T_mat3 = T.matrix('T_mat3')
T_mat4 = T.matrix('T_mat4')
rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init

# Reshape matrix of rasterized inputs of shape (minibatch_size, 1000 * 60)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = x.reshape((minibatch_size, 1, 1000, 60))

layer0 = LeNetConvPoolLayer(rng,
                            input=layer0_input,
                            image_shape=(minibatch_size, 1, 1000, 60),
                            filter_shape=(layer0_filters, 1, 5, 5),
                            poolsize=(5, 1),  # pool along time only: 1000 -> 200
                            dim2=1)

layer1 = LeNetConvPoolLayer(rng,
                            input=layer0.output,
                            image_shape=(minibatch_size, layer0_filters, 200,
                                         60),
                            filter_shape=(layer1_filters, layer0_filters, 2,
                                          2),
                            poolsize=(2, 1),  # time only: 200 -> 100
                            dim2=1)

layer2 = LeNetConvPoolLayer(rng,
                            input=layer1.output,
                            image_shape=(minibatch_size, layer1_filters, 100,
# Fragment: one conv/pool layer -> LSTM -> transposed convolution back to
# input space (autoencoder-style one-step-ahead prediction).
index = T.lscalar()  # [mini]batch index
x = T.matrix('x')  # one row per exchangeable input example
x = clip_gradient(x, 1.0)  # presumably bounds the gradient wrt x -- confirm clip_gradient semantics
y = T.matrix('y')  # prediction target, same layout as x

is_train = T.iscalar('is_train')  # pseudo boolean: training vs. prediction mode

rng = numpy.random.RandomState(1234)  # deterministic weight initialisation

# Architecture: input --> LSTM --> predict one-ahead
layer0_input = x.reshape((1, 1, song_size - 1, 60))  # (batch=1, chan=1, time, freq)

layer0 = LeNetConvPoolLayer(rng,
                            input=layer0_input,
                            image_shape=(1, 1, song_size - 1, 60),
                            filter_shape=(filter_number, 1, 1, 10),
                            poolsize=(1, 3),
                            dim2=1)

# Collapse the conv output to a (time steps, features) matrix for the LSTM.
lstm_input = layer0.output.reshape((song_size - 1, 20 * filter_number))

# May be worth splitting into several smaller LSTMs (would need smaller filters).
lstm_1 = LSTM(rng, lstm_input, n_in=20 * filter_number, n_out=n_hidden)

# Re-expand the LSTM output to 4D and run the transposed convolution.
lstm_output = lstm_1.output.reshape((1, filter_number, song_size - 1, 20))

layer0.reverseConv(lstm_output,
                   (1, filter_number, song_size - 1, 60),
                   (1, filter_number, 1, 10))  # filter flipped on first two axes
# Fragment: two stacked conv/pool layers over a ((song_size-1) x 60) input,
# flattened per time step to feed an LSTM.
index = T.lscalar()  # [mini]batch index
x = T.matrix('x')  # one row per exchangeable input example
x = clip_gradient(x, 1.0)  # presumably bounds the gradient wrt x -- confirm clip_gradient semantics
y = T.matrix('y')  # one-step-ahead prediction target

is_train = T.iscalar('is_train')  # pseudo boolean: training vs. prediction mode

rng = numpy.random.RandomState(1234)  # deterministic weight initialisation

layer0_input = x.reshape((1, 1, song_size - 1, 60))  # (batch=1, chan=1, time, freq)

layer0 = LeNetConvPoolLayer(rng,
                            input=layer0_input,
                            image_shape=(1, 1, song_size - 1, 60),
                            filter_shape=(filter_number_1, 1, 1, 3),
                            poolsize=(1, 3),  # frequency axis: 60 -> 20
                            dim2=1)

layer1 = LeNetConvPoolLayer(rng,
                            input=layer0.output,
                            image_shape=(1, filter_number_1, song_size - 1, 20),
                            filter_shape=(filter_number_2, filter_number_1, 1, 2),
                            poolsize=(1, 2),  # frequency axis: 20 -> 10
                            dim2=1)

# (time steps, features) matrix for the recurrent layer that follows.
lstm_input = layer1.output.reshape((song_size - 1, 10 * filter_number_2))
index = T.lscalar()  # index to a [mini]batch
x = T.tensor3('x')  # the data is presented as a vector of inputs with many exchangeable examples of this vector
y = T.imatrix('y')  # integer class-label matrix
y_enc = T.imatrix('y_enc')  # presumably an encoded form of y -- TODO confirm
rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init
number = T.matrix('number')  # auxiliary input -- purpose not visible here
variation = T.matrix('variation')  # auxiliary input -- purpose not visible here

# Reshape rasterized inputs of shape (minibatch_size, 60 * 60)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = x.reshape((minibatch_size, 1, 60, 60))

layer0 = LeNetConvPoolLayer(
    rng,
    input=layer0_input,
    image_shape=(minibatch_size, 1, 60, 60),
    filter_shape=( layer0_filters, 1, 3, 3),
    poolsize=(2, 2),
    dim2 = 1
)

layer1 = LeNetConvPoolLayer(
    rng,
    input=layer0.output,
    image_shape=(minibatch_size, layer0_filters, 30, 30),
    filter_shape=( layer1_filters, layer0_filters, 2, 2),
    poolsize=(2, 2),
    dim2 = 1
)

layer2 = LeNetConvPoolLayer(
    rng,
Beispiel #10
0
)  # the data is presented as a vector of inputs with many exchangeable examples of this vector
y = T.imatrix(
    'y'
)  # the data is presented as a vector of inputs with many exchangeable examples of this vector
y_enc = T.imatrix('y_enc')  # presumably an encoded form of y -- TODO confirm
rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init
number = T.matrix('number')  # auxiliary input -- purpose not visible here
variation = T.matrix('variation')  # auxiliary input -- purpose not visible here

# Reshape rasterized inputs of shape (minibatch_size, 60 * 60)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = x.reshape((minibatch_size, 1, 60, 60))

layer0 = LeNetConvPoolLayer(rng,
                            input=layer0_input,
                            image_shape=(minibatch_size, 1, 60, 60),
                            filter_shape=(layer0_filters, 1, 3, 3),
                            poolsize=(2, 2),  # 60x60 -> 30x30
                            dim2=1)

layer1 = LeNetConvPoolLayer(rng,
                            input=layer0.output,
                            image_shape=(minibatch_size, layer0_filters, 30,
                                         30),
                            filter_shape=(layer1_filters, layer0_filters, 2,
                                          2),
                            poolsize=(2, 2),  # 30x30 -> 15x15
                            dim2=1)

layer2 = LeNetConvPoolLayer(rng,
                            input=layer1.output,
                            image_shape=(minibatch_size, layer1_filters, 15,
x = clip_gradient(x, 1.0)  # presumably caps the gradient wrt x at 1.0 -- confirm clip_gradient semantics
y = T.matrix(
    'y'
)  # the data is presented as a vector of inputs with many exchangeable examples of this vector

is_train = T.iscalar(
    'is_train')  # pseudo boolean for switching between training and prediction

rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init

# Architecture: input --> CNN (two conv/pool layers) --> LSTM --> predict one-ahead
layer0_input = x.reshape((1, 1, song_size - 1, 60))

layer0 = LeNetConvPoolLayer(rng,
                            input=layer0_input,
                            image_shape=(1, 1, song_size - 1, 60),
                            filter_shape=(filter_number_1, 1, 1, 3),
                            poolsize=(1, 3),  # frequency axis only: 60 -> 20
                            dim2=1)

layer1 = LeNetConvPoolLayer(rng,
                            input=layer0.output,
                            image_shape=(1, filter_number_1, song_size - 1,
                                         20),
                            filter_shape=(filter_number_2, filter_number_1, 1,
                                          2),
                            poolsize=(1, 2),  # frequency axis only: 20 -> 10
                            dim2=1)

# Collapse conv features to a (time steps, features) matrix for the LSTM.
lstm_input = layer1.output.reshape((song_size - 1, 10 * filter_number_2))

#May be worth splitting to different LSTMs...would require smaller filter size
index = T.lscalar()  # index to a [mini]batch
x = T.matrix('x')  # the data is presented as a vector of inputs with many exchangeable examples of this vector
y = T.imatrix('y')  # integer class-label matrix
y_enc = T.imatrix('y_enc')  # presumably an encoded form of y -- TODO confirm
rng = numpy.random.RandomState(1234)  # fixed seed for reproducible weight init
number = T.matrix('number')  # auxiliary input -- purpose not visible here
variation = T.matrix('variation')  # auxiliary input -- purpose not visible here

# Reshape rasterized inputs of shape (minibatch_size, 28 * 28)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = x.reshape((minibatch_size, 1, 28, 28))

layer0 = LeNetConvPoolLayer(
    rng,
    input=layer0_input,
    image_shape=(minibatch_size, 1, 28, 28),
    filter_shape=( layer0_filters, 1, 3, 3),
    poolsize=(2, 2),
    dim2 = 1
)

layer1 = LeNetConvPoolLayer(
    rng,
    input=layer0.output,
    image_shape=(minibatch_size, layer0_filters, 14, 14),
    filter_shape=( layer1_filters, layer0_filters, 2, 2),
    poolsize=(2, 2),
    dim2 = 1
)

layer2 = LeNetConvPoolLayer(
    rng,