Example #1
    filter_shape=(layer3_filters, layer2_filters, 2, 2),
    poolsize=(2, 1),
    dim2=1
)

layer4 = LeNetConvPoolLayer(
    rng,
    input=layer3.output,
    image_shape=(minibatch_size, layer3_filters, 25, 60),
    filter_shape=(layer4_filters, layer3_filters, 2, 2),
    poolsize=(1, 1),
    dim2=1
)


layer4.reverseConv(layer4.output, (minibatch_size, layer4_filters, 25, 60), (layer3_filters, layer4_filters, 2, 2))

layer3.reverseConv(layer4.reverseOutput, (minibatch_size, layer3_filters, 50, 60), (layer2_filters, layer3_filters, 2, 2))

layer2.reverseConv(layer3.reverseOutput, (minibatch_size, layer2_filters, 100, 60), (layer1_filters, layer2_filters, 2, 2))

layer1.reverseConv(layer2.reverseOutput, (minibatch_size, layer1_filters, 200, 60), (layer0_filters, layer1_filters, 2, 2))

layer0.reverseConv(layer1.reverseOutput, (minibatch_size, layer0_filters, 1000, 60), (1, layer0_filters, 5, 5))  # filter flipped on first two axes

reconstructed = layer0.reverseOutput

cost = T.mean((layer0_input - reconstructed) ** 2)
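
# `reverseConv` itself is not defined in these snippets. Below is a minimal,
# hypothetical sketch of what such a decoder step could look like in Theano,
# assuming it performs a 'full'-mode convolution with the layer's own weights,
# channel axes swapped (the "filter flipped on first two axes" noted in the
# comments) and spatial taps reversed; inverting the pooling is ignored here.
from theano.tensor.nnet import conv2d

def reverse_conv_sketch(layer, input, output_shape, filter_shape):
    # output_shape is kept only to mirror the call signature used above
    # swap the in/out channel axes and reverse the spatial taps
    flipped_W = layer.W.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]
    # 'full' border mode restores the rows/columns that the forward
    # 'valid' convolution removed, yielding a tensor of `output_shape`
    layer.reverseOutput = conv2d(input, flipped_W,
                                 filter_shape=filter_shape,
                                 border_mode='full')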

###########################################################
###########################################################
Example #2
    poolsize=(2, 1),
    dim2=1,
)

layer4 = LeNetConvPoolLayer(
    rng,
    input=layer3.output,
    image_shape=(minibatch_size, layer3_filters, 125, 15),
    filter_shape=(layer4_filters, layer3_filters, 2, 2),
    poolsize=(1, 1),
    dim2=1,
)

# need log_reg and deconvolution layers

layer4.reverseConv(layer4.output, (1, layer4_filters, 125, 15), (layer3_filters, layer4_filters, 2, 2))

layer3.reverseConv(layer4.reverseOutput, (1, layer3_filters, 250, 15), (layer2_filters, layer3_filters, 2, 2))

layer2.reverseConv(layer3.reverseOutput, (1, layer2_filters, 500, 15), (layer1_filters, layer2_filters, 2, 2))

layer1.reverseConv(layer2.reverseOutput, (1, layer1_filters, 1000, 30), (layer0_filters, layer1_filters, 2, 2))

layer0.reverseConv(
    layer1.reverseOutput, (1, layer0_filters, 2000, 60), (1, layer0_filters, 3, 3)
)  # filter flipped on first two axes

reconstructed = layer0.reverseOutput

cost = T.mean((x - reconstructed) ** 2)
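
# Hypothetical usage, not part of the original snippet: compile a Theano
# function mapping an input minibatch to its reconstruction and the scalar
# reconstruction error; assumes `x` is the symbolic input fed to layer0.
import theano

reconstruct_fn = theano.function([x], [reconstructed, cost])
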
Example #3
                                         60),
                            filter_shape=(layer3_filters, layer2_filters, 2,
                                          2),
                            poolsize=(2, 1),
                            dim2=1)

layer4 = LeNetConvPoolLayer(rng,
                            input=layer3.output,
                            image_shape=(minibatch_size, layer3_filters, 25,
                                         60),
                            filter_shape=(layer4_filters, layer3_filters, 2,
                                          2),
                            poolsize=(1, 1),
                            dim2=1)

layer4.reverseConv(layer4.output, (minibatch_size, layer4_filters, 25, 60),
                   (layer3_filters, layer4_filters, 2, 2))

layer3.reverseConv(layer4.reverseOutput,
                   (minibatch_size, layer3_filters, 50, 60),
                   (layer2_filters, layer3_filters, 2, 2))

layer2.reverseConv(layer3.reverseOutput,
                   (minibatch_size, layer2_filters, 100, 60),
                   (layer1_filters, layer2_filters, 2, 2))

layer1.reverseConv(layer2.reverseOutput,
                   (minibatch_size, layer1_filters, 200, 60),
                   (layer0_filters, layer1_filters, 2, 2))

layer0.reverseConv(
    layer1.reverseOutput, (minibatch_size, layer0_filters, 1000, 60),
    (1, layer0_filters, 5, 5))  # filter flipped on first two axes
Example #4
                                          2),
                            poolsize=(2, 1),
                            dim2=1)

layer4 = LeNetConvPoolLayer(rng,
                            input=layer3.output,
                            image_shape=(minibatch_size, layer3_filters, 125,
                                         15),
                            filter_shape=(layer4_filters, layer3_filters, 2,
                                          2),
                            poolsize=(1, 1),
                            dim2=1)

# need log_reg and deconvolution layers

layer4.reverseConv(layer4.output, (1, layer4_filters, 125, 15),
                   (layer3_filters, layer4_filters, 2, 2))

layer3.reverseConv(layer4.reverseOutput, (1, layer3_filters, 250, 15),
                   (layer2_filters, layer3_filters, 2, 2))

layer2.reverseConv(layer3.reverseOutput, (1, layer2_filters, 500, 15),
                   (layer1_filters, layer2_filters, 2, 2))

layer1.reverseConv(layer2.reverseOutput, (1, layer1_filters, 1000, 30),
                   (layer0_filters, layer1_filters, 2, 2))

layer0.reverseConv(layer1.reverseOutput, (1, layer0_filters, 2000, 60),
                   (1, layer0_filters, 3, 3))  # filter flipped on first two axes
Example #5
    image_shape=(1, 1, song_size - 1, 60),
    filter_shape=(filter_number, 1, 1, 10),
    poolsize=(1, 3),
    dim2=1
)

lstm_input = layer0.output.reshape((song_size - 1, 20 * filter_number))

# May be worth splitting into separate LSTMs; that would require a smaller filter size
lstm_1 = LSTM(rng, lstm_input, n_in=20 * filter_number, n_out=n_hidden)

#output = LinearRegression(input=lstm_1.output, n_in=n_hidden, n_out=data_set_x.get_value(borrow=True).shape[1])

lstm_output = lstm_1.output.reshape((1, filter_number, song_size - 1, 20))

layer0.reverseConv(lstm_output, (1, filter_number, song_size - 1, 60), (1, filter_number, 1, 10))  # filter flipped on first two axes

reconstructed = layer0.reverseOutput.reshape((song_size - 1, 60))

################################
# Objective function and GD
################################

print('defining cost, parameters, and learning function...')

# the cost we minimize during training is the mean squared
# reconstruction error
cost = T.mean((y - reconstructed) ** 2)

# Defining params
params = lstm_1.params + layer0.params
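
# A minimal sketch of the learning function announced above; the snippet ends
# before defining it. Plain SGD is assumed, along with `import theano`, a
# symbolic input `x` feeding layer0, and a hypothetical learning rate.
grads = T.grad(cost, params)
learning_rate = 0.01  # assumed value, not from the original
updates = [(param, param - learning_rate * grad)
           for param, grad in zip(params, grads)]
train_step = theano.function([x, y], cost, updates=updates)
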
Example #6
    filter_shape=(filter_number_2, filter_number_1, 1, 2),
    poolsize=(1, 2),
    dim2=1
)

lstm_input = layer1.output.reshape((song_size - 1, 10 * filter_number_2))

# May be worth splitting into separate LSTMs; that would require a smaller filter size
lstm_1 = LSTM(rng, lstm_input, n_in=10 * filter_number_2, n_out=n_hidden)

#output = LinearRegression(input=lstm_1.output, n_in=n_hidden, n_out=data_set_x.get_value(borrow=True).shape[1])
dnn = HiddenLayer(rng, lstm_1.output, n_in=n_hidden, n_out=10 * filter_number_2)

dnn_output = dnn.output.reshape((1, filter_number_2, song_size - 1, 10))

layer1.reverseConv(dnn_output, (1, filter_number_2, song_size - 1, 20), (filter_number_1, filter_number_2, 1, 2))  # filter flipped on first two axes

layer0.reverseConv(layer1.reverseOutput, (1, filter_number_1, song_size - 1, 60), (1, filter_number_1, 1, 3))  # filter flipped on first two axes

reconstructed = layer0.reverseOutput.reshape((song_size - 1, 60))

################################
# Objective function and GD
################################

print('defining cost, parameters, and learning function...')

# the cost we minimize during training is the mean squared
# reconstruction error
cost = T.mean((y - reconstructed) ** 2)
Example #7
lin_reg = LinearRegressionRandom(reg_input, 15 * 15 * layer3_filters, 2, True)

log_input = log_reg.p_y_given_x
lin_input = lin_reg.E_y_given_x

log_reg.reconstruct(log_input)
lin_reg.reconstruct(lin_input)

reconstructed_regressions = T.concatenate([log_reg.reconstructed_x, lin_reg.reconstructed_x], axis=1)

reverse_layer = LinearRegression(reconstructed_regressions, 2 * 15 * 15 * layer3_filters, 15 * 15 * layer3_filters, False)

reconstruct = reverse_layer.E_y_given_x.reshape((minibatch_size, layer3_filters, 15, 15))

layer3.reverseConv(reconstruct, (minibatch_size, layer3_filters, 15, 15), (layer2_filters, layer3_filters, 2, 2))

layer2.reverseConv(layer3.reverseOutput, (minibatch_size, layer2_filters, 15, 15), (layer1_filters, layer2_filters, 2, 2))

layer1.reverseConv(layer2.reverseOutput, (minibatch_size, layer1_filters, 30, 30), (layer0_filters, layer1_filters, 2, 2))

layer0.reverseConv(layer1.reverseOutput, (minibatch_size, layer0_filters, 60, 60), (1, layer0_filters, 3, 3))

difference = (layer0_input - layer0.reverseOutput) ** 2

encoder_cost = T.mean(difference)

cross_entropy_cost = T.mean(log_reg.cross_entropy_binary(y))

y_hat_mean = T.mean(log_reg.p_y_given_x, axis=0)
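
# A plausible way, not shown in the snippet, to fold the two objectives above
# into one training cost; the weighting term `alpha` is an assumption.
alpha = 0.5  # hypothetical trade-off: reconstruction vs. classification
total_cost = alpha * encoder_cost + (1.0 - alpha) * cross_entropy_cost
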
Example #8
lin_input = lin_reg.E_y_given_x

log_reg.reconstruct(log_input)
lin_reg.reconstruct(lin_input)

reconstructed_regressions = T.concatenate(
    [log_reg.reconstructed_x, lin_reg.reconstructed_x], axis=1)

reverse_layer = LinearRegression(reconstructed_regressions,
                                 2 * 15 * 15 * layer3_filters,
                                 15 * 15 * layer3_filters, False)

reconstruct = reverse_layer.E_y_given_x.reshape(
    (minibatch_size, layer3_filters, 15, 15))

layer3.reverseConv(reconstruct, (minibatch_size, layer3_filters, 15, 15),
                   (layer2_filters, layer3_filters, 2, 2))

layer2.reverseConv(layer3.reverseOutput,
                   (minibatch_size, layer2_filters, 15, 15),
                   (layer1_filters, layer2_filters, 2, 2))

layer1.reverseConv(layer2.reverseOutput,
                   (minibatch_size, layer1_filters, 30, 30),
                   (layer0_filters, layer1_filters, 2, 2))

layer0.reverseConv(layer1.reverseOutput,
                   (minibatch_size, layer0_filters, 60, 60),
                   (1, layer0_filters, 3, 3))
Example #9
lstm_input = layer1.output.reshape((song_size - 1, 10 * filter_number_2))

# May be worth splitting into separate LSTMs; that would require a smaller filter size
lstm_1 = LSTM(rng, lstm_input, n_in=10 * filter_number_2, n_out=n_hidden)

#output = LinearRegression(input=lstm_1.output, n_in=n_hidden, n_out=data_set_x.get_value(borrow=True).shape[1])
dnn = HiddenLayer(rng,
                  lstm_1.output,
                  n_in=n_hidden,
                  n_out=10 * filter_number_2)

dnn_output = dnn.output.reshape((1, filter_number_2, song_size - 1, 10))

layer1.reverseConv(dnn_output, (1, filter_number_2, song_size - 1, 20),
                   (filter_number_1, filter_number_2, 1,
                    2))  # filter flipped on first two axes

layer0.reverseConv(
    layer1.reverseOutput, (1, filter_number_1, song_size - 1, 60),
    (1, filter_number_1, 1, 3))  # filter flipped on first two axes

reconstructed = layer0.reverseOutput.reshape((song_size - 1, 60))

################################
# Objective function and GD
################################

print('defining cost, parameters, and learning function...')

# the cost we minimize during training is the mean squared
# reconstruction error
cost = T.mean((y - reconstructed) ** 2)
Example #10
lin_reg = LinearRegressionRandom(reg_input, 7 * 7 * layer3_filters, 2, True)

log_input = log_reg.p_y_given_x
lin_input = lin_reg.E_y_given_x

log_reg.reconstruct(log_input)
lin_reg.reconstruct(lin_input)

reconstructed_regressions = T.concatenate([log_reg.reconstructed_x, lin_reg.reconstructed_x], axis=1)

reverse_layer = LinearRegression(reconstructed_regressions, 2 * 7 * 7 * layer3_filters, 7 * 7 * layer3_filters, False)

reconstruct = reverse_layer.E_y_given_x.reshape((minibatch_size, layer3_filters, 7, 7))

layer3.reverseConv(reconstruct, (minibatch_size, layer3_filters, 7, 7), (layer2_filters, layer3_filters, 2, 2))

layer2.reverseConv(layer3.reverseOutput, (minibatch_size, layer2_filters, 7, 7), (layer1_filters, layer2_filters, 2, 2))

layer1.reverseConv(layer2.reverseOutput, (minibatch_size, layer1_filters, 14, 14), (layer0_filters, layer1_filters, 2, 2))

layer0.reverseConv(layer1.reverseOutput, (minibatch_size, layer0_filters, 28, 28), (1, layer0_filters, 3, 3))

difference = (layer0_input - layer0.reverseOutput) ** 2

encoder_cost = T.mean(difference)

cross_entropy_cost = T.mean(log_reg.cross_entropy_binary(y))

y_hat_mean = T.mean(log_reg.p_y_given_x, axis=0)