Example #1
lstm_1 = LSTM(rng, x,
              n_in=data_set_x.get_value(borrow=True).shape[1],
              n_out=n_hidden)

output = LogisticRegression(input=lstm_1.output,
                            n_in=n_hidden,
                            n_out=data_set_x.get_value(borrow=True).shape[1])
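
# LSTM and LogisticRegression are classes defined elsewhere in this project and
# are not part of the snippet. Since the cost below calls cross_entropy_binary
# and n_out equals the dimensionality of the input data, the output layer is
# presumably a sigmoid layer. The block below is a minimal sketch of such a
# layer under a hypothetical name; the project's LogisticRegression may differ.
# (Imports are repeated here so the sketch stands on its own.)
import numpy
import theano
import theano.tensor as T


class SigmoidOutputLayer(object):
    def __init__(self, input, n_in, n_out):
        # zero-initialized affine map, as in the classic Theano tutorials
        self.W = theano.shared(numpy.zeros((n_in, n_out),
                                           dtype=theano.config.floatX),
                               name='W', borrow=True)
        self.b = theano.shared(numpy.zeros((n_out,),
                                           dtype=theano.config.floatX),
                               name='b', borrow=True)
        # one independent sigmoid probability per output unit
        self.p_y_given_x = T.nnet.sigmoid(T.dot(input, self.W) + self.b)
        self.params = [self.W, self.b]

    def cross_entropy_binary(self, y):
        # element-wise binary cross-entropy against the targets y,
        # summed over output units (one value per row of the input)
        return T.sum(T.nnet.binary_crossentropy(self.p_y_given_x, y), axis=-1)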

################################
# Objective function and GD
################################

print('defining cost, parameters, and learning function...')

# the cost we minimize during training is the mean binary cross-entropy of the
# model's predictions, i.e. the negative log-likelihood of the targets under an
# element-wise Bernoulli output model
cost = T.mean(output.cross_entropy_binary(y))

#Defining params
params = lstm_1.params + output.params

# updates from ADAM
updates = Adam(cost, params)
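
# Adam(cost, params) is a helper defined elsewhere in this project that returns
# a list of Theano updates. A minimal sketch of the standard Adam rule
# (Kingma & Ba, 2015) that it presumably implements is given below under the
# hypothetical name adam_updates; the project's version and its hyperparameters
# may differ. (Relies on the numpy/theano imports shown in the sketch above.)
def adam_updates(cost, params, lr=0.001, b1=0.9, b2=0.999, eps=1e-8):
    grads = T.grad(cost, params)
    t_prev = theano.shared(numpy.asarray(0., dtype=theano.config.floatX))
    t = t_prev + 1
    # bias-corrected step size (the epsilon-hat formulation from the paper)
    a_t = lr * T.sqrt(1. - b2 ** t) / (1. - b1 ** t)
    updates = []
    for p, g in zip(params, grads):
        value = p.get_value(borrow=True)
        m_prev = theano.shared(numpy.zeros(value.shape, dtype=value.dtype),
                               broadcastable=p.broadcastable)
        v_prev = theano.shared(numpy.zeros(value.shape, dtype=value.dtype),
                               broadcastable=p.broadcastable)
        m_t = b1 * m_prev + (1. - b1) * g        # first-moment estimate
        v_t = b2 * v_prev + (1. - b2) * g ** 2   # second-moment estimate
        updates.append((m_prev, m_t))
        updates.append((v_prev, v_t))
        updates.append((p, p - a_t * m_t / (T.sqrt(v_t) + eps)))
    updates.append((t_prev, t))
    return updates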

#######################
# Compile training function
#######################

print('compiling train...')

train_model = theano.function(
    inputs=[index],
Example #2
#reconstructed_regressions = T.concatenate([log_reg.reconstructed_x,lin_reg.reconstructed_x],axis=1)
#
#reverse_layer = LinearRegression(reconstructed_regressions, 2*lstm_2_hidden, lstm_2_hidden,False)

lstm_3 = LSTM(rng, log_reg.reconstructed_x, lstm_2_hidden, lstm_1_hidden)

lstm_4 = LSTM(rng, lstm_3.output, lstm_1_hidden, 30)

init_reg.reconstruct(lstm_4.output)

difference = (ahead - init_reg.reconstructed_x)**2

encoder_cost = T.mean(difference)

cross_entropy_cost = T.mean(log_reg.cross_entropy_binary(y))
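
# This snippet does not show how the two objectives are combined for training.
# A common pattern (an assumption here, not necessarily what this project does)
# is a weighted sum, with the weight treated as a hyperparameter:
alpha = 0.5  # hypothetical trade-off between reconstruction and classification
total_cost = alpha * encoder_cost + (1. - alpha) * cross_entropy_cost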

#y_hat_mean = T.mean(log_reg.p_y_given_x,axis=0)
#
#z_hat_mean = T.mean(lin_reg.E_y_given_x,axis=0)
#
#z_variance = lin_reg.E_y_given_x - z_hat_mean
#z_var = z_variance.reshape((60,2,1)) #must reshape for outer product
#
#y_variance = log_reg.p_y_given_x - y_hat_mean
#y_var = y_variance.reshape((60,1,10))
#
#product = T.batched_dot(z_var,y_var) #an outer product across batches
#
#product_mean_sqr = (T.mean(product,axis=0) **2)
#
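
# The commented-out block above computes, for each example in a batch of 60,
# the outer product of a 2-dim regression deviation with a 10-dim class
# probability deviation via reshape + T.batched_dot. A small standalone check
# of that reshape/batched_dot trick (shapes are illustrative only):
import numpy
import theano
import theano.tensor as T

z = T.tensor3('z')              # (batch, 2, 1)
p = T.tensor3('p')              # (batch, 1, 10)
outer = T.batched_dot(z, p)     # (batch, 2, 10): one outer product per example
outer_fn = theano.function([z, p], outer)

z_val = numpy.random.randn(60, 2, 1).astype(theano.config.floatX)
p_val = numpy.random.randn(60, 1, 10).astype(theano.config.floatX)
assert outer_fn(z_val, p_val).shape == (60, 2, 10)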
Example #4
# Architecture: input --> LSTM --> predict one-ahead

lstm_1 = LSTM(rng, x, n_in=data_set_x.get_value(borrow=True).shape[1], n_out=n_hidden)

output = LogisticRegression(input=lstm_1.output, n_in=n_hidden, n_out=data_set_x.get_value(borrow=True).shape[1])
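
# The LSTM class used above is defined elsewhere in this project and is not
# part of the snippet. Below is a minimal sketch, under assumptions, of what
# such a layer could look like in Theano: a single LSTM scanned over a
# (timesteps, n_in) input, exposing .output and .params like the class used
# here. The name SketchLSTM and all internals are hypothetical; the real LSTM
# class may differ (mini-batching, initialization, peephole connections, ...).
import numpy
import theano
import theano.tensor as T


class SketchLSTM(object):
    def __init__(self, rng, input, n_in, n_out):
        def init_weights(shape):
            # small uniform initialization; the project may use another scheme
            w = rng.uniform(low=-0.1, high=0.1, size=shape)
            return theano.shared(numpy.asarray(w, dtype=theano.config.floatX),
                                 borrow=True)

        # input-to-hidden and hidden-to-hidden weights for the four gates,
        # stacked along the second axis: [input, forget, candidate, output]
        self.W = init_weights((n_in, 4 * n_out))
        self.U = init_weights((n_out, 4 * n_out))
        self.b = theano.shared(numpy.zeros(4 * n_out,
                                           dtype=theano.config.floatX),
                               borrow=True)

        def step(x_t, h_prev, c_prev):
            pre = T.dot(x_t, self.W) + T.dot(h_prev, self.U) + self.b
            i = T.nnet.sigmoid(pre[:n_out])           # input gate
            f = T.nnet.sigmoid(pre[n_out:2 * n_out])  # forget gate
            g = T.tanh(pre[2 * n_out:3 * n_out])      # candidate cell state
            o = T.nnet.sigmoid(pre[3 * n_out:])       # output gate
            c_t = f * c_prev + i * g
            h_t = o * T.tanh(c_t)
            return h_t, c_t

        init = T.zeros((n_out,), dtype=theano.config.floatX)
        (h, _), _ = theano.scan(step, sequences=input, outputs_info=[init, init])

        self.output = h                        # hidden state at every timestep
        self.params = [self.W, self.U, self.b]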


################################
# Objective function and GD
################################

print('defining cost, parameters, and learning function...')

# the cost we minimize during training is the mean binary cross-entropy of the
# model's predictions, i.e. the negative log-likelihood of the targets under an
# element-wise Bernoulli output model
cost = T.mean(output.cross_entropy_binary(y))

#Defining params
params = lstm_1.params + output.params

# updates from ADAM
updates = Adam(cost, params)

#######################
# Compile training function
#######################

print('compiling train...')

train_model = theano.function(inputs=[index], outputs=cost,
        updates=updates,