# LSTM autoencoder: project the input down, encode through two stacked
# LSTMs, classify, then decode symmetrically and score reconstruction
# against the look-ahead target `ahead`.
rng = numpy.random.RandomState(1234)

# Input projection: 60 -> 30 features before the recurrent stack.
init_reg = LinearRegression(x, 60, 30, True)

# Encoder: two stacked LSTMs over the projected input.
lstm_1 = LSTM(rng, init_reg.E_y_given_x, 30, lstm_1_hidden)
lstm_2 = LSTM(rng, lstm_1.output, lstm_1_hidden, lstm_2_hidden)
reg_input = lstm_2.output

# Classification head over the top encoder state (41 classes).
# NOTE(review): a parallel cross-covariate LinearRegression head (plus a
# reverse_layer to merge both reconstructions) was disabled here; only
# the logistic path feeds the decoder.
log_reg = LogisticRegression(reg_input, lstm_2_hidden, 41)
log_reg.reconstruct(log_reg.p_y_given_x)

# Decoder: mirror the encoder back down to the 30-dim projected space.
lstm_3 = LSTM(rng, log_reg.reconstructed_x, lstm_2_hidden, lstm_1_hidden)
lstm_4 = LSTM(rng, lstm_3.output, lstm_1_hidden, 30)
init_reg.reconstruct(lstm_4.output)

# Mean squared reconstruction error.
difference = (ahead - init_reg.reconstructed_x) ** 2
encoder_cost = T.mean(difference)
image_shape=(minibatch_size, layer2_filters, 15, 15), filter_shape=( layer3_filters, layer2_filters, 2, 2), poolsize=(1, 1), dim2 = 1 ) reg_input = layer3.output.flatten(2) log_reg = LogisticRegression(reg_input,15*15*layer3_filters, 41) lin_reg = LinearRegressionRandom(reg_input,15*15*layer3_filters,2,True) log_input = log_reg.p_y_given_x lin_input = lin_reg.E_y_given_x log_reg.reconstruct(log_input) lin_reg.reconstruct(lin_input) reconstructed_regressions = T.concatenate([log_reg.reconstructed_x,lin_reg.reconstructed_x],axis=1) reverse_layer = LinearRegression(reconstructed_regressions, 2*15*15*layer3_filters, 15*15*layer3_filters,False) reconstruct = reverse_layer.E_y_given_x.reshape((minibatch_size,layer3_filters,15,15)) layer3.reverseConv(reconstruct,(minibatch_size,layer3_filters,15,15),(layer2_filters,layer3_filters,2,2)) layer2.reverseConv(layer3.reverseOutput,(minibatch_size,layer2_filters,15,15),(layer1_filters,layer2_filters,2,2)) layer1.reverseConv(layer2.reverseOutput,(minibatch_size,layer1_filters,30,30),(layer0_filters,layer1_filters,2,2)) layer0.reverseConv(layer1.reverseOutput,(minibatch_size,layer0_filters,60,60),(1,layer0_filters,3,3,))
# LSTM autoencoder (variant: scalar input projected to 30 features).
# Encode through two stacked LSTMs, classify, decode symmetrically, and
# score reconstruction against the look-ahead target `ahead`.
rng = numpy.random.RandomState(1234)

# Input projection: 1 -> 30 features.
init_reg = LinearRegression(x, 1, 30, True)

# Encoder stack.
lstm_1 = LSTM(rng, init_reg.E_y_given_x, 30, lstm_1_hidden)
lstm_2 = LSTM(rng, lstm_1.output, lstm_1_hidden, lstm_2_hidden)
reg_input = lstm_2.output

# 41-class logistic head; its reconstruction seeds the decoder.
# NOTE(review): the cross-covariate linear head and reverse merge layer
# were disabled in this variant.
log_reg = LogisticRegression(reg_input, lstm_2_hidden, 41)
log_reg.reconstruct(log_reg.p_y_given_x)

# Decoder stack, mirroring the encoder back to 30 features.
lstm_3 = LSTM(rng, log_reg.reconstructed_x, lstm_2_hidden, lstm_1_hidden)
lstm_4 = LSTM(rng, lstm_3.output, lstm_1_hidden, 30)
init_reg.reconstruct(lstm_4.output)

# Mean squared reconstruction error.
difference = (ahead - init_reg.reconstructed_x) ** 2
encoder_cost = T.mean(difference)
15), filter_shape=(layer3_filters, layer2_filters, 2, 2), poolsize=(1, 1), dim2=1) reg_input = layer3.output.flatten(2) log_reg = LogisticRegression(reg_input, 15 * 15 * layer3_filters, 41) lin_reg = LinearRegressionRandom(reg_input, 15 * 15 * layer3_filters, 2, True) log_input = log_reg.p_y_given_x lin_input = lin_reg.E_y_given_x log_reg.reconstruct(log_input) lin_reg.reconstruct(lin_input) reconstructed_regressions = T.concatenate( [log_reg.reconstructed_x, lin_reg.reconstructed_x], axis=1) reverse_layer = LinearRegression(reconstructed_regressions, 2 * 15 * 15 * layer3_filters, 15 * 15 * layer3_filters, False) reconstruct = reverse_layer.E_y_given_x.reshape( (minibatch_size, layer3_filters, 15, 15)) layer3.reverseConv(reconstruct, (minibatch_size, layer3_filters, 15, 15), (layer2_filters, layer3_filters, 2, 2))