Code example #1
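# Keras 0.x-era snippet; imports assumed but not shown in the source
# (module paths are a best guess for that API generation):
# import numpy as np
# import matplotlib.pyplot as plt
# from keras.models import Sequential
# from keras.layers.recurrent import GRU
# from keras.layers.core import Dropout, TimeDistributedDense, Activation
# from keras.utils.layer_utils import print_layer_shapes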
# Plot the first five series side by side, 40 steps each along the x-axis
for i in range(0, 5):
    plt.plot(np.arange(i * 40, i * 40 + 40), X[i])

np.random.seed(0)  # For reproducibility
#data = np.genfromtxt('closingAdjLog.csv', delimiter=',')
# NB: assumes a project-specific train_test_split returning ((X_train, y_train), (X_test, y_test));
# sklearn's function of the same name has a different return signature.
(X_train, y_train), (X_test, y_test) = train_test_split(_prices.values)  # retrieve data
print("Data loaded.")

print(X_train.shape)
print(y_train.shape)

# Simpler model than in the initial post
in_out_neurons = 2  
hidden_neurons = 2

model = Sequential()  
model.add(GRU(hidden_neurons, input_dim=in_out_neurons, return_sequences=True))
model.add(Dropout(0.2))
model.add(TimeDistributedDense(in_out_neurons))  
model.add(Activation("linear"))  
model.compile(loss="mean_squared_error", optimizer="rmsprop") 
print "Model compiled."

print_layer_shapes(model, input_shapes=X_train.shape)

# and now train the model. 
model.fit(X_train, y_train, batch_size=30, nb_epoch=200, validation_data=(X_test, y_test))  
print_layer_shapes(model, input_shapes=X_train.shape)
predicted = model.predict(X_test)  
print(np.sqrt(((predicted - y_test) ** 2).mean(axis=0)).mean())  # Print RMSE
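
A minimal smoke test for the compiled model above, using random arrays in place of the project's data; the (samples, timesteps, features) layout and every size here are assumptions, not from the original project:

# Hypothetical dummy data: 100 sequences of 40 timesteps, in_out_neurons features each
X_dummy = np.random.rand(100, 40, in_out_neurons).astype('float32')
y_dummy = np.random.rand(100, 40, in_out_neurons).astype('float32')
model.fit(X_dummy, y_dummy, batch_size=30, nb_epoch=1)
print(model.predict(X_dummy).shape)  # expected: (100, 40, 2)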
Code example #2
# pickle.dump( conv.get_weights(), open( "result/w_"+filename+".p", "wb" ) )
# pickle.dump( Z_predict, open( "result/z_"+filename+".p", "wb" ) )
# pickle.dump( X_recon, open( "result/recon_"+filename+".p", "wb" ) )

# -- train encoder
# encoder.add(ZeroPadding1D(padding=(filter_length-1)/2))
encoder = Sequential()
enc = Convolution2D(nb_filter=nb_filter, nb_row=1, nb_col=filter_length,
                    init='glorot_uniform', activation='tanh', weights=None,
                    border_mode='full', subsample=(1, 1),
                    W_regularizer=l2(0.01), b_regularizer=None, activity_regularizer=None,
                    W_constraint=None, b_constraint=None, input_shape=(1, 1, 1000))
encoder.add(enc)

# -- testing dimension
layer_utils.print_layer_shapes(encoder, [(1, 1, 1, 1000)])
adg = Adagrad(lr=0.01, epsilon=1e-6)
encoder.compile(loss='mean_squared_error', optimizer=adg)
encoder.fit(X_train, Z_predict, batch_size=10, nb_epoch=1)

Z_recon = encoder.predict(X_train)
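
Note that layer_utils.print_layer_shapes takes batch-inclusive shapes: the [(1, 1, 1, 1000)] above is the conv layer's input_shape=(1, 1, 1000) with a leading batch dimension. Assuming Theano-style (batch, channels, rows, cols) ordering, border_mode='full' grows the output width by filter_length - 1:

# Expected encoder output shape (an inference, not from the source):
# (batch, nb_filter, 1, 1000 + filter_length - 1)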
Code example #3
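# (Snippet opens mid-model: `left` is a Sequential whose earlier layers are
# truncated in the source; the 32 * 13 * 13 below is consistent with a
# 32-filter 'valid' 3x3 conv plus 2x2 pooling on the 1x28x28 input declared later.)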
left.add(MaxPooling2D(pool_size=(2, 2)))
left.add(Flatten())
left.add(Dense(32 * 13 * 13, 50))
left.add(Activation('relu'))

right = Sequential()
right.add(Dense(784, 30))
right.add(Activation('relu'))

model = Sequential()
model.add(Merge([left, right], mode='concat'))

model.add(Dense(80, 10))
model.add(Activation('softmax'))

layer_utils.print_layer_shapes(model, [(1, 1, 28, 28), (1, 784)])

print('-- Graph model')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=4)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')

graph.add_node(Convolution2D(32, 1, 3, 3), name='conv1', input='input2')
graph.add_node(Flatten(), name='flatten1', input='conv1')
graph.add_node(Dense(32 * 13 * 13, 10), name='dense4', input='flatten1')

# NB: merge_mode='sum' needs matching output dims, but dense1 is 16-d and
# dense3 is 4-d, so this line likely fails at run time in the original too.
graph.add_output(name='output1', inputs=['dense1', 'dense3'], merge_mode='sum')
graph.add_output(name='output2', inputs=['dense1', 'dense4'], merge_mode='concat')
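
The Graph above only declares topology. Compiling and fitting would follow the same dict-keyed pattern used in example #4 below (losses keyed by output name, data keyed by input/output names); a sketch, with X1, X2, y1, y2 as hypothetical arrays:

# Hedged usage sketch for the Keras 0.x Graph API:
graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
graph.fit({'input1': X1, 'input2': X2, 'output1': y1, 'output2': y2}, nb_epoch=1)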
Code example #4
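# (Snippet opens mid-graph: `cnn` is a Graph whose input 'win' and the layers
# feeding 'act1' are truncated in the source.)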
cnn.add_node(MaxPooling2D(pool_size=(2,2)), name='pool1', input='act1')
cnn.add_node(Dropout(0.25), name='dropout1', input='pool1')

cnn.add_node(Convolution2D(48, 3, 3, border_mode='valid', init=weights_init), 
             name='conv2', input='dropout1')
cnn.add_node(Activation(activation), name='act2', input='conv2')
cnn.add_node(MaxPooling2D(pool_size=(2,2)), name='pool2', input='act2')
cnn.add_node(Dropout(0.25), name='dropout2', input='pool2')
cnn.add_node(Flatten(), name='flatten', input='dropout2')
cnn.add_node(Dense(128, init=weights_init), name='dense1',
             input='flatten')
cnn.add_node(Activation(activation), name='act3', input='dense1')
cnn.add_node(Dropout(0.1), name='dropout3', input='act3')
cnn.add_output(name='cnn_output', input='dropout3')

print_layer_shapes(cnn, input_shapes={'win':(42, 3, 21, 21)})
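# For Graph models, input_shapes maps each input name to a batch-inclusive
# shape: here, a batch of 42 windows of 3 channels x 21 x 21.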

# -- pred
cnn_pretrain.add_node(cnn, name='cnn', input='extract_windows')
cnn_pretrain.add_node(Dense(nclasses, init=weights_init), name='dense2',
                      input='cnn')
cnn_pretrain.add_node(Activation('softmax'), name='softmax', input='dense2')
cnn_pretrain.add_output(name='output', input='softmax')

cnn_pretrain.compile('adam', {'output':'categorical_crossentropy'})

print_layer_shapes(cnn_pretrain, input_shapes={'ij':train_ij[:42].shape})
#print_layer_shapes(cnn_pretrain, input_shapes={'extract_windows':train_win[:42].shape})

keras_utils.print_model_params(cnn_pretrain)
Code example #5
File: main.py  Project: yjpark1/kerasConvAutoEncoder
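# (Snippet opens mid-call: the indented lines below are the tail of what appears
# to be an encoder.add(Convolution1D(...)) call whose opening is truncated in the source.)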
                            activation="tanh",
                            activity_regularizer=activity_l1(0.001),
                            W_regularizer=l2(.001),
                            subsample_length=1))
    decoder = Sequential()
    decoder.add(Convolution1D(input_length=1000,
                              input_dim=nb_filter,
                              nb_filter=1,
                              filter_length=filter_length,
                              border_mode="same",
                              activation="linear",
                              W_regularizer=l2(.001),
                              subsample_length=1))
    ae = Sequential()
    ae.add(AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=False))
    layer_utils.print_layer_shapes(ae, [(1, 1000, 1)])
    print("....compile")
    # rms = RMSprop(lr=0.0000001)
    ae.compile(loss='mean_squared_error', optimizer='Adagrad')
    print("....fitting")
    ae.fit(X_train_tmp, X_train_tmp, batch_size=batch_size, nb_epoch=nb_epoch)
    trained_encoders.append(ae.layers[0].encoder)
    X_train_tmp = ae.predict(X_train_tmp)

# Fine-tuning
# print('Fine-tuning')
# model = Sequential()
# for encoder in trained_encoders:
#     model.add(encoder)
# model.add(Flatten());
# model.add(Dense(?, 2, activation='softmax'))
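
The loop above is the greedy layer-wise pretraining pattern: each AutoEncoder learns to reconstruct the current representation X_train_tmp, its encoder half is stored in trained_encoders, and its output becomes the next layer's input. The commented block then sketches stacking those encoders for supervised fine-tuning; the '?' input dimension was left unresolved in the source.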