def main(_):
    """Entry point: dispatch to training, prediction, or evaluation.

    Exactly one of FLAGS.train / FLAGS.predict / FLAGS.eval must be set.
    Creates the checkpoint and log directories if they do not exist yet.
    """
    # Exactly one mode flag must be enabled.
    assert sum([FLAGS.train, FLAGS.predict, FLAGS.eval]) == 1, \
        'exactly one of --train, --predict, --eval must be set'

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)

    # Pin all work to GPU 1; must be set before the session is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = '1'
    with tf.Session() as sess:
        dnn = DNN(sess, FLAGS)
        if FLAGS.train:
            dnn.fit()
        elif FLAGS.predict:
            dnn.load_network()
            # Each row of gen_samples.csv is [features..., label]; the last
            # column is split off as the label before prediction.
            samples = np.array(
                pd.read_csv('dataset/gen_samples.csv', header=None))
            gen_y = samples[:, -1]
            predict = dnn.predict(np.delete(samples, -1, 1))
            # Keep only the misclassified rows and write them back in place.
            samples = samples[gen_y != predict]
            pd.DataFrame(samples).to_csv('dataset/gen_samples.csv',
                                         index=False,
                                         header=None)
        elif FLAGS.eval:
            dnn.load_network()
            dnn.eval()
# ===== Example 2 =====
        

# NOTE(review): sklearn removed load_boston in 1.2 — this snippet requires an
# older scikit-learn; fetch_california_housing is the modern replacement.
boston = datasets.load_boston()
data = boston.data
target = boston.target

# Scale the feature matrix; the regression targets are left untouched.
matrix = Preprocess.to_matrix(list(data))
matrix = Preprocess.scale(matrix)
matrix = list(matrix)
target = list(target)

# 13 input features -> 7 hidden units -> 1 regression output.
layers = [13, 7, 1]

dnn = DNN(matrix, target, layers, hidden_layer="TanhLayer",
          final_layer="LinearLayer", compression_epochs=5,
          smoothing_epochs=0, bias=True)
full = dnn.fit()
# Parenthesized single-argument print works identically under Python 2 and 3
# (the original Python-2-only `print x` statements fail on Python 3).
print(full)

# Score the trained network on the training set itself.
preds = [full.activate(d)[0] for d in matrix]

print("mrse preds {0}".format(mrse(preds, target)))
print("rmse preds {0}".format(rmse(preds, target)))
# ===== Example 3 =====
from mnist_dnn_data import load_data
from dnn import DNN

# Load the MNIST split and infer the layer sizes from the data shapes.
(x_tr, y_tr), (x_te, y_te) = load_data()

input_dim = x_tr.shape[1]
hidden_dims = [100, 50]
output_dim = y_tr.shape[1]

# Fully connected network with two hidden layers.
model = DNN(input_dim, hidden_dims, output_dim)

# Hold out 20% of the training data for validation during fitting.
history = model.fit(x_tr, y_tr, epochs=5, batch_size=100,
                    validation_split=0.2)

performance_test = model.evaluate(x_te, y_te, batch_size=100)
print('Test Loss and Accuracy ->', performance_test)
# ===== Example 4 =====
import numpy as np
from dnn import DNN
from keras.datasets import mnist
from keras.utils import np_utils, to_categorical

(x_train, y_train), (x_test, y_test) = mnist.load_data()
mnist_original_size = 28

# Reshape to (N, 28, 28, 1) single-channel images for the conv net.
x_train = x_train.reshape(x_train.shape[0], mnist_original_size, mnist_original_size, 1)
x_test = x_test.reshape(x_test.shape[0], mnist_original_size, mnist_original_size, 1)

# One-hot encode the ten digit classes.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)


model1 = DNN(width=x_train.shape[1], height=x_train.shape[2], depth=x_train.shape[3], classes=10)
model1.compile(loss="categorical_crossentropy", optimizer='adadelta', metrics=["accuracy"])
model1.fit(x_train, y_train, epochs=1, verbose=1)

loss, accuracy = model1.evaluate(x_test, y_test, verbose=1)
print('\nloss: {:.2f}%, accuracy: {:.2f}%'.format(loss*100, accuracy*100))

# NOTE(review): predict_proba was removed in recent Keras; predict() returns
# the same softmax probabilities — confirm against the installed version.
m1 = model1.predict_proba(x_test)


model2 = DNN(width=x_train.shape[1], height=x_train.shape[2], depth=x_train.shape[3], classes=10)
model2.compile(loss="categorical_crossentropy", optimizer='adadelta', metrics=["accuracy"])
# Fix: 'nb_epoch' is the removed Keras 1.x keyword; use 'epochs' to match
# model1 above and current Keras (nb_epoch raises TypeError on Keras >= 2.x).
model2.fit(x_train, y_train, epochs=1, verbose=1)

loss, accuracy = model2.evaluate(x_test, y_test, verbose=1)
print('\nloss: {:.2f}%, accuracy: {:.2f}%'.format(loss*100, accuracy*100))