Example #1
def predict(model_name, model_file_name, layers, input_features):
    # Restore the well-trained model
    if model_name == 'dbn':
        network = dbn.DBN(layers=layers, batch_size=100)
    else:
        return -1, 'Invalid model'

    with tf.Session() as sess:
        tf_saver = tf.train.Saver()
        tf_saver.restore(sess, model_file_path + model_file_name)
        outputs = network.get_output(sess, input_features)
    return 0, outputs
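A minimal usage sketch, assuming numpy is imported as np, that model_file_path is a module-level global pointing at the checkpoint directory, and that the layer sizes and file name below (both hypothetical) match a previously saved DBN:

layers = [64, 128, 128, 10]                # hypothetical: 64 inputs, two hidden layers, 10 outputs
input_features = np.random.rand(100, 64)   # one batch of 100 samples, matching batch_size=100
status, outputs = predict('dbn', 'dbn_model.ckpt', layers, input_features)
if status != 0:
    print 'Prediction failed: %s' % outputs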
Example #2
def test_main(testing_features):
    # Restore the well-trained model
    network = dbn.DBN(layers=layers, batch_size=1)

    with tf.Session() as sess:
        tf_saver = tf.train.Saver()
        tf_saver.restore(sess, model_file_path + model_file_name)
    
        # Get the output from the test data
        # for i in testing_features:
        #     outputs = network.get_output(sess, [i])[0]
        #     print outputs
        outputs = network.get_output(sess, testing_features)
    return outputs
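test_main relies on module-level globals (layers, model_file_path, model_file_name) rather than arguments; a minimal call sketch, assuming numpy is imported as np and that each feature vector matches the input size of the first entry in layers (the values below are hypothetical):

testing_features = np.array([[0.1, 0.5, 0.3],
                             [0.7, 0.2, 0.9]])   # hypothetical 3-feature inputs
outputs = test_main(testing_features)
print 'Predicted outputs:\n%s' % outputs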
Example #3
def predict(model_name, model_file_name, layers, input_features, target_size):
    img_width  = input_features.shape[1]
    img_height = input_features.shape[2]
    # Flatten each image into a single feature vector
    input_features = input_features.reshape(len(input_features), len(input_features[0].flatten()))
    # Restore the well-trained model
    if model_name == 'dbn':
        layers  = map(int, layers.strip().split(','))
        layers  = [input_features.shape[1]] + layers + [target_size]
        network = dbn.DBN(layers=layers, batch_size=100)
    elif model_name == 'cnn':
        conv_layers, hid_layers = layers.strip().split('#')
        conv_layers = map(int, conv_layers.strip().split(','))
        hid_layers  = map(int, hid_layers.strip().split(','))
        network = cnn.CNN(img_width=img_width, img_height=img_height, conv_layers=conv_layers, hidden_layers=hid_layers, batch_size=128)
    else:
        return -1, 'Invalid model'
    
    with tf.Session() as sess:
        tf_saver = tf.train.Saver()
        tf_saver.restore(sess, model_file_path + model_file_name)
        outputs = network.get_output(sess, input_features)
    return 0, outputs
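Here layers is a string rather than a list: comma-separated hidden sizes for the DBN, and '<conv sizes>#<hidden sizes>' for the CNN. A hedged sketch of both call styles, assuming numpy is imported as np (checkpoint names and layer sizes are hypothetical):

images = np.random.rand(200, 28, 28)   # hypothetical batch of 28x28 grayscale images

# DBN: the input and target sizes are prepended/appended inside predict
status, outputs = predict('dbn', 'dbn_model.ckpt', '256,128', images, target_size=10)

# CNN: convolutional sizes and hidden sizes separated by '#'
status, outputs = predict('cnn', 'cnn_model.ckpt', '32,64#1024', images, target_size=10)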
Example #4
if not os.path.exists(model_path):
    print 'The resource path does not exist; creating a new one.'
    os.makedirs(model_path)
if not os.path.exists(res_path):
    print 'The result path does not exist; creating a new one.'
    os.makedirs(res_path)

# Training
# _, input_size  = training_features.shape
# _, output_size = training_labels.shape
print '------ [ %s ] ------' % arrow.now()
print 'Create an instance of the neural network.'
if model == 'dbn':
    layers = map(int, layers.strip().split(','))
    layers = [features.shape[1]] + layers + [labels.shape[1]]
    network = dbn.DBN(layers=layers,
                      iters=1000,
                      batch_size=100,
                      mu=LEARNING_RATE)  # .0001
elif model == 'cnn':
    conv_layers, hid_layers = layers.strip().split('#')
    conv_layers = map(int, conv_layers.strip().split(','))
    hid_layers = map(int, hid_layers.strip().split(','))
    network = cnn.CNN(img_width=img_width,
                      img_height=img_height,
                      conv_layers=conv_layers,
                      hidden_layers=hid_layers,
                      learning_rate=LEARNING_RATE,
                      training_iters=20000,
                      batch_size=128,
                      display_step=10)

with tf.Session() as sess:
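    # (Session body not shown above; a minimal sketch that follows the same
    # pattern as Example #5, assuming the train/test split variables from that
    # example and that dbn.DBN and cnn.CNN share the same train() signature.)
    tr, test = network.train(sess, training_features, training_labels,
                             testing_features, testing_labels)
    np.savetxt(res_path + 'training_result.txt', tr)
    np.savetxt(res_path + 'testing_result.txt', test)

    tf_saver = tf.train.Saver()
    tf_saver.save(sess, model_path + model_file_name)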
Example #5
print 'The number of training data:\t%d' % len(features)
print 'An example of the feature data:\t%s' % features[0]
print 'An example of the label data:\t%s' % labels[0]

# Divide the raw data into the training part and testing part
start_test = end_train = int(
    float(len(features)) / float(TRAIN_TEST_RATIO + 1) * TRAIN_TEST_RATIO)
training_features = np.array(features[0:end_train])
training_labels = np.array(labels[0:end_train])
testing_features = np.array(features[start_test:len(features)])
testing_labels = np.array(labels[start_test:len(labels)])

# Check path
if not os.path.exists(model_path):
    os.makedirs(model_path)
if not os.path.exists(res_path):
    os.makedirs(res_path)

# Training
# _, input_size  = training_features.shape
# _, output_size = training_labels.shape
network = dbn.DBN(layers=layers, iters=50, batch_size=100, mu=.0005)
with tf.Session() as sess:
    tr, test = network.train(sess, training_features, training_labels,
                             testing_features, testing_labels)
    np.savetxt(res_path + 'training_result.txt', tr)
    np.savetxt(res_path + 'testing_result.txt', test)

    tf_saver = tf.train.Saver()
    tf_saver.save(sess, model_path + model_file_name)
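The split above keeps TRAIN_TEST_RATIO parts of the data for training for every one part held out for testing; a quick worked sketch with a hypothetical ratio and sample count:

# With 1000 samples and TRAIN_TEST_RATIO = 4:
#   end_train = int(1000.0 / (4 + 1) * 4) = 800
# so rows 0-799 train the network and rows 800-999 test it (an 80/20 split).
TRAIN_TEST_RATIO = 4
end_train = int(float(1000) / float(TRAIN_TEST_RATIO + 1) * TRAIN_TEST_RATIO)
assert end_train == 800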