Example #1
def main():
    model = create_model()

    print('Loading data')
    x_test, y_test = get_test_data()
    x_train, y_train = get_train_data()
    x_validation, y_validation = get_validation_data()

    print('Starting training')
    try:
        model.fit(x_train,
                  y_train,
                  validation_data=(x_validation, y_validation),
                  batch_size=32,
                  epochs=30,
                  verbose=1)
    except KeyboardInterrupt:
        print('Training interrupted')

    print('Evaluating')
    loss_and_accuracy = model.evaluate(x_test, y_test, batch_size=32)
    print('Loss:', loss_and_accuracy[0])
    print('Accuracy:', loss_and_accuracy[1])

    print('Saving model')
    save_model(model, "../models/", loss_and_accuracy[1])
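
The helpers used in Example 1 (create_model, the get_*_data loaders, save_model) live elsewhere in the repository. A minimal sketch of save_model, assuming it simply tags the saved Keras model file with its test accuracy, might look like:

# Hypothetical sketch of the save_model helper used above; the real
# implementation may choose a different filename scheme or format.
import os

def save_model(model, directory, accuracy):
    # Keras models can be serialized to a single HDF5 file with model.save.
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(directory, 'model_{:.4f}.h5'.format(accuracy))
    model.save(path)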
Example #2
def main(_):
    # Create the model
    x = tf.placeholder(tf.float32, [None, IMG_SIZE])
    W = tf.Variable(tf.zeros([IMG_SIZE, 10]))
    b = tf.Variable(tf.zeros([10]))
    h = tf.matmul(x, W) + b

    y = tf.placeholder(tf.float32, [None, 10])

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=h))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    for _ in range(1000):
        batch_xs, batch_ys = data.get_train_data()
        sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(h, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    test_xs, test_ys = data.get_test_data()
    print("Accuracy: ", sess.run(accuracy, feed_dict={x: test_xs, y: test_ys}))

    img = data.get_img(FLAGS.ask)
    print("The image is: ", sess.run(tf.argmax(h, 1), feed_dict={x: [img]}))
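
The data module in Example 2 is not shown. A minimal stand-in, assuming get_train_data returns one batch of flattened images together with one-hot labels (random placeholder arrays here, not real MNIST digits), could be:

# Hypothetical stand-in for the data module used above.
import numpy as np

IMG_SIZE = 784  # assumed: 28x28 grayscale images, flattened

def get_train_data(batch_size=100):
    # One batch of flattened images plus one-hot labels over 10 classes.
    xs = np.random.rand(batch_size, IMG_SIZE).astype(np.float32)
    labels = np.random.randint(0, 10, size=batch_size)
    ys = np.eye(10, dtype=np.float32)[labels]
    return xs, ys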
Example #3
def train():
    # Load training and eval data
    data_train_data, data_train_labels = data.get_train_data(onehot=False)
    train_data = np.asarray(data_train_data, dtype=np.float32)
    train_labels = np.asarray(data_train_labels, dtype=np.int32)
    data_eval_data, data_eval_labels = data.get_test_data(onehot=False)
    eval_data = np.asarray(data_eval_data, dtype=np.float32)
    eval_labels = np.asarray(data_eval_labels, dtype=np.int32)

    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                              model_dir=FLAGS.model)

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)
    #debug_hook = tf_debug.LocalCLIDebugHook()

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": train_data},
                                                        y=train_labels,
                                                        batch_size=10,
                                                        num_epochs=None,
                                                        shuffle=True)
    mnist_classifier.train(input_fn=train_input_fn,
                           steps=FLAGS.step,
                           hooks=[logging_hook])

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": eval_data},
                                                       y=eval_labels,
                                                       num_epochs=1,
                                                       shuffle=False)
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
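
cnn_model_fn is defined elsewhere; note that the LoggingTensorHook above can only resolve "probabilities" if the model names its softmax output "softmax_tensor". A minimal compatible model_fn, assuming MNIST-shaped 28x28 inputs and standing in for the repository's real CNN, might be:

# Hypothetical minimal model_fn; the original cnn_model_fn is presumably a
# deeper CNN, but the Estimator contract it must satisfy is the same.
def cnn_model_fn(features, labels, mode):
    input_layer = tf.reshape(features["x"], [-1, 28 * 28])
    logits = tf.layers.dense(inputs=input_layer, units=10)
    # The name "softmax_tensor" is what the LoggingTensorHook looks up.
    probabilities = tf.nn.softmax(logits, name="softmax_tensor")
    predictions = {"classes": tf.argmax(logits, axis=1),
                   "probabilities": probabilities}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss,
                                      global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    eval_metric_ops = {"accuracy": tf.metrics.accuracy(
        labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                      eval_metric_ops=eval_metric_ops)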
Example #4
    test_loss = 0
    with torch.no_grad():
        for data, (index, target_rating) in data_loader:
            output = model(data)
            # Squared error on the one observed rating; RMSE is taken below.
            test_loss += pow(target_rating - output[index], 2)

    test_loss /= len(data_loader.dataset)
    test_loss = math.sqrt(test_loss)
    print('\nTest set: Average loss: {:.4f}'.format(test_loss))



# -----------------------------------------------------------------------------
# TRAINING LOOP
# -----------------------------------------------------------------------------

#model = 0
for epoch in range(1000):
    train_epoch(epoch + 1, model, dl.get_train_data(), optimizer, autorec_loss)
    # validate_epoch(model, data_loader)


'''
with torch.no_grad():
    for batch_idx, (target, input) in enumerate(dl.get_train_data()):
        #output = model(target)
        #print(target[0][input[0].item()-1])
        print(target)
        print(model(target))
'''
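
The autorec_loss passed to train_epoch is not shown. A minimal sketch, assuming an AutoRec-style masked MSE that is called as loss_fn(output, target) and scores only the observed (non-zero) ratings, could be:

# Hypothetical masked-MSE loss; the repository's autorec_loss may add
# regularization or use a different mask convention.
import torch

def autorec_loss(output, target):
    mask = (target != 0).float()  # 1 where a rating was actually observed
    return torch.sum(mask * (target - output) ** 2) / torch.clamp(mask.sum(), min=1.0)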
Example #5
import torch

import data_loader
import functions
import model
import options
import os
from tensorboardX import SummaryWriter

if __name__ == '__main__':

    writer = SummaryWriter()
    opt = options.Option()

    net = model.Net()

    #net.load_state_dict(torch.load('./network_model/model.para'))
    net.to(torch.device(opt.device_name))

    sample_data, sample_label = data_loader.get_train_data(0, 5)
    functions.cal_k_nearnest(sample_data)
    out = net(sample_data)

    with open('Output.off', 'w') as f:
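        # OFF format: "OFF" keyword, then "<num_vertices> <num_faces> <num_edges>".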
        f.write('OFF\n')
        f.write(str(opt.pointnum) + ' 0 0\n')
        for i in range(opt.pointnum):
            f.write(
                str(out[0][i][0].item()) + ' ' + str(out[0][i][1].item()) +
                ' ' + str(out[0][i][2].item()) + '\n')

    with open('STD.off', 'w') as f:
        f.write('OFF\n')
        f.write(str(opt.pointnum) + ' 0 0\n')
        for i in range(opt.pointnum):
Example #6
    net.to(torch.device(opt.device_name))
    optimizer = optim.Adam(net.parameters(),
                           opt.learning_rate,
                           betas=(0.9, 0.99))
    print(net)

    step = 0
    for epoch in range(opt.total_epoch):
        print('Epoch:{}'.format(epoch))

        Epoch_loss = 0
        data_loader.shuffle_TrainingSet()

        for now in range(0, data_loader.get_TrainingSize(), opt.batch_size):

            sample_data, sample_label = data_loader.get_train_data(
                now, opt.batch_size)
            functions.cal_k_nearnest(sample_data)

            optimizer.zero_grad()
            out = net(sample_data)
            loss = net.lossfunc(out, sample_data)
            loss.backward()
            optimizer.step()

            Epoch_loss = Epoch_loss + loss.item()
            writer.add_scalar('Train/Loss', loss.item(), step)
            step += 1
            print('{}/{}:loss: {}'.format(now, data_loader.get_TrainingSize(),
                                          loss.item()))

        Epoch_loss = Epoch_loss / (data_loader.get_TrainingSize() / opt.batch_size)
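
functions.cal_k_nearnest, called in Examples 5 and 6 for its side effect, is defined elsewhere. A minimal sketch, assuming it computes each point's k nearest neighbours with torch.cdist (the original presumably caches the indices for the network to use), might be:

# Hypothetical sketch of functions.cal_k_nearnest; the spelling follows the
# original identifier, and k is an assumed default.
import torch

def cal_k_nearnest(points, k=8):
    # points: (batch, num_points, 3). Returns (batch, num_points, k) indices
    # of the k nearest neighbours of each point by Euclidean distance.
    dists = torch.cdist(points, points)               # pairwise distances
    _, idx = torch.topk(dists, k + 1, dim=-1, largest=False)
    return idx[..., 1:]                               # drop the point itself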