Code Example #1
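This example evaluates a trained MNIST model on the test set, logs the accuracy through the Azure ML data_collector run logger, and then generates the schema and driver files (generate_schema / generate_main) needed to operationalize the model as a web service.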
# Assumed imports for this snippet; init(), predict_all_test_data(),
# evaluateModel(), run(), and the DataFrame retdf are defined elsewhere in
# the original file. The azureml module paths are assumptions based on the
# Azure ML Workbench-era SDK.
import numpy as np
from tensorflow.contrib import learn

from azureml.sdk import data_collector
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime import services as amlo16n


def main():
    global mnist, scores, sess, x, graph, btch_sz
    mnist = learn.datasets.mnist.read_data_sets('MNIST_data', one_hot=True)
    btch_sz = 5
    init()
    predict_all_test_data()
    y = graph.get_tensor_by_name("y:0")
    eval_op = evaluateModel(scores, y)
    test_feed_dict = {
        x: mnist.test.images,
        y: mnist.test.labels,
    }
    print(scores.shape)
    _netacc = sess.run(eval_op, feed_dict=test_feed_dict)
    print("Net Accuracy:", _netacc)
    print(scores[0:5, :], " predicted value = ",
          np.argmax(scores[0:5, :], axis=1), " actual value",
          np.argmax(mnist.test.labels[0:5, :], axis=1))
    run_logger = data_collector.current_run()
    run_logger.log("Accuracy", _netacc)

    print("Calling prepare schema")
    inputs = {
        "nparr": SampleDefinition(DataTypes.NUMPY,
                                  mnist.test.images[0:btch_sz])
    }
    outputs = {
        "probs_and_class_category_df":
        SampleDefinition(DataTypes.PANDAS, retdf)
    }
    amlo16n.generate_schema(inputs=inputs,
                            outputs=outputs,
                            filepath="outputs/mnistschema.json",
                            run_func=run)

    amlo16n.generate_main(user_file="mnistscore.py",
                          schema_file="outputs/mnistschema.json",  # match the schema path generated above
                          main_file_name="outputs/main.py")
    print("End of prepare schema")
Code Example #2
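This example is the opening of a scikit-learn training script for the Iris dataset: it initializes the Azure ML run logger, creates an outputs folder for artifacts, loads the data, and reads an optional regularization rate from the command line.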
# Please make sure scikit-learn is included in the conda_dependencies.yml file.

import pickle
import sys
import os

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from azureml.sdk import data_collector

# initialize the logger
run_logger = data_collector.current_run()

# create the outputs folder
os.makedirs('./outputs', exist_ok=True)

print('Python version: {}'.format(sys.version))
print()

# load Iris dataset
iris = load_iris()
print('Iris dataset shape: {}'.format(iris.data.shape))

# load features and labels
X, Y = iris.data, iris.target

# change the regularization rate and you will likely get a different accuracy.
reg = 0.01
# load regularization rate from argument if present
if len(sys.argv) > 1:
    reg = float(sys.argv[1])
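The snippet is cut off before the model is actually trained. A minimal continuation in the same style, assuming the standard scikit-learn API and the run_logger and outputs folder set up above (the metric names and model filename are illustrative):

# train a logistic regression model; C is the inverse of the
# regularization strength, hence 1 / reg
clf = LogisticRegression(C=1.0 / reg)
clf.fit(X, Y)

# evaluate on the training data and log metrics to the run history
accuracy = clf.score(X, Y)
print('Accuracy is {}'.format(accuracy))
run_logger.log('Regularization Rate', reg)
run_logger.log('Accuracy', accuracy)

# serialize the trained model into the outputs folder
with open('./outputs/model.pkl', 'wb') as f:
    pickle.dump(clf, f)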
Code Example #3
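This example is the MNIST training loop itself: it builds the graph with placeholders for dropout and batch normalization, trains for num_epochs epochs while writing TensorBoard summaries, logs the final test accuracy through data_collector, and saves a checkpoint under outputs/mnist.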
# Assumed imports for this snippet; downloaddata(), inference(), sploss(),
# train(), and evaluateModel() are helper functions defined elsewhere in
# the original file.
import os
import shutil

import tensorflow as tf
from azureml.sdk import data_collector


def sessionrun(num_epochs):
    global mnist, serialized_tf_example, prediction_classes, values
    global tensor_info_x, tensor_info_y, sessinfo
    global train_x, train_y
    downloaddata()

    batch_size = 100

    x = tf.placeholder(tf.float32, [None, 784], name='x')
    y = tf.placeholder(tf.float32, [None, 10], name='y')
    phase_train = tf.placeholder(tf.bool, name='phase_train')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    batch_norm = tf.placeholder(tf.bool, name='batch_norm')

    pred_op = inference(x, keep_prob, batch_norm=True, phase_train=phase_train)
    loss_op = sploss(pred_op, y)
    ts_op = train(loss_op)
    eval_op = evaluateModel(pred_op, y)
    values, indices = tf.nn.top_k(pred_op, 10)

    loss_list = []
    acc_list = []
    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter('outputs/tflogs/train', sess.graph)
        test_writer = tf.summary.FileWriter('outputs/tflogs/test')

        sess.run(tf.global_variables_initializer())
        saver0 = tf.train.Saver()
        for epoch in range(num_epochs):
            avgloss = 0.
            avgacc = 0.

            total_batch = int(mnist.train.num_examples / batch_size)
            for i in range(total_batch):
                mx, my = mnist.train.next_batch(batch_size)
                # nx = 1 - mx  (variant for training images on a white background)

                feed_dict = {x: mx, y: my, phase_train: True, batch_norm: True, keep_prob: 0.4}
                _trsumm, _totloss, _trainstep, _predseriescc = sess.run(
                    [merged, loss_op, ts_op, pred_op],
                    feed_dict=feed_dict)
                avgloss += _totloss/total_batch
                # variant for training images on a white background:
                # feed_dict = {x: nx, y: my, phase_train: True, batch_norm: True, keep_prob: 0.4}
                # _totloss, _trainstep, _predseriescc = sess.run(
                #     [loss_op, ts_op, pred_op],
                #     feed_dict=feed_dict)
                # avgloss += _totloss / total_batch
                loss_list.append(avgloss)
                if i % 10 == 0:
                    train_writer.add_summary(_trsumm, i)
            val_feed_dict = {
                x: mnist.validation.images,
                y: mnist.validation.labels,
                phase_train: False,
                batch_norm: True,
                keep_prob: 1
            }
            _valsumm, _acc = sess.run([merged, eval_op], feed_dict=val_feed_dict)
            avgacc = _acc
            acc_list.append(_acc)
            print("In Epoch ", epoch, " with loss ", avgloss, " and with accuracy ", avgacc)
            train_writer.add_summary(_trsumm, epoch * batch_size)
            test_writer.add_summary(_valsumm, epoch)

        test_feed_dict = {
            x: mnist.test.images,
            y: mnist.test.labels,
            phase_train: False,
            batch_norm: True,
            keep_prob: 1
        }

        _tstsumm, _netacc = sess.run([merged, eval_op], feed_dict=test_feed_dict)
        print("Net accuracy: ", _netacc)
        tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
        tensor_info_y = tf.saved_model.utils.build_tensor_info(pred_op)
        run_logger = data_collector.current_run()
        run_logger.log("Accuracy", _netacc)
        run_logger.log("Number of Epochs", num_epochs)
        run_logger.log("Data Size", mnist.train.num_examples)

        # export model
        export_path_base = 'outputs/mnist'
        print('export_path_base:', export_path_base)
        if os.path.exists(export_path_base):
            print("model path already exists, removing model path files and directory")
            shutil.rmtree(export_path_base)
        os.mkdir(export_path_base)
        saver0.save(sess, 'outputs/mnist/mnistmodel')
        print('Done exporting!')
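As a follow-up, a minimal sketch of loading the checkpoint back for scoring, assuming the TF 1.x Saver/meta-graph API and the tensor names given to the placeholders above:

import tensorflow as tf

with tf.Session() as sess:
    # restore the graph structure and the trained weights
    saver = tf.train.import_meta_graph('outputs/mnist/mnistmodel.meta')
    saver.restore(sess, 'outputs/mnist/mnistmodel')

    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name('x:0')
    keep_prob = graph.get_tensor_by_name('keep_prob:0')
    phase_train = graph.get_tensor_by_name('phase_train:0')
    # feed test images exactly as in test_feed_dict above, e.g.:
    # sess.run(..., feed_dict={x: images, keep_prob: 1, phase_train: False})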