Example #1
def get_eval_metrics():
    return {
        'accuracy':
        tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy),
        'training/hptuning/metric':
        tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy),
    }
Example #2
def train_and_eval(train_steps, log_dir, training_set, validation_set, testing_set):
    sparse_columns = [
        layers.sparse_column_with_keys(attribute, training_set[attribute].unique()) for attribute in FEATURE_ATTRIBUTES
    ]
    embedding_columns = [
        layers.embedding_column(column, dimension=8) for column in sparse_columns
    ]
    m = learn.DNNClassifier(
        hidden_units=[10, 50],
        feature_columns=embedding_columns,
        model_dir=log_dir,
        config=learn.RunConfig(save_checkpoints_secs=1),
    )
    validation_metrics = {
        "accuracy": learn.MetricSpec(metric_fn=metrics.streaming_accuracy, prediction_key="classes"),
        "precision": learn.MetricSpec(metric_fn=metrics.streaming_precision, prediction_key="classes"),
        "recall": learn.MetricSpec(metric_fn=metrics.streaming_recall, prediction_key="classes"),
    }
    monitors = [
        learn.monitors.ValidationMonitor(
            input_fn=lambda: input_fn(validation_set),
            every_n_steps=1000,
            metrics=validation_metrics,
            early_stopping_rounds=1,
        ),
    ]
    m.fit(
        input_fn=lambda: input_fn(training_set),
        steps=train_steps,
        monitors=monitors,
    )
    results = m.evaluate(input_fn=lambda: input_fn(testing_set), steps=1)
    for key in sorted(results):
        print("%s: %s" % (key, results[key]))
Example #3
def get_eval_metrics():
    return {
        'rmse':
        tflearn.MetricSpec(
            metric_fn=metrics.streaming_root_mean_squared_error),
        'training/hptuning/metric':
        tflearn.MetricSpec(
            metric_fn=metrics.streaming_root_mean_squared_error),
    }
Example #4
def main(unused_argv):
    classes = 1721
    # Load training and eval data
    training, t_labels, validation, v_labels = prep.data_from_base(
        'training_data')
    # t_labels = onehot_labels(t_labels, classes)
    # v_labels = onehot_labels(v_labels, classes)

    # Create the Estimator
    hiragana_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                          model_dir="/tmp/kanji_cnn_test2")
    # Set up logging for predictions
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=100)
    # Train the model
    hiragana_classifier.fit(x=training,
                            y=t_labels,
                            batch_size=50,
                            steps=1000,
                            monitors=[logging_hook])
    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    # Evaluate the model and print results
    eval_results = hiragana_classifier.evaluate(x=validation,
                                                y=v_labels,
                                                metrics=metrics)
    print(eval_results)
Example #5
def Training():
    print("[+] Welcome to the training program.")
    X, Y = LoadData()
    train_data, eval_data, train_labels, eval_labels = train_test_split(
        X, Y, test_size=0.2, random_state=42)
    # print(type(train_labels))
    global classifier
    # print("[+] till now working")
    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    # global logging_hook
    # print("[+] till now working")

    # Train the model
    classifier.fit(x=train_data, y=train_labels, batch_size=15, steps=10000)
    # print("[+] till now working")
    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }

    # Evaluate the model and print results
    eval_results = classifier.evaluate(x=eval_data,
                                       y=eval_labels,
                                       metrics=metrics)
    print(eval_results)
Example #6
def main(unused_argv):
    global label_names
    log_dir = 'tmp/cnn_convnet_model' 
    label_names = unpickle('dataset/cifar-10-batches-py/batches.meta')['label_names']
    data, labels = load_cifar10_data()
    eval_data, eval_labels = load_cifar10_data(type=learn.ModeKeys.EVAL)
    
    classifier = learn.Estimator(
        model_fn=cnn_model, 
        model_dir=log_dir)
    logging_hook = tf.train.LoggingTensorHook(
        tensors={'probabilities': 'softmax'}, 
        every_n_iter=50)
    classifier.fit(
        x=data,
        y=labels,
        batch_size=100,
        steps=20000,
        #monitors=[logging_hook])
        monitors=None)
    
    metrics = {
        'accuracy': learn.MetricSpec(metric_fn=tf.metrics.accuracy, prediction_key='classes'),
    }
    
    eval_results = classifier.evaluate(
        x=eval_data,
        y=eval_labels,
        metrics=metrics)
    
    print(eval_results)
Example #7
def main(unused_argv):

  print("+++  running main +++")
  
  # define global variable for number of classes that we will fill
  # as per the number of people returned from the dataset
  global n_classes
  
  # Slices out images of 64x64 from the dataset. Returns images of 34 different people
  lfw_people = fetch_lfw_people(min_faces_per_person=30,
                                slice_=(slice(61, 189), slice(61, 189)),
                                resize=0.5, color=True)
  X = lfw_people.images
  y = lfw_people.target
  
  # get count of number of possible labels - need to use this as
  # number of units for dense layer in call to tf.layers.dense and
  # for defining the one-hot matrix. Here the number of possible
  # labels is 34 based on the subset of LFW that we selected above. 
  target_names = lfw_people.target_names
  n_classes = target_names.shape[0]
  y = np.asarray(y, dtype=np.int32)
  
  # split into a training and testing set
  train_data, eval_data, train_labels, eval_labels = train_test_split(
      X, y, test_size=0.25, random_state=42)
 
  print("+++ I split the data  +++")
  # Create the Estimator - changed here to relfect use of LFW not MNIST
  lfw_classifier = learn.Estimator(model_fn=cnn_model_fn, model_dir="/tmp/lfw_CNN_model")

  print("+++ I made the estimator  +++")
  # Set up logging for predictions
  # Log the values in the "Softmax" tensor with label "probabilities"
  tensors_to_log = {"probabilities": "softmax_tensor"}
  logging_hook = tf.train.LoggingTensorHook(
      tensors=tensors_to_log, every_n_iter=50)
  
  print("+++ Train the model +++")
  lfw_classifier.fit(
      x=train_data,
      y=train_labels,
      batch_size=64,
      steps=1000,
      monitors=[logging_hook])
  print("+++ Doing the metrics  +++")
  # Configure the accuracy metric for evaluation
  metrics = {
      "accuracy":
          learn.MetricSpec(
              metric_fn=tf.metrics.accuracy, prediction_key="classes"),
  }
      
  print("+++ printing the evaluation  +++")
  # Evaluate the model and print results
  eval_results = lfw_classifier.evaluate(
      x=eval_data, y=eval_labels, metrics=metrics)
  print("+++ eval results are")
  print(eval_results)
  print("++++++++++++++++++")
Example #8
def main(unused_argv):
    # Load training and eval data
    star_ratings = []
    images = []
    star_rating = 0
    for root, dirs, files in os.walk("photos2/"):
        path = root.split(os.sep)
        print((len(path) - 1) * '---', os.path.basename(root))

        if os.path.basename(root) != '':
            star_rating = int(float(os.path.basename(root)[-3:]))
        for filename in files:
            # Only keep the rating for files that are actually loaded as
            # images, so star_ratings and images stay the same length.
            if re.search(r"\.(jpg|jpeg|png|bmp|tiff)$", filename):
                star_ratings.append(star_rating)
                filepath = os.path.join(root, filename)
                image = ndimage.imread(filepath, mode="L")
                image_resized = misc.imresize(image, (54, 54))
                images.append(image_resized)


#     train_labels = np.random.randint(1, high=5, size=len(star_ratings))
    print(len(star_ratings))
    train_labels = np.array(star_ratings)
    train_data = np.asarray(images, dtype=np.float32)
    # Create the Estimator
    classifier = learn.Estimator(model_fn=cnn_model_fn,
                                 model_dir="/tmp/convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    # Train the model
    classifier.fit(x=train_data[:-100],
                   y=train_labels[:-100],
                   batch_size=1,
                   steps=200,
                   monitors=[logging_hook])

    print("hi")

    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    print("bye")

    # Evaluate the model and print results
    eval_results = classifier.evaluate(x=train_data[-100:],
                                       y=train_labels[-100:],
                                       metrics=metrics)

    predictions = classifier.predict(x=train_data[-100:])

    print(predictions)
    print(eval_results)
Example #9
def main(unused_argv):

    train_data = op[0:80000, ]
    train_labels = np.array(s["stars"][0:80000])
    eval_data = op[80000:100000, ]
    eval_labels = np.array(s["stars"][80000:100000])

    # Create the Estimator
    mnist_classifier = learn.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/ahahahahahaha_convnet_model")

    # Train the model
    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=100,
                         steps=20000)

    # Configure the accuracy metric for evaluation
    metrics = {
        "rmse":
        learn.MetricSpec(metric_fn=tf.metrics.mean_squared_error,
                         prediction_key="ratings"),
    }

    # Evaluate the model and print results
    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #10
def experiment_fn(output_dir):
    # run experiment

    #train_monitors = tf.contrib.learn.monitors.ValidationMonitor(test_set.target, test_set.target,every_n_steps=5)
    #logging_hook = tf.train.LoggingTensorHook({"accuracy" : tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy, prediction_key='class')}, every_n_iter=10)

    return tflearn.Experiment(
        tflearn.Estimator(model_fn=cnn_model,
                          model_dir=output_dir,
                          config=tf.contrib.learn.RunConfig(
                              save_checkpoints_steps=CHECKPOINT_STEPS,
                              save_checkpoints_secs=None,
                              save_summary_steps=SUMMARY_STEPS)),
        train_input_fn=get_train(),
        eval_input_fn=get_valid(),
        eval_metrics={
            'acc':
            tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy,
                               prediction_key='class')
        },
        checkpoint_and_export=True,
        train_monitors=None,
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        train_steps=TRAIN_STEPS,
        eval_steps=EVAL_STEPS)
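An experiment_fn like this is usually handed to learn_runner, which constructs the Experiment and drives the train/evaluate schedule. A usage sketch, assuming the constants referenced above are defined and using a hypothetical output directory:

from tensorflow.contrib.learn.python.learn import learn_runner

output_dir = 'training_output'  # hypothetical; any writable model directory works
learn_runner.run(experiment_fn, output_dir)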
Example #11
def main(unused_argv):
    print("New2")
    # Load training and eval data
    mnist = learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images  # Returns np.array
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    # Create the Estimator
    mnist_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                       model_dir="/tmp/mnist_convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)
    # Train the model
    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=100,
                         steps=20000,
                         monitors=[logging_hook])
    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    # Evaluate the model and print results
    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #12
def main(unused_argv):
    mnist = learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    mnist_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                       model_dir="/tmp/mnist_convnet_model")

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=100,
                         steps=20000,
                         monitors=[logging_hook])

    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }

    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #13
def main(unused_argv):
    ts = time.time()
    mnist = learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
    mnist_classifier = learn.Estimator(
        model_fn=cnn_model_fn,
        model_dir=os.path.join(os.getcwd(), "MNIST_Model"))
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)
    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=600,
                         steps=8000,
                         monitors=[logging_hook])
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
    print("CONVNET TOOK {0}s TO TRAIN".format(time.time() - ts))
Example #14
def run_learn(pixels, label):
    cnn_dir = os.environ['VIRTUAL_ENV'] + "/mnist_convnet_model"
    mnist_dataset = mnist.load_mnist(train_dir=os.environ['VIRTUAL_ENV'] +
                                     '/MNIST-data')

    train_data = concat(mnist_dataset.train.images, pixels)
    train_labels = concat(mnist_dataset.train.labels, label)

    eval_data = concat(mnist_dataset.test.images, pixels)
    eval_labels = concat(mnist_dataset.test.labels, label)

    estimator = learn.Estimator(model_fn=cnn_model_fn, model_dir=cnn_dir)
    dataset_classifier = learn.SKCompat(estimator)

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=500)

    dataset_classifier.fit(x=train_data,
                           y=train_labels,
                           batch_size=128,
                           steps=5000,
                           monitors=[logging_hook])

    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes")
    }

    eval_results = dataset_classifier.score(x=eval_data,
                                            y=eval_labels,
                                            metrics=metrics)
    return eval_results
Example #15
def main(unused_argv):
    # Load training and eval data
    prepper = prep_hiragana.prepper('hiragana', 'hiragana.txt')
    train_data = prepper.train_images()  # Returns np.array
    train_labels = np.asarray(prepper.train_labels(), dtype=np.int32)

    eval_data = prepper.validate_images()  # Returns np.array
    eval_labels = np.asarray(prepper.validate_labels(), dtype=np.int32)
    # Create the Estimator
    hiragana_classifier = learn.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/hiragana_convnet_model2")
    # Set up logging for predictions
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)
    # Train the model
    hiragana_classifier.fit(x=train_data,
                            y=train_labels,
                            batch_size=50,
                            steps=1000,
                            monitors=[logging_hook])
    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    # Evaluate the model and print results
    eval_results = hiragana_classifier.evaluate(x=eval_data,
                                                y=eval_labels,
                                                metrics=metrics)
    print(eval_results)
Example #16
def train():
    train_data, train_labels, eval_data, eval_labels = load_dataset(
        '/Users/sunary/Downloads/train')

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    dogvscat_classifier = learn.SKCompat(
        learn.Estimator(model_fn=cnn_model_fn,
                        model_dir='/tmp/dogvscat_convnet_model'))
    dogvscat_classifier.fit(x=train_data,
                            y=train_labels,
                            batch_size=50,
                            steps=1000,
                            monitors=[logging_hook])

    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes")
    }
    eval_results = dogvscat_classifier.score(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #17
def train():
    mnist = learn.datasets.load_dataset('mnist')

    train_data = mnist.train.images
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)

    eval_data = mnist.test.images
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    mnist_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                       model_dir='/tmp/mnist_convnet_models')
    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=50,
                         steps=10000)

    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes")
    }
    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #18
def main(unused_argv):
    # Input data
    ROOT_PATH = "/Users/miaoyan/PycharmProjects/hori_verti_line_recognization/short_line/"
    train_data_dir = os.path.join(ROOT_PATH, "train")
    train_data_index, train_labels = load_data(train_data_dir) #read vertical-01 first, then read horizontal-10

    train_data = np.asarray(train_data_index)
    train_data = 1 - train_data
    train_data = train_data.astype('float32')

    train_labels = np.asarray(train_labels, dtype=np.float32) #01-vertical 10-horizontal

    # create variable for horizontal and vertical filters
    variable_line = tf.Variable(tf.random_uniform(shape=[3, 3, 1, 1], minval=1, maxval=2, dtype=tf.float32))
    sess = tf.Session()  # assumed: no session has been created earlier in this snippet
    sess.run(variable_line.initializer)

    # Use cnn_training_model
    y = cnn_model_line(train_data, variable_line) #output of CNNs

    # initialize and reshape the original labels
    y_ = tf.reshape(train_labels, [-1])  # 01 is vertical line, 10 is horizontal line

    # Define loss and optimizer
    loss = None
    train_op = None

    # mean square error
    onehot_labels_prediction = tf.one_hot(indices=y, depth=2)
    onehot_labels_label = tf.one_hot(indices=tf.cast(y_, dtype=tf.int32), depth=2)
    print(sess.run(onehot_labels_prediction))
    print(sess.run(onehot_labels_label))
    mse_loss = tf.losses.mean_squared_error(labels=onehot_labels_label, predictions=onehot_labels_prediction)
    print(sess.run(mse_loss))

    # Train the model
    # Create the Estimator
    line_classifier = learn.Estimator(model_fn=cnn_model_line,
        model_dir="/Users/miaoyan/PycharmProjects/hori_verti_line_recognization")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}

    # Prints the given tensors once every N local steps
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=2)

    # Fit the data for training the model
    line_classifier.fit(x=train_data, y=train_labels, batch_size=20, steps=1000, monitors=[logging_hook])

    # Configure the accuracy metric for evaluation
    metrics = {"accuracy": learn.MetricSpec(metric_fn=tf.metrics.accuracy, prediction_key="classes"), }

    train_op = tf.contrib.layers.optimize_loss(
        loss=mse_loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=0.001,
        optimizer='SGD') # use stochastic gradient descent as optimization algorithm
    print(sess.run(variable_line))
Example #19
def main(unused_argv):

    # load training and eval datasets.
    ROOT_PATH = "/Users/miaoyan/PycharmProjects/line"
    train_data_dir = os.path.join(ROOT_PATH, "Training")
    test_data_dir = os.path.join(ROOT_PATH, "Testing")

    train_data_index, train_labels = load_data(train_data_dir)
    train_data_index = [
        transform.resize(image, (28, 28)) for image in train_data_index
    ]
    train_data_index = [image.astype(np.float32) for image in train_data_index]
    train_labels = np.asarray(train_labels, dtype=np.int32)
    train_data = np.asarray(train_data_index)

    eval_data_index, eval_labels = load_data(test_data_dir)

    eval_labels = np.asarray(eval_labels, dtype=np.int32)

    eval_data_index = [
        transform.resize(image, (28, 28)) for image in eval_data_index
    ]
    eval_data_index = [image.astype(np.float32) for image in eval_data_index]
    eval_data = np.asarray(eval_data_index)

    ###############################################################################
    # Create the Estimator
    line_classifier = learn.Estimator(
        model_fn=cnn_model_fn,
        model_dir="/Users/miaoyan/PycharmProjects/hori_verti_line_recognization"
    )

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}

    # Prints the given tensors once every N local steps
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=2)

    # Fit the data for training the model
    line_classifier.fit(x=train_data,
                        y=train_labels,
                        batch_size=20,
                        steps=1000,
                        monitors=[logging_hook])

    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }

    # Evaluate the model and print results
    eval_results = line_classifier.evaluate(x=eval_data,
                                            y=eval_labels,
                                            metrics=metrics)
    print(eval_results)
Example #20
def main(unused_argv):
    # Load the training data

    #7392 in training data
    mnist = learn.datasets.load_dataset("mnist")

    X = mnist.train.images  # Returns np.array
    Y = np.asarray(mnist.train.labels, dtype=np.int32)
    noTrain = int(0.9 * len(X))

    trainData = X[0:noTrain, ...]
    trainLabels = Y[0:noTrain, ...]
    evalData = X[noTrain:len(X) - 1, ...]
    evalLabels = Y[noTrain:len(Y) - 1, ...]

    testData = mnist.test.images  # Returns np.array
    testLabels = np.asarray(mnist.test.labels, dtype=np.int32)

    print("labels shape (training): ", np.shape(trainLabels),
          " (evaluation): ", np.shape(evalLabels))
    print("mean value for evaluation labels (coin-flip score): ",
          np.mean(evalLabels))

    #    print(trainData[0:20])

    print("labels shape (training): ", np.shape(trainLabels),
          " (evaluation): ", np.shape(evalLabels))
    print("mean value for evaluation labels (coin-flip score): ",
          np.mean(evalLabels))
    sTime = time.time()
    # Create estimator
    simpleClassifier = learn.Estimator(model_fn=simpleClass,
                                       model_dir="./models/simpleClassifier")
    # set up logging
    tensors_to_log = {"probabilities": "softmaxTensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=500)

    for ck in range(0, maxSteps, dispIt):
        # Train Model
        simpleClassifier.fit(x=trainData,
                             y=trainLabels,
                             batch_size=batchSize,
                             steps=dispIt,
                             monitors=[logging_hook])

        # Metrics for evaluation
        metrics = {
            "accuracy":
            learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                             prediction_key="classes")
        }

        print("elapsed time: ", time.time() - sTime)
        # Evaluate model and display results
        evalResults = simpleClassifier.evaluate(x=evalData,
                                                y=evalLabels,
                                                metrics=metrics)
        print("Evaluation Results epoch %i" % (ck * dispIt), evalResults)
Example #21
  def _experiment_fn(output_dir):
    return tflearn.Experiment(
        get_model(output_dir, nbuckets, hidden_units, learning_rate),
        train_input_fn=read_dataset(traindata, mode=tf.contrib.learn.ModeKeys.TRAIN, num_training_epochs=num_training_epochs, batch_size=batch_size),
        eval_input_fn=read_dataset(evaldata),
        export_strategies=[saved_model_export_utils.make_export_strategy(
            serving_input_fn,
            default_output_alternative_key=None,
            exports_to_keep=1
        )],
        eval_metrics={
            'rmse': tflearn.MetricSpec(metric_fn=my_rmse, prediction_key='probabilities'),
            'training/hptuning/metric': tflearn.MetricSpec(metric_fn=my_rmse, prediction_key='probabilities')
        },
        min_eval_frequency=100,
        **args
    )
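my_rmse is not included in this snippet. A metric_fn passed to tflearn.MetricSpec receives predictions and labels tensors and returns a (value_op, update_op) pair, so a plausible stand-in (the real my_rmse may differ) is:

import tensorflow as tf

def my_rmse(predictions, labels):
    # tf.metrics.root_mean_squared_error already returns (value_op, update_op).
    return tf.metrics.root_mean_squared_error(
        labels=tf.cast(labels, tf.float32),
        predictions=tf.squeeze(predictions))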
Example #22
 def test(self):
     metrics = {
         "accuracy":
         learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                          prediction_key="classes")
     }
     print(self.mnist_estimator.evaluate(x=self.mnist.test.images,
                                         y=self.mnist.test.labels,
                                         steps=1,
                                         metrics=metrics))
Example #23
def get_eval_metrics():
  """
  This function returns the metrics you would like to see during evaluation.
  These links may help:
  https://www.tensorflow.org/api_docs/python/tf/contrib/learn/MetricSpec
  https://www.tensorflow.org/api_docs/python/tf/metrics/accuracy
  """
  return {"accuracy": learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                                       prediction_key="classes")}
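The dict returned here plugs straight into Estimator.evaluate. A usage sketch, where classifier, eval_data and eval_labels stand in for whatever estimator and evaluation arrays the surrounding project defines:

eval_results = classifier.evaluate(x=eval_data,
                                   y=eval_labels,
                                   metrics=get_eval_metrics())
print(eval_results)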
Example #24
def experiment_fn(output_dir):
    return tflearn.Experiment(
        tflearn.LinearRegressor(feature_columns=feature_cols,
                                model_dir=output_dir),
        train_input_fn=get_train(),
        eval_input_fn=get_valid(),
        eval_metrics={
            'rmse':
            tflearn.MetricSpec(
                metric_fn=metrics.streaming_root_mean_squared_error)
        })
Example #25
def main(unused_argv):
    # Load the training data

    #7392 in training data
    mnist = learn.datasets.load_dataset("mnist")
    trainData = mnist.train.images  # Returns np.array
    trainLabels = np.asarray(mnist.train.labels, dtype=np.int32)
    evalData = mnist.test.images  # Returns np.array
    evalLabels = np.asarray(mnist.test.labels, dtype=np.int32)

    print("labels shape (training): ", np.shape(trainLabels),
          " (evaluation): ", np.shape(evalLabels))
    print("mean value for evaluation labels (coin-flip score): ",
          np.mean(evalLabels))

    print(trainData[0:20])

    print("labels shape (training): ", np.shape(trainLabels),
          " (evaluation): ", np.shape(evalLabels))
    print("mean value for evaluation labels (coin-flip score): ",
          np.mean(evalLabels))
    sTime = time.time()
    # Create estimator
    MTClassifier = learn.Estimator(model_fn=cNNMTModel,
                                   model_dir="./MNIST/MTConvNetModel")
    # set up logging
    tensors_to_log = {"probabilities": "softmaxTensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=100)

    # Train Model
    MTClassifier.fit(x=trainData,
                     y=trainLabels,
                     batch_size=batchSize,
                     steps=10000,
                     monitors=[logging_hook])

    # Metrics for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes")
    }
    print(np.mean(evalLabels))
    print("elapsed time: ", time.time() - sTime)
    # Evaluate model and display results
    evalResults = MTClassifier.evaluate(x=evalData,
                                        y=evalLabels,
                                        metrics=metrics)
    print("wobobobob", evalResults)
    print(np.mean(trainData))
    print(np.mean(evalData))
Example #26
def experiment_fn(output_dir):
    PADWORD = '[PAD]'
    MAX_DOCUMENT_LENGTH = 3

    titles = [
        'Biodegradable Bags Cause Outrage in Italy',
        'Tom Brady denies key points of ESPN Patriots article',
        'Aldi to open first Kingwood store', PADWORD
    ]
    labels = ['International', 'Sport', 'Business']

    TARGETS = tf.constant(["International", "Sport", "Business"])

    words = tf.sparse_tensor_to_dense(tf.string_split(titles),
                                      default_value=PADWORD)

    vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
        MAX_DOCUMENT_LENGTH)
    vocab_processor.fit(titles)

    outfilename = "/Users/eliapalme/Newsriver/Newsriver-classifier/training/vocabfile.vcb"

    vocab_processor.save(outfilename)

    nwords = len(vocab_processor.vocabulary_)

    ## Transform the documents using the vocabulary.
    XX = np.array(list(vocab_processor.fit_transform(titles)))

    # make targets numeric
    table = tf.contrib.lookup.index_table_from_tensor(mapping=TARGETS,
                                                      num_oov_buckets=1,
                                                      default_value=-1)
    features = tf.constant(["International", "Sport", "Business"])
    targetX = table.lookup(features)

    return tflearn.Experiment(
        tflearn.Estimator(model_fn=cnn_model, model_dir=output_dir),
        train_input_fn=XX,
        eval_input_fn=targetX,
        eval_metrics={
            'acc':
            tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy,
                               prediction_key='class')
        },
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        train_steps=TRAIN_STEPS)
Example #27
def main(unused_argv):
    # Load training and eval data
    # mnist = learn.datasets.load_dataset("mnist")
    # train_data = mnist.train.images  # Returns np.array
    # train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    # eval_data = mnist.test.images  # Returns np.array
    # eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    feature, labels = di.import_data('KATAKANA')

    feature_arr = np.asarray(feature, dtype = np.float32)
    labels_arr = np.asarray(labels)
    assert len(feature_arr) == len(labels_arr)
    p = np.random.permutation(len(feature_arr))
    feature_arr = feature_arr[p]
    labels_arr = labels_arr[p]
    labels_one_hot = pd.get_dummies(labels_arr).values
    # labels_arr = labels_arr - 9250.0
    #labels_arr = labels_arr - 166
    train_data, eval_data, train_labels, eval_labels = train_test_split(
        feature_arr, labels_one_hot, test_size=0.10, random_state=42)

    # Create the Estimator
    mnist_classifier = learn.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/japcnnn_cnn_model_kanji_god_4")

    # Set up logging for predictions
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=100)

    # Train the model
    mnist_classifier.fit(
        x=train_data,
        y=train_labels,
        batch_size=100,
        steps=100,
        monitors=[logging_hook])

    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
            learn.MetricSpec(
                metric_fn=tf.metrics.accuracy, prediction_key="classes", label_key="labels"),
    }

    # Evaluate the model and print results
    eval_results = mnist_classifier.evaluate(
        x=eval_data, y=eval_labels, batch_size=100)
    print(eval_results)
Example #28
def main(unused_argv):
  # Load training and eval data
  flags.DEFINE_string('name', 'main', 'main')

  train_data, train_labels = create_roofs_arrays(LEARN)
  eval_data, eval_labels = create_roofs_arrays(TEST)

  print(train_data.shape)

  metrics = {
      "accuracy":
          learn.MetricSpec(
              metric_fn=tf.metrics.accuracy, prediction_key="classes"),
  }


  validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
      eval_data,
      eval_labels,
      every_n_steps=2,
      metrics=metrics)

  # Create the Estimator
  classifier = learn.Estimator(
      model_fn=cnn_model_fn,
      model_dir="model4")

  # Set up logging for predictions
  tensors_to_log = {"probabilities": "softmax_tensor"}
  logging_hook = tf.train.LoggingTensorHook(
      tensors=tensors_to_log, every_n_iter=100)



  # Train the model
  classifier.fit(
      x=train_data,
      y=train_labels,
      batch_size=100,
      steps=1000,
      monitors=[ validation_monitor])

  # Configure the accuracy metric for evaluation

  # Evaluate the model and print results
  eval_results = classifier.evaluate(
      x=eval_data, y=eval_labels, metrics=metrics)
  print(eval_results)
Example #29
def main(unused_argv):
    # Load training and eval data
    data = np.loadtxt('new_class_bouding_1/feature.txt')
    label_data = np.loadtxt('new_class_bouding_1/label.txt')

    train_data = data[:50]
    train_data = np.array(train_data)

    train_labels = label_data[:50]
    train_labels = np.array(train_labels)

    eval_data = data[50:]
    eval_data = np.array(eval_data)

    eval_labels = label_data[50:]
    eval_labels = np.array(eval_labels)

    print(train_data.shape)
    print(train_labels.shape)
    # Create the Estimator
    mnist_classifier = learn.Estimator(model_fn=boat_model_fn,
                                       model_dir="boat_models")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    # Train the model
    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=50,
                         steps=10000,
                         monitors=None)

    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }

    # Evaluate the model and print results
    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #30
def experiment_fn(output_dir):
    return tflearn.Experiment(
        tflearn.Estimator(model_fn=simple_rnn, model_dir=output_dir),
        train_input_fn=get_train(),
        eval_input_fn=get_valid(),
        eval_metrics={
            'rmse':
            tflearn.MetricSpec(
                metric_fn=metrics.streaming_root_mean_squared_error)
        },
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ])