Example #1
def main():
    tf.logging.set_verbosity(tf.logging.INFO)
    estimator = learn.Estimator(
        model_fn=bow_model,
        model_dir="results/bow/",
    )
    estimator.fit(input_fn=get_wikireading_input(), steps=10000)
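fit(input_fn=...) expects a zero-argument callable that builds and returns (features, labels) tensors, so get_wikireading_input() must itself return such a callable. A minimal sketch of what it might look like, with hypothetical names and shapes standing in for the real WikiReading pipeline:

import tensorflow as tf

def get_wikireading_input():
    # Hypothetical factory; the real version would read WikiReading records
    def input_fn():
        # Toy constant batch standing in for the real dataset
        features = {"tokens": tf.constant([[1, 5, 3], [2, 4, 6]], dtype=tf.int64)}
        labels = tf.constant([0, 1], dtype=tf.int64)
        return features, labels
    return input_fn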
Example #2
def main(unused_argv):
    global n_words
    # Prepare training and testing data
    dbpedia = learn.datasets.load_dataset(
        'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
    x_train = pandas.DataFrame(dbpedia.train.data)[1]
    y_train = pandas.Series(dbpedia.train.target)
    x_test = pandas.DataFrame(dbpedia.test.data)[1]
    y_test = pandas.Series(dbpedia.test.target)

    # Process vocabulary
    vocab_processor = learn.preprocessing.VocabularyProcessor(
        MAX_DOCUMENT_LENGTH)
    x_train = np.array(list(vocab_processor.fit_transform(x_train)))
    x_test = np.array(list(vocab_processor.transform(x_test)))
    n_words = len(vocab_processor.vocabulary_)
    print('Total words: %d' % n_words)

    # Build model
    classifier = learn.Estimator(model_fn=bag_of_words_model)

    # Train and predict
    classifier.fit(x_train, y_train, steps=100)
    y_predicted = classifier.predict(x_test)
    score = metrics.accuracy_score(y_test, y_predicted['class'])
    print('Accuracy: {0:f}'.format(score))
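VocabularyProcessor maps each document to a fixed-length vector of word ids, padding or truncating to MAX_DOCUMENT_LENGTH. A quick sketch of that behavior on a toy corpus, using the same tf.contrib.learn API as above:

import numpy as np
from tensorflow.contrib import learn

docs = ["the cat sat", "the dog barked loudly"]
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length=4)
ids = np.array(list(vocab_processor.fit_transform(docs)))
# ids has shape (2, 4): each document is padded/truncated to 4 word ids,
# and len(vocab_processor.vocabulary_) gives the vocabulary size (id 0 is reserved)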
Example #3
def main(unused_argv):
    classes = 1721
    # Load training and eval data
    training, t_labels, validation, v_labels = prep.data_from_base(
        'training_data')
    # t_labels = onehot_labels(t_labels, classes)
    # v_labels = onehot_labels(v_labels, classes)

    # Create the Estimator
    hiragana_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                          model_dir="/tmp/kanji_cnn_test2")
    # Set up logging for predictions
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=100)
    # Train the model
    hiragana_classifier.fit(x=training,
                            y=t_labels,
                            batch_size=50,
                            steps=1000,
                            monitors=[logging_hook])
    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    # Evaluate the model and print results
    eval_results = hiragana_classifier.evaluate(x=validation,
                                                y=v_labels,
                                                metrics=metrics)
    print(eval_results)
Example #4
def main(unused_argv):
    mnist = learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    mnist_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                       model_dir="/tmp/mnist_convnet_model")

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=100,
                         steps=20000,
                         monitors=[logging_hook])

    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }

    eval_results = mnist_classifier.evaluate(x=eval_data,
                                            y=eval_labels,
                                            metrics=metrics)
    print(eval_results)
Example #5
def main(unused_argv):
    ts = time.time()
    mnist = learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
    mnist_classifier = learn.Estimator(
        model_fn=cnn_model_fn,
        model_dir=os.path.join(os.getcwd(), "MNIST_Model"))
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)
    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=600,
                         steps=8000,
                         monitors=[logging_hook])
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
    print("CONVNET TOOK {0}s TO TRAIN".format(time.time() - ts))
Example #6
def predict(image_name, speed):
    #Create the Estimator
    gta_driver = learn.Estimator(model_fn=get_model_fn(),
                                 model_dir="/tmp/gta_driver_model")

    filename_queue = tf.train.string_input_producer([image_name])

    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)

    image = tf.image.decode_bmp(value, channels=3)

    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        image = tf.reshape(image, [width * height * 3])
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image = image.eval()

        speed = tf.reshape(speed, [1]).eval()

        features = tf.concat([speed, image], axis=0).eval()

        coord.request_stop()
        coord.join(threads)

    predictions = gta_driver.predict(x=features, as_iterable=True)

    for i, p in enumerate(predictions):
        print("Predictions [Steering Angle: %s, Throttle: %s, Brake: %s]" %
              (p["predictions"][0], p["predictions"][1], p["predictions"][2]))
Example #7
def main(argv):
    global args

    parser = argparse.ArgumentParser()
    parser.add_argument('--regularization_type',
                        default="none",
                        help="Regularization type: l1, l2")
    parser.add_argument('--regularization_value',
                        type=float,
                        default=0.0,
                        help="Value used for regularization. defualt 0.0")
    parser.add_argument(
        '--weights_file',
        default='weights_hist.png',
        help="Filename to save the histogram. Default: weights_hist.png")
    args = parser.parse_args()
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = cross_validation.train_test_split(
        iris.data, iris.target, test_size=0.2)
    classifier = learn.Estimator(model_fn=model)
    classifier.fit(x_train, y_train, steps=1000)
    y_predicted = [
        p['class'] for p in classifier.predict(x_test, as_iterable=True)
    ]
    score = metrics.accuracy_score(y_test, y_predicted)
    print('Accuracy: {0:f}'.format(score))

    weights = classifier.get_variable_value(WEIGHTS_NAME)
    flat_weights = [w for wl in weights for w in wl]
    plot_weights(flat_weights, args.weights_file, args.regularization_type)
Example #8
def __init__(self, config):
    self.config = config
    self.model = tflearn.SKCompat(
        tflearn.Estimator(model_fn=self._model(),
                          model_dir=self.config["log_dir"]))
    self.bias = None
    self.weights = None
Example #9
def run_learn(pixels, label):
    cnn_dir = os.environ['VIRTUAL_ENV'] + "/mnist_convnet_model"
    mnist_dataset = mnist.load_mnist(train_dir=os.environ['VIRTUAL_ENV'] +
                                     '/MNIST-data')

    train_data = concat(mnist_dataset.train.images, pixels)
    train_labels = concat(mnist_dataset.train.labels, label)

    eval_data = concat(mnist_dataset.test.images, pixels)
    eval_labels = concat(mnist_dataset.test.labels, label)

    estimator = learn.Estimator(model_fn=cnn_model_fn, model_dir=cnn_dir)
    dataset_classifier = learn.SKCompat(estimator)

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=500)

    dataset_classifier.fit(x=train_data,
                           y=train_labels,
                           batch_size=128,
                           steps=5000,
                           monitors=[logging_hook])

    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes")
    }

    eval_results = dataset_classifier.score(x=eval_data,
                                            y=eval_labels,
                                            metrics=metrics)
    return eval_results
Example #10
def main(unused_args):
    ### Download and load MNIST dataset.
    mnist = learn.datasets.load_dataset('mnist')

    ### Linear classifier.
    feature_columns = learn.infer_real_valued_columns_from_input(
        mnist.train.images)
    classifier = learn.LinearClassifier(feature_columns=feature_columns,
                                        n_classes=10)
    classifier.fit(mnist.train.images,
                   mnist.train.labels.astype(np.int32),
                   batch_size=100,
                   steps=1000)
    score = metrics.accuracy_score(mnist.test.labels,
                                   list(classifier.predict(mnist.test.images)))
    print('Accuracy: {0:f}'.format(score))

    ### Convolutional network
    classifier = learn.Estimator(model_fn=conv_model)
    classifier.fit(mnist.train.images,
                   mnist.train.labels,
                   batch_size=100,
                   steps=20000)
    score = metrics.accuracy_score(mnist.test.labels,
                                   list(classifier.predict(mnist.test.images)))
    print('Accuracy: {0:f}'.format(score))
Example #11
def main(unused_argv):
    # Prepare training and testing data
    x_train, y_train, x_test, y_test, vocabulary_processor = \
        data_helper.load_data_labels(FLAGS.data_dir + FLAGS.data_file, FLAGS.dev_sample_percentage)
    n_class = y_train.shape[1]
    y_test = np.argmax(y_test, 1)
    y_train = np.argmax(y_train, 1)
    # Build model
    classifier = learn.Estimator(
        model_fn=lambda features, target, mode: rnn_model(
            features, target, mode, len(vocabulary_processor.vocabulary_),
            FLAGS.embedding_size, n_class),
        model_dir=FLAGS.log_dir)
    # config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1e3)))
    # Train and evaluate
    y_train = (y for y in y_train)
    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    # tensors_to_log = {"probabilities": "softmax_tensor"}
    # logging_hook = tf.train.LoggingTensorHook(
    #     tensors=tensors_to_log, every_n_iter=100)
    # classifier.fit(x=x_train, y=y_train, batch_size=FLAGS.batch_size, steps=FLAGS.train_steps, monitors=[logging_hook])
    # , monitors=[validation_monitor])
    classifier.fit(x=x_train, y=y_train, batch_size=FLAGS.batch_size, steps=FLAGS.train_steps)
    y_test = (y for y in y_test)
    # Configure the accuracy metric for evaluation
    # metrics = {
    #   learn.metric_spec.MetricSpec(
    #           metric_fn=tf.metrics.accuracy, prediction_key="classes"),
    #                 }
    # score = classifier.evaluate(x=x_test, y=y_test, batch_size=FLAGS.batch_size, steps=FLAGS.dev_steps, metrics=metrics)
    score = classifier.evaluate(x=x_test, y=y_test, batch_size=FLAGS.batch_size, steps=FLAGS.dev_steps)
    print(score)
Example #12
def main(unused_argv):
    # Load training and eval data
    star_ratings = []
    images = []
    star_rating = 0
    for root, dirs, files in os.walk("photos2/"):
        path = root.split(os.sep)
        print((len(path) - 1) * '---', os.path.basename(root))

        if os.path.basename(root) != '':
            star_rating = int(float(os.path.basename(root)[-3:]))
        for filename in files:
            if re.search(r"\.(jpg|jpeg|png|bmp|tiff)$", filename):
                # Append the label only when an image is actually added,
                # so star_ratings stays aligned with images
                star_ratings.append(star_rating)
                filepath = os.path.join(root, filename)
                image = ndimage.imread(filepath, mode="L")
                image_resized = misc.imresize(image, (54, 54))
                images.append(image_resized)


    # train_labels = np.random.randint(1, high=5, size=len(star_ratings))
    print(len(star_ratings))
    train_labels = np.array(star_ratings)
    train_data = np.asarray(images, dtype=np.float32)
    # Create the Estimator
    classifier = learn.Estimator(model_fn=cnn_model_fn,
                                 model_dir="/tmp/convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    # Train the model
    classifier.fit(x=train_data[:-100],
                   y=train_labels[:-100],
                   batch_size=1,
                   steps=200,
                   monitors=[logging_hook])

    print("hi")

    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    print("bye")

    # Evaluate the model and print results
    eval_results = classifier.evaluate(x=train_data[-100:],
                                       y=train_labels[-100:],
                                       metrics=metrics)

    predictions = classifier.predict(x=train_data[-100:])

    print(predictions)
    print(eval_results)
Example #13
def main():
    localtime = time.asctime(time.localtime(time.time()))
    print(localtime)
    outputlist = []
    filecount = 0
    explore_path = "/Users/praneet/Documents/Kaggle/Amazon/test"
    classifier = SKCompat(
        learn.Estimator(model_fn=train,
                        model_dir="/Users/praneet/Downloads/model"))
    for root, dirs, files in os.walk(explore_path):
        for file_name in files:
            if file_name.endswith(".jpg"):
                lst = []
                eval_data = []
                filecount += 1
                file_path = os.path.abspath(os.path.join(root, file_name))
                img = cv2.imread(file_path)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                fixed_size = (56, 56)
                img = cv2.resize(img, dsize=fixed_size)
                eval_data.append(img)
                eval_data = np.array(eval_data, dtype=np.float32) / 255.
                predictions = classifier.predict(x=eval_data)
                print(file_name)
                lst.append(file_name)
                for x in predictions['probabilities']:
                    for y in x:
                        lst.append(y)
                outputlist.append(lst)
    print(filecount)
    df = pd.DataFrame(outputlist)
    df.to_csv('output.csv', index=False, header=False)
    localtime = time.asctime(time.localtime(time.time()))
    print(localtime)
Example #14
def pred(unused_argv):
    print("Prediction")
    gene_classifier = learn.Estimator(
        model_fn=cnn_model_fn,
        model_dir=FLAGS.model_dir,
        params={'learning_rate': FLAGS.learning_rate})

    train_data_file = data_dir + '/' + test_file

    train_data, train_labels = load_data_from_file(train_data_file,
                                                   startline=FLAGS.startline,
                                                   endline=FLAGS.endline)

    # The slice keeps the batch dimension, so this predicts on the single
    # example at index 20
    input_fn = numpy_io.numpy_input_fn(x={'train_data': train_data[20:21]},
                                       shuffle=False)
    pred_result = gene_classifier.predict(input_fn=input_fn)
    shape = train_labels.shape
    print(shape)
    result = []
    for res in pred_result:
        result.append(res)
    print(len(result))
    result = np.array(result)
    print(' '.join([str(x) for x in train_labels[20, :]]))
    print(' '.join([str(x) for x in result[0, :]]))
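numpy_input_fn batches along the first axis of each array, which is why the slice above keeps its batch dimension: train_data[20] alone would be read as many scalar examples. A tiny sketch of the behavior, assuming the same numpy_io import used by this example:

import numpy as np
from tensorflow.python.estimator.inputs import numpy_io

x = {'train_data': np.random.rand(4, 10).astype(np.float32)}  # 4 examples
# Each row along axis 0 is one example; shuffle=False preserves order,
# so prediction i corresponds to input row i
input_fn = numpy_io.numpy_input_fn(x, batch_size=2, num_epochs=1, shuffle=False)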
Example #15
def main():
    mode = 'ebgan'
    params = {
        'learning_rate': 0.005,
        'z_dim': 1,
        'generator': partial(linear_generator, hidden_size=10),
    }
    if mode == 'gan':
        params.update({
            'discriminator':
            partial(linear_discriminator, hidden_size=10),
            'loss_builder':
            model.make_gan_loss
        })
    elif mode == 'ebgan':
        params.update({
            'discriminator':
            partial(autoencoder_discriminator, hidden_size=10),
            'loss_builder':
            partial(model.make_ebgan_loss, epsilon=0.0001)
        })
    tf.logging.set_verbosity(tf.logging.INFO)
    data = np.random.normal(4, 0.5, 10000).astype(np.float32)
    data.sort()
    est = learn.SKCompat(
        learn.Estimator(model_fn=model.gan_model,
                        model_dir='models/gan_intro/',
                        params=params))
    print_monitor = tf.train.LoggingTensorHook(
        ['loss_discr', 'loss_generator'], every_n_iter=100)
    est.fit(x=data,
            y=data,
            steps=10000,
            batch_size=32,
            monitors=[print_monitor])
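The partial(...) calls bind hyperparameters into the generator and discriminator up front, so gan_model only has to deal with ready-made callables. The same trick in isolation, with a hypothetical signature:

from functools import partial

def linear_generator(x, hidden_size):
    pass  # hypothetical: build a linear generator network over x

generator = partial(linear_generator, hidden_size=10)
# generator(x) is now equivalent to linear_generator(x, hidden_size=10)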
Example #16
def main(unused_argv):

    train_data = op[0:80000, ]
    train_labels = np.array(s["stars"][0:80000])
    eval_data = op[80000:100000, ]
    eval_labels = np.array(s["stars"][80000:100000])

    # Create the Estimator
    mnist_classifier = learn.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/ahahahahahaha_convnet_model")

    # Train the model
    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=100,
                         steps=20000)

    # Configure the accuracy metric for evaluation
    metrics = {
        "rmse":
        learn.MetricSpec(metric_fn=tf.metrics.mean_squared_error,
                         prediction_key="ratings"),
    }

    # Evaluate the model and print results
    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #17
def build_model(self):
    # Build the prediction model
    self.model = tflearn.SKCompat(
        tflearn.Estimator(model_fn=lstm_model(self.timesteps,
                                              self.rnn_layer,
                                              self.dense_layer),
                          model_dir=self.model_dir))
Example #18
def experiment_fn(output_dir):
    # run experiment

    #train_monitors = tf.contrib.learn.monitors.ValidationMonitor(test_set.target, test_set.target,every_n_steps=5)
    #logging_hook = tf.train.LoggingTensorHook({"accuracy" : tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy, prediction_key='class')}, every_n_iter=10)

    return tflearn.Experiment(
        tflearn.Estimator(model_fn=cnn_model,
                          model_dir=output_dir,
                          config=tf.contrib.learn.RunConfig(
                              save_checkpoints_steps=CHECKPOINT_STEPS,
                              save_checkpoints_secs=None,
                              save_summary_steps=SUMMARY_STEPS)),
        train_input_fn=get_train(),
        eval_input_fn=get_valid(),
        eval_metrics={
            'acc':
            tflearn.MetricSpec(metric_fn=metrics.streaming_accuracy,
                               prediction_key='class')
        },
        checkpoint_and_export=True,
        train_monitors=None,
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1)
        ],
        train_steps=TRAIN_STEPS,
        eval_steps=EVAL_STEPS)
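An experiment_fn like this is normally handed to learn_runner, which calls it with the output directory and drives the train/eval schedule. A minimal sketch, assuming a TF 1.x contrib version where run() still accepted output_dir:

from tensorflow.contrib.learn.python.learn import learn_runner

# learn_runner invokes experiment_fn(output_dir) and runs the returned
# Experiment (train_and_evaluate by default)
learn_runner.run(experiment_fn, output_dir="training/")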
Example #19
def train():
    train_data, train_labels, eval_data, eval_labels = load_dataset(
        '/Users/sunary/Downloads/train')

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    dogvscat_classifier = learn.SKCompat(
        learn.Estimator(model_fn=cnn_model_fn,
                        model_dir='/tmp/dogvscat_convnet_model'))
    dogvscat_classifier.fit(x=train_data,
                            y=train_labels,
                            batch_size=50,
                            steps=1000,
                            monitors=[logging_hook])

    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes")
    }
    eval_results = dogvscat_classifier.score(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #20
def main(unused_argv):

  print("+++  running main +++")
  
  # define global variable for number of classes that we will fill
  # as per the number of people returned from the dataset
  global n_classes
  
  # Slices out images of 64x64 from the dataset. Returns images of 34 different people
  lfw_people = fetch_lfw_people(min_faces_per_person=30,
                                slice_=(slice(61, 189), slice(61, 189)),
                                resize=0.5, color=True)
  X = lfw_people.images
  y = lfw_people.target
  
  # get count of number of possible labels - need to use this as
  # number of units for dense layer in call to tf.layers.dense and
  # for defining the one-hot matrix. Here the number of possible
  # labels is 34 based on the subset of LFW that we selected above. 
  target_names = lfw_people.target_names
  n_classes = target_names.shape[0]
  y = np.asarray(y, dtype=np.int32)
  
  # split into a training and testing set
  train_data, eval_data, train_labels, eval_labels = train_test_split(
      X, y, test_size=0.25, random_state=42)
 
  print("+++ I split the data  +++")
  # Create the Estimator - changed here to reflect use of LFW not MNIST
  lfw_classifier = learn.Estimator(model_fn=cnn_model_fn, model_dir="/tmp/lfw_CNN_model")

  print("+++ I made the estimator  +++")
  # Set up logging for predictions
  # Log the values in the "Softmax" tensor with label "probabilities"
  tensors_to_log = {"probabilities": "softmax_tensor"}
  logging_hook = tf.train.LoggingTensorHook(
      tensors=tensors_to_log, every_n_iter=50)
  
  print("+++ Train the model +++")
  lfw_classifier.fit(
      x=train_data,
      y=train_labels,
      batch_size=64,
      steps=1000,
      monitors=[logging_hook])
  print("+++ Doing the metrics  +++")
  # Configure the accuracy metric for evaluation
  metrics = {
      "accuracy":
          learn.MetricSpec(
              metric_fn=tf.metrics.accuracy, prediction_key="classes"),
  }
      
  print("+++ printing the evaluation  +++")
  # Evaluate the model and print results
  eval_results = lfw_classifier.evaluate(
      x=eval_data, y=eval_labels, metrics=metrics)
  print("+++ eval results are")
  print(eval_results)
  print("++++++++++++++++++")
Example #21
def infer():
  """Inference routine, outputting answers to `FLAGS.answers_path`."""
  _set_ckpt()
  estimator = learn.Estimator(
      model_fn=model_fn, config=_get_config(), params=_get_hparams())
  predictions = estimator.predict(
      input_fn=_get_test_input_fn(), as_iterable=True)
  global_step = estimator.get_variable_value('global_step')
  path = FLAGS.answers_path or os.path.join(FLAGS.restore_dir,
                                            'answers-%d.json' % global_step)
  answer_dict = {'no_answer_prob': {}, 'answer_prob': {}}
  for prediction in tqdm(predictions):
    id_ = prediction['id'].decode('utf-8')
    answer_dict[id_] = prediction['a'].decode('utf-8')
    answer_dict['answer_prob'][id_] = prediction['answer_prob'].tolist()
    answer_dict['no_answer_prob'][id_] = prediction['no_answer_prob'].tolist()
    if FLAGS.oom_test:
      break

  # TODO(seominjoon): use sum of logits instead of normalized prob.
  if FLAGS.merge:
    new_answer_dict = defaultdict(list)
    for id_, answer_prob in answer_dict['answer_prob'].items():
      answer = answer_dict[id_]
      id_ = id_.split(' ')[0]  # retrieve true id
      new_answer_dict[id_].append([answer_prob, answer])
    answer_dict = {
        id_: max(each, key=lambda pair: pair[0])[1]
        for id_, each in new_answer_dict.items()
    }

  with tf.gfile.GFile(path, 'w') as fp:
    json.dump(answer_dict, fp)
  tf.logging.info('Dumped predictions at: %s' % path)
Example #22
def main(unused_argv):
    global label_names
    log_dir = 'tmp/cnn_convnet_model' 
    label_names = unpickle('dataset/cifar-10-batches-py/batches.meta')['label_names']
    data, labels = load_cifar10_data()
    eval_data, eval_labels = load_cifar10_data(type=learn.ModeKeys.EVAL)
    
    classifier = learn.Estimator(
        model_fn=cnn_model, 
        model_dir=log_dir)
    logging_hook = tf.train.LoggingTensorHook(
        tensors={'probabilities': 'softmax'}, 
        every_n_iter=50)
    classifier.fit(
        x=data,
        y=labels,
        batch_size=100,
        steps=20000,
        #monitors=[logging_hook])
        monitors=None)
    
    metrics={
        'accuracy': learn.MetricSpec(metric_fn=tf.metrics.accuracy, prediction_key='classes'),
    }
    
    eval_results = classifier.evaluate(
        x=eval_data,
        y=eval_labels,
        metrics=metrics)
    
    print(eval_results)
Example #23
def main(unused_argv):
    print("New2")
    # Load training and eval data
    mnist = learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images  # Returns np.array
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    # Create the Estimator
    mnist_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                       model_dir="/tmp/mnist_convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)
    # Train the model
    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=100,
                         steps=20000,
                         monitors=[logging_hook])
    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    # Evaluate the model and print results
    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #24
def train():
    mnist = learn.datasets.load_dataset('mnist')

    train_data = mnist.train.images
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)

    eval_data = mnist.test.images
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    mnist_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                       model_dir='/tmp/mnist_convnet_models')
    mnist_classifier.fit(x=train_data,
                         y=train_labels,
                         batch_size=50,
                         steps=10000)

    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes")
    }
    eval_results = mnist_classifier.evaluate(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #25
def test(testX):
    '''
    Complete this function.
    This function must read the weight files and
    return the predicted labels.
    The returned object must be a 1-dimensional numpy array of
    length equal to the number of examples. The i-th element
    of the array should contain the label of the i-th test
    example.

    Note: Download and load the model in folder ./dtmp/
    '''
    # Download the model
    downloadData_CNN()

    # Predict
    eval_data = np.asarray(testX, dtype=np.float32)  # Returns np.array

    # Estimator
    mnist_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                       model_dir="./dtmp")

    yd_hat = list(mnist_classifier.predict(x=eval_data))
    labels_hat = np.array([what['classes'] for what in yd_hat])

    return labels_hat
Example #26
def Q_update():
    """Update weights of three models:
       "sell" model, "buy" model and "hold" model
    """
    for action in gv.action_set:
        gv.mylogger.logger.info("Update " + action + " model")

        # # Configure a ValidationMonitor with training data
        # validation_monitor = learn.monitors.ValidationMonitor(
        #     np.float32(Q_data[action]),
        #     np.float32(Q_labels[action]),
        #     every_n_steps=20)

        # Create the estimator
        Q_estimator = learn.Estimator(model_fn=gv.cnn_model_fn,
                                      model_dir=gv.model_dirs[action])

        # Train the model
        SKCompat(Q_estimator).fit(x=train.Q_data[action].astype(np.float32),
                                  y=train.Q_labels[action].astype(np.float32),
                                  steps=training_steps)

        # Evaluate the model and print results
        eval_results = Q_estimator.evaluate(
            x=train.Q_data[action].astype(np.float32),
            y=train.Q_labels[action].astype(np.float32))
        gv.mylogger.logger.info(eval_results)
Example #27
def main(unused_argv):
    # Load training and eval data
    prepper = prep_hiragana.prepper('hiragana', 'hiragana.txt')
    train_data = prepper.train_images()  # Returns np.array
    train_labels = np.asarray(prepper.train_labels(), dtype=np.int32)

    eval_data = prepper.validate_images()  # Returns np.array
    eval_labels = np.asarray(prepper.validate_labels(), dtype=np.int32)
    # Create the Estimator
    hiragana_classifier = learn.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/hiragana_convnet_model2")
    # Set up logging for predictions
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)
    # Train the model
    hiragana_classifier.fit(x=train_data,
                            y=train_labels,
                            batch_size=50,
                            steps=1000,
                            monitors=[logging_hook])
    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }
    # Evaluate the model and print results
    eval_results = hiragana_classifier.evaluate(x=eval_data,
                                                y=eval_labels,
                                                metrics=metrics)
    print(eval_results)
Example #28
def recognize():
    if image_loaded.get() == 0:
        recognize_text.delete('0.0', tk.END)
        recognize_text.insert(tk.END, "No image loaded!")
    else:
        global path_to_image

        im = adjust_picture(path_to_image.get())
        im = PIL.ImageOps.invert(im)
        im_np = np.array(im)
        im_np = np.float32(im_np)

        class_labels = ['triangle', 'circle', 'square', 'hexagon']

        dirname = os.path.dirname(os.path.abspath(__file__))
        shape_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                           model_dir=os.path.join(
                                               dirname, 'net', 'shape_model'))

        predictions = shape_classifier.predict(x=im_np, as_iterable=True)
        for i, p in enumerate(predictions):
            recognize_text.delete('0.0', tk.END)
            recognize_text.tag_configure('center', justify='center')
            recognize_text.insert(tk.END, class_labels[int(p["classes"])])
            recognize_text.tag_add('center', '1.0', 'end')
Example #29
def get_estimator(model_dir, params, run_config=None):
    """ Returns an instance of the Estimator """
    estimator = learn.Estimator(model_fn=_model_fn,
                                model_dir=model_dir,
                                params=params,
                                config=run_config)
    return estimator
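A hypothetical call site, with made-up hyperparameters, might look like this:

params = {"learning_rate": 1e-3, "hidden_units": [128, 64]}  # hypothetical values
run_config = learn.RunConfig(save_checkpoints_steps=500)
estimator = get_estimator("models/demo/", params, run_config=run_config)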
Example #30
def main(unused_argv):
    # Input data
    ROOT_PATH = "/Users/miaoyan/PycharmProjects/hori_verti_line_recognization/short_line/"
    train_data_dir = os.path.join(ROOT_PATH, "train")
    train_data_index, train_labels = load_data(train_data_dir) #read vertical-01 first, then read horizontal-10

    train_data = np.asarray(train_data_index)
    train_data = 1 - train_data
    train_data = train_data.astype('float32')

    train_labels = np.asarray(train_labels, dtype=np.float32) #01-vertical 10-horizontal

    # create variables for the horizontal and vertical line filters
    variable_line = tf.Variable(tf.random_uniform(shape=[3, 3, 1, 1], minval=1, maxval=2, dtype=tf.float32))
    sess = tf.Session()  # the sess.run calls below need an explicit session
    sess.run(variable_line.initializer)

    # Use cnn_training_model
    y = cnn_model_line(train_data, variable_line) #output of CNNs

    # reshape the original labels (01 is vertical line, 10 is horizontal line)
    y_ = tf.reshape(train_labels, [-1])

    # Define loss and optimizer
    loss = None
    train_op =  None

    # mean square error
    onehot_labels_prediction = tf.one_hot(indices=y, depth=2)
    onehot_labels_label = tf.one_hot(indices=tf.cast(y_, dtype=tf.int32), depth=2)
    print(sess.run(onehot_labels_prediction))
    print(sess.run(onehot_labels_label))
    mse_loss = tf.losses.mean_squared_error(labels=onehot_labels_label, predictions=onehot_labels_prediction)
    print(sess.run(mse_loss))

    # Train the model
    # Create the Estimator
    line_classifier = learn.Estimator(model_fn=cnn_model_line,
        model_dir="/Users/miaoyan/PycharmProjects/hori_verti_line_recognization")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}

    # Prints the given tensors once every N local steps
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=2)

    # Fit the data for training the model
    line_classifier.fit(x=train_data, y=train_labels, batch_size=20, steps=1000, monitors=[logging_hook])

    # Configure the accuracy metric for evaluation
    metrics = {"accuracy": learn.MetricSpec(metric_fn=tf.metrics.accuracy, prediction_key="classes"), }

    train_op = tf.contrib.layers.optimize_loss(
        loss=mse_loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=0.001,
        optimizer='SGD') # use stochastic gradient descent as optimization algorithm
    print(sess.run(variable_line))