def BuildComplexLearner(restore=True):
    """Builds a Complex Learner that uses CNNs for digit classification.
    
    Args:
        restore: (bool) Whether to restore the model or train a new one.
    """
    if restore:
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        learner = ComplexLearner()
        learner.Restore("thresholded_model.ckpt")
        return learner
    else:
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        learner = ComplexLearner()
        ThresholdPixels(mnist.train.images)

        def signal_handler(sig, frame):
            print("Caught ctrl-c. Saving model then exiting...")
            learner.Save("thresholded_ctrl_c.ckpt")
            sys.exit(0)

        signal.signal(signal.SIGINT, signal_handler)
        learner.Train(mnist.train)
        ThresholdPixels(mnist.test.images)
        learner.Test(mnist.test)
        return learner
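ThresholdPixels is not defined in this snippet; judging from its in-place use on mnist.train.images, it binarizes the grayscale pixels. A minimal sketch, where both the behavior and the 0.5 cutoff are assumptions:

import numpy as np

def ThresholdPixels(images, threshold=0.5):
    # Binarize grayscale pixels in place: values at or above the (assumed)
    # threshold become 1.0, everything else becomes 0.0.
    images[:] = (images >= threshold).astype(images.dtype)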
Example #2
    def __init__(self):
        # Import data
        error = None
        for _ in range(10):
            try:
                self.mnist = input_data.read_data_sets(
                    "/tmp/tensorflow/mnist/input_data", one_hot=True)
                error = None
                break
            except Exception as e:
                error = e
                time.sleep(5)
        if error:
            raise ValueError("Failed to import data", error)

        # Set seed and build layers
        tf.set_random_seed(0)

        self.x = tf.placeholder(tf.float32, [None, 784], name="x")
        self.y_ = tf.placeholder(tf.float32, [None, 10], name="y_")
        y_conv, self.keep_prob = deepnn(self.x)

        # Need to define loss and optimizer attributes
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                labels=self.y_, logits=y_conv))
        self.optimizer = tf.train.AdamOptimizer(1e-4)
        self.variables = ray_tf_utils.TensorFlowVariables(
            self.loss, tf.get_default_session())

        # For evaluating test accuracy
        correct_prediction = tf.equal(
            tf.argmax(y_conv, 1), tf.argmax(self.y_, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
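deepnn is not defined here; it returns the logits tensor and a dropout keep-probability placeholder. A sketch consistent with the standard TensorFlow MNIST deep tutorial this appears to follow (the exact layer sizes are assumptions):

def deepnn(x):
    # Two conv/pool stages, a dense layer, dropout, and 10-way logits.
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h1 = tf.layers.conv2d(x_image, 32, [5, 5], padding='same',
                          activation=tf.nn.relu)
    p1 = tf.layers.max_pooling2d(h1, [2, 2], 2)
    h2 = tf.layers.conv2d(p1, 64, [5, 5], padding='same',
                          activation=tf.nn.relu)
    p2 = tf.layers.max_pooling2d(h2, [2, 2], 2)
    dense = tf.layers.dense(tf.reshape(p2, [-1, 7 * 7 * 64]), 1024,
                            activation=tf.nn.relu)
    keep_prob = tf.placeholder(tf.float32)
    y_conv = tf.layers.dense(tf.nn.dropout(dense, keep_prob), 10)
    return y_conv, keep_prob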
Example #3
def main():
    # Load training data
    print("loading dataset ... ")
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    image_width = 28  # MNIST image dimensions
    tensor_size = image_width * image_width  # Flatten image to 1D

    # Build model:
    session = tf.Session()
    model = softmax_regressor(tensor_size)

    def f(X):
        """
        Optimization function to train and evaluate model with
        hyperparameters.
        """
        batch_size, num_iterations = denormalize(X)
        return train_and_test(session.run, model, mnist.train,
                              mnist.test, batch_size=batch_size,
                              num_iterations=num_iterations)

    # Hyper-parameter search over batch size and training iterations.
    maxiter = 50
    x0 = np.array([1, 1])
    res = minimize(f, x0, method="nelder-mead", options={"maxiter": maxiter})

    print("done. batch size: {}, number of iterations: {}"
          .format(*denormalize(res.x)))
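denormalize is not shown; it has to map Nelder-Mead's continuous search point to a positive integer batch size and iteration count. A plausible sketch (the scale factors of 100 and 1000 are assumptions for illustration):

def denormalize(X):
    # Map the continuous (and possibly negative) search point to
    # positive integers; the scaling constants are assumptions.
    batch_size = max(1, int(round(abs(X[0]) * 100)))
    num_iterations = max(1, int(round(abs(X[1]) * 1000)))
    return batch_size, num_iterations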
def fetch_data():
    if not exists(data_dir):
        makedirs(data_dir)

    # Normalize data once if we haven't done it before and store it in a file
    if not exists(f'{data_dir}/{data_file}'):
        print('Downloading MNIST data')
        mnist = input_data.read_data_sets(data_dir, one_hot=True)

        def _normalize(data, mean=None, std=None):
            if mean is None:
                mean = np.mean(data, axis=0)
                std = np.std(data, axis=0)
            return div0((data - mean), std), mean, std

        train_data, mean, std = _normalize(mnist.train.images)

        validation_data, *_ = _normalize(mnist.validation.images, mean, std)
        test_data, *_ = _normalize(mnist.test.images, mean, std)

        mnist_data = {'train_images': train_data,
                      'train_labels': mnist.train.labels,
                      'validation_images': validation_data,
                      'validation_labels': mnist.validation.labels,
                      'test_images': test_data,
                      'test_labels': mnist.test.labels}
        with open(f'{data_dir}/{data_file}', 'wb') as f:
            pickle.dump(mnist_data, f)

    # If we have normalized the data already; load it
    else:
        with open(f'{data_dir}/{data_file}', 'rb') as f:
            mnist_data = pickle.load(f)

    return mnist_data
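div0 is referenced but not defined here; since constant pixels (e.g. the image border) have zero standard deviation, it presumably performs element-wise division that yields 0 instead of inf/nan where the denominator is 0. A minimal sketch under that assumption:

import numpy as np

def div0(a, b):
    # Element-wise a / b, returning 0 wherever b == 0.
    with np.errstate(divide='ignore', invalid='ignore'):
        out = np.true_divide(a, b)
        out[~np.isfinite(out)] = 0.0
    return out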
Example #5
def load_data(data_dir):
  """Returns training and test tf.data.Dataset objects."""
  data = input_data.read_data_sets(data_dir, one_hot=True)
  train_ds = tf.data.Dataset.from_tensor_slices((data.train.images,
                                                 data.train.labels))
  test_ds = tf.data.Dataset.from_tensors((data.test.images, data.test.labels))
  return (train_ds, test_ds)
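A short usage sketch for the returned datasets, using the TF 1.x iterator API that matches the rest of this page (batch size and shuffle buffer are arbitrary):

train_ds, test_ds = load_data('/tmp/mnist')
iterator = train_ds.shuffle(10000).batch(32).repeat().make_one_shot_iterator()
images, labels = iterator.get_next()
with tf.Session() as sess:
    batch_images, batch_labels = sess.run([images, labels])
    print(batch_images.shape, batch_labels.shape)  # (32, 784) (32, 10)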
def get_data_sets_and_params(use_MNIST_instead_of_our_data=False):
    if use_MNIST_instead_of_our_data:
        params = dict(
            width = 28,
            height = 28,
            num_training_steps = 20000,
            batch_size = 50,
        )
    else:
        params = dict(
            width = 70,
            height = 70,
            num_training_steps = 1000,
            batch_size = 50,
            training_images = 5000,
            test_images = 1000,
            allow_rotation = True,
        )

    if use_MNIST_instead_of_our_data:
        from tensorflow.examples.tutorials.mnist import input_data
        data_sets = input_data.read_data_sets('MNIST_data', one_hot=True)
    else:
        collection_dir = make_polygon_pngs.make_collection(params['width'],
                                                           params['height'],
                                                           params['training_images'],
                                                           params['test_images'],
                                                           allow_rotation=params['allow_rotation'])
        data_sets = datasets.read_data_sets(collection_dir)
    return data_sets, params
Example #7
def load_data(name, random_labels=False):
	"""Load the data
	name - the name of the dataset
	random_labels - True if we want to return random labels to the dataset
	return object with data and labels"""
	print ('Loading Data...')
	C = type('type_C', (object,), {})
	data_sets = C()
	if name.split('/')[-1] == 'MNIST':
		data_sets_temp = input_data.read_data_sets(os.path.dirname(sys.argv[0]) + "/data/MNIST_data/", one_hot=True)
		data_sets.data = np.concatenate((data_sets_temp.train.images, data_sets_temp.test.images), axis=0)
		data_sets.labels = np.concatenate((data_sets_temp.train.labels, data_sets_temp.test.labels), axis=0)
	else:
		d = sio.loadmat(os.path.join(os.path.dirname(sys.argv[0]), name + '.mat'))
		F = d['F']
		y = d['y']
		C = type('type_C', (object,), {})
		data_sets = C()
		data_sets.data = F
		data_sets.labels = np.squeeze(np.concatenate((y[None, :], 1 - y[None, :]), axis=0).T)
	# If we want to assign random labels to the  data
	if random_labels:
		labels = np.zeros(data_sets.labels.shape)
		labels_index = np.random.randint(low=0, high=labels.shape[1], size=labels.shape[0])
		labels[np.arange(len(labels)), labels_index] = 1
		data_sets.labels = labels
	return data_sets
Example #8
def make_spoko_mnist():
    """ Convert mnist data to more familiar structure """
    # Get data
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

    # Think signals
    # Training
    t_signals = mnist.train.images
    t_labels  = mnist.train.labels

    # Validation
    v_signals = mnist.validation.images
    v_labels  = mnist.validation.labels

    # ?
    savepath = 'data/mnist.storage'

    with h5py.File(savepath, 'w') as db:
        # Validation
        v_group = db.create_group('validation')

        v_group.create_dataset('signals', data = v_signals)
        v_group.create_dataset('labels',  data = v_labels)

        # Training
        t_group = db.create_group('training')

        t_group.create_dataset('signals', data = t_signals)
        t_group.create_dataset('labels',  data = t_labels)

    print "Saved mnist data to:", savepath
Example #9
def main(_):
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, W) + b
  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])
  # The raw formulation of cross-entropy,
  #
  #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
  #                                 reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
  # outputs of 'y', and then average across the batch.
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()
  # Train
  for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                      y_: mnist.test.labels}))
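The instability the comment warns about is easy to reproduce outside TensorFlow: for a large logit, exp overflows and the naive formulation produces nan, while the log-sum-exp form that softmax_cross_entropy_with_logits uses internally stays finite. A small NumPy illustration:

import numpy as np

logits = np.array([1000.0, 0.0])
naive = np.log(np.exp(logits) / np.sum(np.exp(logits)))  # overflow -> [nan, -inf]
shifted = logits - np.max(logits)                        # log-sum-exp trick
stable = shifted - np.log(np.sum(np.exp(shifted)))       # [0.0, -1000.0]
print(naive, stable)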
def train_and_evaluate(output_dir, hparams):
  EVAL_INTERVAL = 60

  mnist = input_data.read_data_sets('mnist/data', one_hot=True, reshape=False)

  train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'image':mnist.train.images},
    y=mnist.train.labels,
    batch_size=100,
    num_epochs=None,
    shuffle=True,
    queue_capacity=5000
  )

  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'image':mnist.test.images},
    y=mnist.test.labels,
    batch_size=100,
    num_epochs=1,
    shuffle=False,
    queue_capacity=5000
  )
  estimator = tf.estimator.Estimator(model_fn = image_classifier,
                                     params = hparams,
                                     config=tf.estimator.RunConfig(
                                         save_checkpoints_secs = EVAL_INTERVAL),
                                     model_dir = output_dir)
  train_spec = tf.estimator.TrainSpec(input_fn = train_input_fn,
                                    max_steps = hparams['train_steps'])
  exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
  eval_spec = tf.estimator.EvalSpec(input_fn = eval_input_fn,
                                  steps = None,
                                  exporters = exporter,
                                  throttle_secs = EVAL_INTERVAL)
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
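serving_input_fn is assumed to be defined elsewhere; a minimal sketch that matches the 'image' feature key and the unflattened 28x28x1 images implied by reshape=False:

def serving_input_fn():
    # Accept raw image tensors under the same 'image' key the model_fn expects.
    feature_placeholders = {
        'image': tf.placeholder(tf.float32, [None, 28, 28, 1])
    }
    return tf.estimator.export.ServingInputReceiver(feature_placeholders,
                                                    feature_placeholders)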
Example #11
def main(_):

  # Create a cluster from the parameter server and worker hosts.
  cluster = tf.train.ClusterSpec({"local": ["localhost:2222"]})
  
  # Create and start a server for the local task.
  server = tf.train.Server(cluster, job_name="local", task_index=0)

  # Build model...
  from tensorflow.examples.tutorials.mnist import input_data
  mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
  x = tf.placeholder(tf.float32, [None, 784])
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.nn.softmax(tf.matmul(x, W) + b)
  y_ = tf.placeholder(tf.float32, [None, 10])
  cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
  init = tf.initialize_all_variables()
  sess = tf.Session(server.target)
  sess.run(init)
  for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
def main(_):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])
    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x)
    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                              logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)
    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)
    graph_location = tempfile.mkdtemp()
    print('Saving graph to: %s' % graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(20000):
            batch = mnist.train.next_batch(50)
            if i % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0], y_: batch[1], keep_prob: 1.0})
                print('step %d, training accuracy %g' % (i, train_accuracy))
            train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        print('test accuracy %g' % accuracy.eval(feed_dict={
          x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
Example #13
def main(_):
  cluster,server,job_name,task_index,num_workers = get_mpi_cluster_server_jobname(num_ps = 4, num_workers = 5)
  MY_GPU = task_index % NUM_GPUS

  if job_name == "ps":
    server.join()
  elif job_name == "worker":

    is_chief = (task_index == 0)
    # Assigns ops to the local worker by default.
    with tf.device(tf.train.replica_device_setter(
            worker_device='/job:worker/task:{}/gpu:{}'.format(task_index, MY_GPU),
            cluster=cluster)):

      loss,accuracy,input_tensor,true_output_tensor = get_loss_accuracy_ops()

      global_step = tf.Variable(0,trainable=False)
      optimizer = tf.train.AdagradOptimizer(0.01)
      if sync_mode:
        optimizer = tf.train.SyncReplicasOptimizer(optimizer,replicas_to_aggregate=num_workers,
          replica_id=task_index,total_num_replicas=num_workers)

      train_op = optimizer.minimize(loss, global_step=global_step)

      if sync_mode and is_chief:
        # Initial token and chief queue runners required by the sync_replicas mode
        chief_queue_runner = optimizer.get_chief_queue_runner()
        init_tokens_op = optimizer.get_init_tokens_op()

      saver = tf.train.Saver()
      summary_op = tf.merge_all_summaries()
      init_op = tf.initialize_all_variables()

    # Create a "supervisor", which oversees the training process.
    sv = tf.train.Supervisor(is_chief=is_chief,logdir="/tmp/train_logs",init_op=init_op,summary_op=summary_op,
                             saver=saver,global_step=global_step,save_model_secs=600)

    mnist = input_data.read_data_sets(data_dir, one_hot=True)

    # The supervisor takes care of session initialization, restoring from
    # a checkpoint, and closing when done or an error occurs.
    config = tf.ConfigProto(allow_soft_placement=True)
    with sv.prepare_or_wait_for_session(server.target,config=config) as sess:
      if sync_mode and is_chief:
        sv.start_queue_runners(sess,[chief_queue_runner])
        sess.run(init_tokens_op)

      step = 0
      start = time.time()
      while not sv.should_stop() and step < 1000:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        train_feed = {input_tensor: batch_xs, true_output_tensor: batch_ys,
                      K.learning_phase(): 1}

        _, step, curr_loss, curr_accuracy = sess.run(
            [train_op, global_step, loss, accuracy], feed_dict=train_feed)
        sys.stdout.write('\rWorker {}, step: {}, loss: {}, accuracy: {}'.format(
            task_index, step, curr_loss, curr_accuracy))
        sys.stdout.flush()

    # Ask for all the services to stop.
    sv.stop()
    print('Elapsed: {}'.format(time.time() - start))
Example #14
def main():
    # Load the input data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Setup variables and placeholders
    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    # Implement our model
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    
    # Placeholder to input the correct answers
    y_ = tf.placeholder(tf.float32, [None, 10])
    
    # Implement cross-entropy
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Apply an optimization algorithm to reduce the cost
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    
    # Initialize variables
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    # Now let's train!
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Evaluate our model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_:mnist.test.labels}))
    def test_fully_connected(self):
#         self.mock.verbose = True
        self.mock.loop_cnt = 65
        self.mock.add_layer(784)
        self.mock.add_cnn()
        self.mock.add_pool()
        self.mock.add_cnn()
        self.mock.add_pool()
        self.mock.add_layer(1024, act_func=tf.nn.relu)
        self.mock.add_layer(10, act_func=tf.nn.softmax)
        self.mock.set_entropy_func(self.mock.entropy_log)
        
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets("down/", one_hot=True)

        def get_feed(x=None):
            b = mnist.train.next_batch(100)
            feed = {self.mock.input:b[0], self.mock.target:b[1]}
            return feed
            
        self.mock.get_feed_before_loop = get_feed
        self.mock.get_feed_each_one_step = get_feed
        
#         def print_entropy(i, sess, feed):
#             print(sess.run(self.mock.entropy, feed))
#         self.mock.after_one_step = print_entropy

        self.mock.learn()
        self.assertTrue(0.5 < self.mock.last_acc, 'less than 0.5 acc %2.3f' % self.mock.last_acc)
Example #16
def main(_):
    # MNIST data
    # mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    
    # Model definition
    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    # Multiply x by W
    y = tf.matmul(x, W) + b

    # Ten output classes
    y_ = tf.placeholder(tf.float32, [None, 10])
    
    # Loss function: reduce_mean(softmax_cross_entropy_with_logits)
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    # Gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Start training
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Test: argmax(y, 1) picks the max of each row, i.e. the predicted digit
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                         y_: mnist.test.labels}))
Example #17
    def __init__(self, name='mnist', source='./data/mnist/', one_hot=True, batch_size=64, seed=0):


        self.name            = name
        self.source          = source
        self.one_hot         = one_hot
        self.batch_size      = batch_size
        self.seed            = seed
        np.random.seed(seed) # To make your "random" minibatches the same as ours

        self.count           = 0

        tf.set_random_seed(self.seed)  # Fix the random seed for randomized tensorflow operations.

        if name == 'mnist':
            self.mnist = input_data.read_data_sets(source)
            self.data  = self.mnist.train.images
            print('data shape: {}'.format(np.shape(self.data)))
            self.minibatches = self.random_mini_batches(self.data.T, self.batch_size, self.seed)
        elif name == 'cifar10':
            # download data files from: 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' 
            # extract into the correct folder
            data_files = ['data_batch_1','data_batch_2','data_batch_3','data_batch_4','data_batch_5']
            self.data, _ = read_cifar10(source, data_files)
            self.minibatches = self.random_mini_batches(self.data.T, self.batch_size, self.seed)
        elif name == 'celeba':
            # Count number of data images
            self.im_list  = list_dir(source, 'jpg')
            self.nb_imgs  = len(self.im_list)
            self.nb_compl_batches = int(math.floor(self.nb_imgs / self.batch_size))
            self.nb_total_batches = self.nb_compl_batches
            if self.nb_imgs % batch_size != 0:
                self.nb_total_batches = self.nb_compl_batches + 1
            self.count = 0
            self.color_space = 'RGB'
def main(args):
    data_dir = "/tmp/data/"
    mnist = input_data.read_data_sets(data_dir, one_hot=True)

    n_classes = 10
    batch_size = 128
    keep_rate = args.keep_rate
    epochs = args.epoch
    l_r = args.learning_rate
    device_type = args.device_type

    # place holder for training input
    x_input = tf.placeholder(tf.float32, [None, 784])
    y_target = tf.placeholder(tf.float32, [None, 10])

    x_test_input = mnist.test.images
    y_test_target = mnist.test.labels

    print(mnist.train.num_examples)

    predictions_arr, acc_metric, sess_inst = train_neural_network(data=mnist, x_placeholder=x_input,
                                                    y_placeholder=y_target, epochs=epochs, no_of_classes=n_classes,
                                keep_rate=keep_rate, device_type=device_type, batch_size=batch_size, learning_rate=l_r)

    display_results(prediction_inst=predictions_arr, input_data=x_test_input, target_label=y_test_target, session_instance=sess_inst,
                    x_buffer_placeholder=x_test_input)
def load_data(train_images_file, train_labels_file, included_operators = ["+", "-", "*", "/"]):
    
    mnist = input_data.read_data_sets('MNIST_DATA', one_hot=True)
    
    symbols = np.load(train_images_file)
    symbol_labels = np.load(train_labels_file)
    
    digits = mnist.train.images
    digit_labels = mnist.train.labels
    
    symbol_indices = []
    
    if "+" in included_operators:
        symbol_indices.append(0)
    if "-" in included_operators:
        symbol_indices.append(1)
    if "*" in included_operators:
        symbol_indices.append(2)
    if "/" in included_operators:
        symbol_indices.append(3)
    
    # Mark symbols whose label matches one of the included operators
    idx = np.sum(symbol_labels[:, symbol_indices], axis=1)

    # Keep the labels of the symbols matching the included operators
    filtered_symbol_labels = symbol_labels[idx > 0, :]

    # Keep the symbols matching the included operators
    filtered_symbols = symbols[idx > 0, :, :]
    
    return [digits, digit_labels, filtered_symbols, filtered_symbol_labels]
Example #20
def main(_):
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    # input
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    
    # parameters
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    # computation
    y = tf.matmul(x, W) + b
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    
    # run
    sess = tf.InteractiveSession()
    # Train
    tf.initialize_all_variables().run()
    
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                      y_: mnist.test.labels}))
def train_and_eval():
  """Train and evaluate the model."""
  model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir
  print('model directory = %s' % model_dir)

  estimator = build_estimator(model_dir)

  # TensorForest's loss hook allows training to terminate early if the
  # forest is no longer growing.
  early_stopping_rounds = 100
  monitor = random_forest.TensorForestLossHook(early_stopping_rounds)

  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)

  estimator.fit(x=mnist.train.images, y=mnist.train.labels,
                batch_size=FLAGS.batch_size, monitors=[monitor])

  metric_name = 'accuracy'
  metric = {metric_name:
            metric_spec.MetricSpec(
                eval_metrics.get_metric(metric_name),
                prediction_key=eval_metrics.get_prediction_key(metric_name))}

  results = estimator.evaluate(x=mnist.test.images, y=mnist.test.labels,
                               batch_size=FLAGS.batch_size,
                               metrics=metric)
  for key in sorted(results):
    print('%s: %s' % (key, results[key]))
def load_mnist_dataset(mode='supervised', one_hot=True):
    """Load the MNIST handwritten digits dataset.

    :param mode: 'supervised' or 'unsupervised' mode
    :param one_hot: whether to get one hot encoded labels
    :return: train, validation, test data:
            for (X, y) if 'supervised',
            for (X) if 'unsupervised'
    """
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=one_hot)

    # Training set
    trX = mnist.train.images
    trY = mnist.train.labels

    # Validation set
    vlX = mnist.validation.images
    vlY = mnist.validation.labels

    # Test set
    teX = mnist.test.images
    teY = mnist.test.labels

    if mode == 'supervised':
        return trX, trY, vlX, vlY, teX, teY

    elif mode == 'unsupervised':
        return trX, vlX, teX
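Typical usage of the two modes:

# Supervised: images and labels for each split.
trX, trY, vlX, vlY, teX, teY = load_mnist_dataset(mode='supervised')
print(trX.shape, trY.shape)  # (55000, 784) (55000, 10)

# Unsupervised: images only, e.g. for an autoencoder.
trX, vlX, teX = load_mnist_dataset(mode='unsupervised')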
def main(_):
  # Load the MNIST dataset in one-hot encoded form.
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, W) + b

  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])

  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
  #train_step = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)

  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()
  # Train
  for _ in range(10000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                      y_: mnist.test.labels}))
def main():
    data_path = '/home/charlesxu/Workspace/data/MNIST_data/'
    data = input_data.read_data_sets(data_path, one_hot=True)

    original(data)
    widen(data)
    deepen(data)
def get_data(task_name):
    ## Data sets
    if task_name == 'qianli_func':
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = get_data_from_file(file_name='./f_1d_cos_no_noise_data.npz')
    elif task_name == 'f_2D_task2':
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = get_data_from_file(file_name='./f_2d_task2_ml_data_and_mesh.npz')
    elif task_name == 'f_2d_task2_xsinglog1_x_depth2':
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = get_data_from_file(file_name='./f_2d_task2_ml_xsinlog1_x_depth_2data_and_mesh.npz')
    elif task_name == 'f_2d_task2_xsinglog1_x_depth3':
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = get_data_from_file(file_name='./f_2d_task2_ml_xsinlog1_x_depth_3data_and_mesh.npz')
    elif task_name == 'MNIST_flat':
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        X_train, Y_train = mnist.train.images, mnist.train.labels
        X_cv, Y_cv = mnist.validation.images, mnist.validation.labels
        X_test, Y_test = mnist.test.images, mnist.test.labels
    elif task_name == 'hrushikesh':
        with open('../hrushikesh/patient_data_X_Y.json', 'r') as f_json:
            patients_data = json.load(f_json)
        X = patients_data['1']['X']
        Y = patients_data['1']['Y']
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.40)
        X_cv, X_test, Y_cv, Y_test = train_test_split(X_test, Y_test, test_size=0.5)
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = ( np.array(X_train), np.array(Y_train), np.array(X_cv), np.array(Y_cv), np.array(X_test), np.array(Y_test) )
    else:
        raise ValueError('task_name %s does not exist; try an experiment that does' % task_name)
    return (X_train, Y_train, X_cv, Y_cv, X_test, Y_test)
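get_data_from_file is not shown; a plausible sketch, assuming the .npz archives store the six splits under the key names below (the keys are assumptions for illustration):

import numpy as np

def get_data_from_file(file_name):
    # Load pre-split train/validation/test arrays from a NumPy archive.
    with np.load(file_name) as d:
        return (d['X_train'], d['Y_train'], d['X_cv'], d['Y_cv'],
                d['X_test'], d['Y_test'])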
def main(_):
  n_in = 784
  n_out = 10
  n_hidden = 200
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
  w_in = tf.Variable(tf.random_normal([n_in, n_hidden]))
  b_in = tf.Variable(tf.random_normal([n_hidden]))
  w_out = tf.Variable(tf.random_normal([n_hidden, n_out]))
  b_out = tf.Variable(tf.random_normal([n_out]))
  # Create the model
  x = tf.placeholder(tf.float32, [None, n_in])
  h = tf.nn.relu(tf.add(tf.matmul(x, w_in), b_in))
  y = tf.add(tf.matmul(h, w_out), b_out)

  batch_size = 100
  labels = tf.placeholder(tf.float32, [None, n_out])
  cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=y))
  optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)
  with tf.Session() as sess:
    # Train
    sess.run(tf.initialize_all_variables())
    for _ in range(5000):
      batch_xs, batch_ys = mnist.train.next_batch(batch_size)
      sess.run(optimizer, feed_dict={x: batch_xs, labels: batch_ys})
      #print(sess.run(tf.nn.softmax(y), feed_dict={x: batch_xs}))

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        labels: mnist.test.labels}))
    def __init__(self, batch_size):
        from tensorflow.examples.tutorials.mnist import input_data
        self.mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

        self.x = tf.placeholder(tf.float32, shape=[batch_size, 28, 28, 1])
        self.feed_y = tf.placeholder(tf.float32, shape=[batch_size, 10])
        self.y = (2 * self.feed_y) - 1  # map one-hot {0, 1} labels to {-1, +1}
Example #28
    def __init__(self, config, sess):
        self.input_dim = config.input_dim      # 784
        self.z_dim = config.z_dim              # 14
        self.c_cat = config.c_cat              # 10: Category c - 1 hot vector for 10 label values
        self.c_cont = config.c_cont            # 2: Continuous c
        self.d_update = config.d_update        # 2: Run discriminator twice before generator
        self.batch_size = config.batch_size
        self.nepoch = config.nepoch
        self.lr = config.lr                    # Learning rate 0.001
        self.max_grad_norm = config.max_grad_norm  # 40
        self.show_progress = config.show_progress  # False

        self.optimizer = tf.train.AdamOptimizer

        self.checkpoint_dir = config.checkpoint_dir
        self.image_dir = config.image_dir

        home = str(Path.home())
        DATA_ROOT_DIR = os.path.join(home, "dataset", "MNIST_data")
        self.mnist = input_data.read_data_sets(DATA_ROOT_DIR, one_hot=True)

        self.random_seed = 42

        self.X = tf.placeholder(tf.float32, [None, self.input_dim], 'X')
        self.z = tf.placeholder(tf.float32, [None, self.z_dim], 'z')
        self.c_i = tf.placeholder(tf.float32, [None, self.c_cat], 'c_cat')
        self.c_j = tf.placeholder(tf.float32, [None, self.c_cont], 'c_cont')
        self.c = tf.concat([self.c_i, self.c_j], axis=1)
        self.z_c = tf.concat([self.z, self.c_i, self.c_j], axis=1)

        self.training = tf.placeholder_with_default(False, shape=(), name='training')

        self.sess = sess
Example #29
def runMNIST(self):
    # This calls self.createNetwork, so it is presumably a method of the
    # network class; a self parameter is added accordingly.
    imageSize = 4
    imageChannels = 1

    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    self.createNetwork("fullyConnected", imageSize)
def main():

    sess = tf.Session()
    cnn = CNN(sess)

    sess.run(tf.global_variables_initializer())

    # Load the MNIST Data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Training
    for epoch in range(TRAINING_EPOCH):

        cost = 0.
        total_batch = int(mnist.train.num_examples / BATCH_SIZE)

        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)
            c, _ = cnn.train(batch_xs, batch_ys)
            cost += c

        avg_cost = cost / total_batch

        print('Epoch #%2d' % (epoch+1))
        print('- Average cost: %4f' % (avg_cost))

    # Testing
    print('Accuracy:', cnn.get_accuracy(mnist.test.images, mnist.test.labels))
Example #31

def conv_layer(input, shape):
    W = weight_variable(shape)
    b = bias_variable([shape[3]])
    return tf.nn.relu(conv2d(input, W) + b)


def full_layer(input, size):
    in_size = int(input.get_shape()[1])
    W = weight_variable([in_size, size])
    b = bias_variable([size])
    return tf.matmul(input, W) + b


mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

x_image = tf.reshape(x, [-1, 28, 28, 1])
conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
conv1_pool = max_pool_2x2(conv1)

conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
conv2_pool = max_pool_2x2(conv2)

conv2_flat = tf.reshape(conv2_pool, [-1, 7 * 7 * 64])
full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

keep_prob = tf.placeholder(tf.float32)
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import random
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

learning_rate = 0.001
training_epochs = 15
batch_size = 100
keep_prob = tf.placeholder(tf.float32)
nb_classes = 10

X = tf.placeholder(tf.float32, [None, 784])
X_img = tf.reshape(X, [-1, 28, 28, 1])  # img 28x28x1 (black/white)
Y = tf.placeholder(tf.float32, [None, nb_classes])

# L1 input shape = (?, 28, 28, 1)
W1 = tf.Variable(tf.random_normal([3, 3, 1, 32],
                                  stddev=0.01))  # filter size, channels, number of filters
# W1 = tf.get_variable("W1", shape=[3,3,1,32], initializer=tf.contrib.layers.xavier_initializer())
# After conv -> (?, 28, 28, 32)
# After pool -> (?, 14, 14, 32)
L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
# print(L1)
L1 = tf.nn.relu(L1)
L1 = tf.nn.max_pool(L1,
                    ksize=[1, 2, 2, 1],
                    strides=[1, 2, 2, 1],
                    padding='SAME')  # max-pooling with stride 2
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
from __future__ import print_function

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/", one_hot=True)
import tensorflow as tf

# Parameters of the model
print("Defining the general parameters of the model")
learning_rate = .001
training_epochs = 15
batch_size = 100
display_step = 1

# Network Parameters
print("Defining Networks Parameters")
n_hidden_1 = 256  # 1st layer number of features
n_hidden_2 = 256  # 2nd layer number of features
n_input = 784  # MNIST data input
n_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

# define model


def multilayer_perceptron(x, weights, biases):
    """ Implementation of a 2 layer neural network """
    # 1st hidden layer
'''
A nearest neighbor learning algorithm example using TensorFlow library.
This example is using the MNIST database of handwritten digits
'''

from __future__ import print_function

import numpy as np
import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# In this example, we limit mnist data
Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)
Xte, Yte = mnist.test.next_batch(200) #200 for testing

# tf Graph Input
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])

# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
# Prediction: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initializing the variables
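The snippet breaks off at the variable-initialization comment; a sketch of the standard continuation, reconstructed from the setup above, loops over the test points and scores the 1-NN predictions:

init = tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)
    # For each test image, find the closest training image
    # and use its label as the prediction.
    for i in range(len(Xte)):
        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
            accuracy += 1. / len(Xte)
    print("Accuracy:", accuracy)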
Example #35
def main(argv=None):
    modelpath = "/tmp/model.ckpt"
    
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir)
    g1 = tf.Graph()
    with g1.as_default():
        images, labels, is_training, logits, loss, acc = conv_network(
            FLAGS.batch_size)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
        init = tf.global_variables_initializer()
        with tf.variable_scope('conv2d') as scope:
            scope.reuse_variables()
            kernel = tf.get_variable('kernel')
            k_min = tf.reduce_min(kernel)
            k_max = tf.reduce_max(kernel)
            I0 = (kernel - k_min) / (k_max - k_min)
            tf.summary.image('filters', tf.transpose(I0, [3, 0, 1, 2]),
                             max_outputs=32)
        summary = tf.summary.merge_all()
        
        saver = tf.train.Saver() # included Saver
        
        sess = tf.Session()
        summary_writer = tf.summary.FileWriter('./logs/' + FLAGS.run,
                                               sess.graph)                                               
    
        sess.run(init)

        for it in range(FLAGS.max_iter):
            image_b, label_b = data_sets.train.next_batch(FLAGS.batch_size)
            _, lv, av = sess.run([train_op, loss, acc],
                                 feed_dict={images: image_b,
                                            labels: label_b,
                                            is_training: True})
            if (it % 50 == 0):
                summary_str = sess.run(summary, feed_dict={images: image_b,
                                                           labels: label_b,
                                                           is_training: True})
                summary_writer.add_summary(summary_str, it)
                summary_writer.flush()
            if (it % 500 == 0):
                msg = 'Iteration {:5d}: loss is {:7.4f}, accuracy is {:6.2f}%'
                print(msg.format(it, lv, av))
        print('Training completed')
        
        save_path = saver.save(sess, modelpath) # Save parameters to disk
        print("Model saved in file: %s" % save_path)
        
    g2 = tf.Graph()
    with g2.as_default():
        images, labels, is_training, logits, loss, acc = conv_network(
            FLAGS.batch_size)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
        init = tf.global_variables_initializer()
        with tf.variable_scope('conv2d') as scope:
            scope.reuse_variables()
            kernel = tf.get_variable('kernel')
            k_min = tf.reduce_min(kernel)
            k_max = tf.reduce_max(kernel)
            I0 = (kernel - k_min) / (k_max - k_min)
            tf.summary.image('filters', tf.transpose(I0, [3, 0, 1, 2]),
                             max_outputs=32)
        summary = tf.summary.merge_all()
        
        sess2 = tf.Session()
        summary_writer = tf.summary.FileWriter('./logs/' + FLAGS.run,
                                               sess2.graph)
        sess2.run(init)
        saver = tf.train.Saver()  # included Saver
        saver.restore(sess2, modelpath)

        avg_accuracy = 0.0
        n_evals = data_sets.test.num_examples // FLAGS.batch_size
        for i in range(n_evals):
            image_b, label_b = data_sets.test.next_batch(FLAGS.batch_size)
            # Evaluation only: do not run train_op here, or the weights
            # would keep updating on test data.
            lv, av = sess2.run([loss, acc],
                               feed_dict={images: image_b,
                                          labels: label_b,
                                          is_training: False})
            avg_accuracy += av
        avg_accuracy /= n_evals
        print('Test accuracy is {:.2f}%'.format(avg_accuracy))
def run_main():

    # Define training hyperparameters
    learning_rate = 0.001
    training_iters = 200000
    batch_size = 128
    display_step = 10

    # Define network parameters
    n_input = 784
    n_classes = 10
    dropout = 0.75

    # Input placeholders
    x = tf.placeholder(tf.float32, [None, n_input])
    y = tf.placeholder(tf.float32, [None, n_classes])
    keep_prob = tf.placeholder(tf.float32)

    # Define all network weights
    weights = {
        'wc1': tf.Variable(tf.random_normal([11, 11, 1, 96])),
        'wc2': tf.Variable(tf.random_normal([5, 5, 96, 256])),
        'wc3': tf.Variable(tf.random_normal([3, 3, 256, 384])),
        'wc4': tf.Variable(tf.random_normal([3, 3, 384, 384])),
        'wc5': tf.Variable(tf.random_normal([3, 3, 384, 256])),
        'wd1': tf.Variable(tf.random_normal([2*2*256, 4096])),
        'wd2': tf.Variable(tf.random_normal([4096, 4096])),
        'out': tf.Variable(tf.random_normal([4096, n_classes]))
    }

    biases = {
        'bc1': tf.Variable(tf.random_normal([96])),
        'bc2': tf.Variable(tf.random_normal([256])),
        'bc3': tf.Variable(tf.random_normal([384])),
        'bc4': tf.Variable(tf.random_normal([384])),
        'bc5': tf.Variable(tf.random_normal([256])),
        'bd1': tf.Variable(tf.random_normal([4096])),
        'bd2': tf.Variable(tf.random_normal([4096])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }

    # Build the model
    pred = alex_net(x, weights, biases, keep_prob)
    # Loss function and optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
    tf.summary.scalar('cost', cost)
    optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(cost)

    # Evaluation metrics
    correct_pred = tf.equal(tf.arg_max(pred, 1), tf.arg_max(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

    merged_summaries = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./logs/train', tf.get_default_graph())
    # train_writer.close()

    # ================== Start training the model ===========================
    # Let GPU memory grow on demand
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # Initialize variables
    init = tf.global_variables_initializer()
    with tf.Session(config=config) as sess:
        sess.run(init)
        step = 1
        # First, load the data
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets("../../MNIST_data/", one_hot=True)

        while step*batch_size < training_iters:
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, s= sess.run([optimizer, merged_summaries], feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
            train_writer.add_summary(s, step)

            if step % display_step == 0:
                loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y, keep_prob: 1})

                print("Iter " + str(step*batch_size) + ", Minibatch Loss= " +
                      "{:.6f}".format(loss) + ", Training Accuracy= " +
                      "{:.5f}".format(acc))

            step += 1
        print("Optimization Finished!")
def main(restore_save=False):
    """
    Placeholders for input 
    NB images are expressed in terms of vector and not matrices. 
    """
    x = tf.placeholder(tf.float32, shape=[None, 784])

    # Placeholder for targets
    targets = tf.placeholder(tf.float32, shape=[None, 10])

    # Placeholder for discerning train/eval mode
    is_training = tf.placeholder(dtype=tf.bool)

    # Define global step to indicize checkpoint saves
    global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step")

    mnist = input_data.read_data_sets('/tmp/mnist', one_hot=True)

    model = MultiLayerPerceptron([256, 64, 10], x, targets, is_training, step=global_step, learning_rate=0.0000975)
    init_op = tf.global_variables_initializer()

    writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
    saver = tf.train.Saver(model.get_vars_to_save())
    with tf.Session() as sess:
        writer = tf.summary.FileWriter(logdir, sess.graph)

        # Initialize all variables before optionally restoring; running the
        # initializer after restore would overwrite the restored weights.
        sess.run(init_op)

        if restore_save:
            # Check whether there is a checkpoint
            ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))

            # Check whether there is a valid checkpoint path
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Checkpoint restored: {}".format(ckpt.model_checkpoint_path))

        # sess = tf_debug.LocalCLIDebugWrapperSession(sess)

        # Training parameters
        training_epochs = 50
        batch_size = 128

        # Number of batches to process to see whole dataset
        batches_each_epoch = mnist.train.num_examples // batch_size

        for epoch in range(training_epochs):

            # During training measure accuracy on validation set to have an idea of what's happening
            val_accuracy = sess.run(fetches=model.accuracy,
                                    feed_dict={x: mnist.validation.images,
                                               targets: mnist.validation.labels,
                                               is_training: False})
            
            saver.save(sess, 
                'checkpoints/mlp.ckpt',
                global_step=global_step)

            print(
                'Epoch: {:06d} - VAL accuracy: {:.03f} %'.format(epoch, val_accuracy * 100))

            for index in range(batches_each_epoch):

                # Load a batch of training data
                x_batch, target_batch = mnist.train.next_batch(batch_size)

                # Actually run one training step here
                _, summary = sess.run(fetches=[model.optimize, model.summary],
                         feed_dict={x: x_batch, targets: target_batch, is_training: True})
                writer.add_summary(summary, global_step=epoch * batches_each_epoch + index)

        test_accuracy = sess.run(fetches=model.accuracy,
                                 feed_dict={x: mnist.test.images,
                                            targets: mnist.test.labels,
                                            is_training: False})
        print('*' * 50)
        print('Training ended. TEST accuracy: {:.03f} %'.format(
            test_accuracy * 100))
    writer.close()
Example #38
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.reset_default_graph()

# Hyper Parameters
learning_rate = 0.01  # learning rate
n_steps = 28  # LSTM unroll steps (sequence length)
n_inputs = 28  # number of input units per step
n_hiddens = 64  # number of hidden units
n_layers = 2  # number of LSTM layers
n_classes = 10  # number of output classes

# data
mnist = input_data.read_data_sets(
    "C:/Users/Administrator/Desktop/深度学习小例子/MNIST_data/", one_hot=True)
test_x = mnist.test.images
test_y = mnist.test.labels

# tensor placeholder
with tf.name_scope('inputs'):
    x = tf.placeholder(tf.float32, [None, n_steps * n_inputs],
                       name='x_input')  # input
    y = tf.placeholder(tf.float32, [None, n_classes], name='y_input')  # output
    keep_prob = tf.placeholder(tf.float32,
                               name='keep_prob_input')  # keep probability for dropout
    batch_size = tf.placeholder(tf.int32, [], name='batch_size_input')  # batch size

# weights and biases
with tf.name_scope('weights'):
    Weights = tf.Variable(tf.truncated_normal([n_hiddens, n_classes],
Example #39
#!/usr/bin/python
# coding:utf-8

import numpy as np
import math
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore')

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100
X_holder = tf.placeholder(tf.float32)
y_holder = tf.placeholder(tf.float32)


def t0():
    c = tf.truncated_normal([5, 5, 1, 32], stddev=0.1)
    with tf.Session() as sess:
        print(sess.run(c))
    pass


def mnist_show_image():
    image = mnist.train.images[998].reshape(-1, 28)
    plt.subplot(131)
    plt.imshow(image)
    plt.axis('off')
    plt.subplot(132)
    plt.imshow(image, cmap='gray')
Example #40
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('/path/to/MNIST_data', one_hot=True)


def test_network(sess, L2, inn, ans, xs):
    mine = sess.run(L2, feed_dict={xs: inn})
    error = tf.equal(tf.arg_max(mine, 1), tf.arg_max(ans, 1))
    acc = sess.run(tf.reduce_mean(tf.cast(error, tf.float32)))
    return acc


def Add_parameters(input, in_size, out_size, act_func=None):
    # in_size:  the size of the input vector
    # out_size: the size of the output vector
    # act_func: the activation function of the layer
    # input:    the input tensor
    Weight = tf.Variable(tf.random_normal([in_size, out_size]))
    bias = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    result = tf.matmul(input, Weight) + bias
    if act_func is None:
        return result
    else:
        return act_func(result)


def main():
    xs = tf.placeholder(tf.float32, [None, 784])
    ys = tf.placeholder(tf.float32, [None, 10])
    L1 = Add_parameters(xs, 784, 100, tf.nn.sigmoid)
Example #41
def CNN_LeNet_5_Mnist(logs_path):
    """
    LeNet对Mnist数据集进行测试
    :return: 
    """
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    # print(mnist)

    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    x_image = tf.reshape(x, [-1, 28, 28, 1])  # reshape the vector into a matrix; the last dimension is the channel count

    # The first two values are the kernel (patch) size, the third is the number
    # of input channels, and the fourth is the number of kernels, i.e. how many
    # feature maps are produced
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    W_conv2 = weight_variable([5, 5, 32, 64])  # multi-channel convolution producing 64 feature maps
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    tf.summary.scalar("cross_entropy", cross_entropy)

    correct_prediction = tf.equal(tf.arg_max(y_conv, 1), tf.arg_max(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    merged_summary_op = tf.summary.merge_all()
    # Initialize variables
    init_op = tf.global_variables_initializer()

    # Start training
    sess = tf.Session()
    sess.run(init_op)
    # iterate
    # Xtrain, ytrain = get_batch(self.args, self.simrank, self.walks, minibatch * 100, self.tem_simrank)  # use a larger dataset to test the effect
    summary_writer = tf.summary.FileWriter(logs_path,
                                           graph=tf.get_default_graph())

    # for i in range(int(20000)):
    num_examples = 12800 * 2  # set manually here for now
    minibatch = 128
    for epoch in range(20):
        print("iter:", epoch)
        avg_cost = 0.
        total_batch = int(num_examples / minibatch)
        # Loop over all batches
        for i in range(total_batch):
            batchs = mnist.train.next_batch(minibatch)
            batch_xs, batch_ys = batchs[0], batchs[1]
            # batch_xs, batch_ys = next_batch(self.args, self.simrank, self.walks, minibatch, self.tem_simrank,
            #                                 num_examples)

            # and summary nodes
            _, c, summary = sess.run(
                [train_step, cross_entropy, merged_summary_op],
                feed_dict={
                    x: batch_xs,
                    y_: batch_ys,
                    keep_prob: 0.5
                })

            # Write logs at every iteration
            summary_writer.add_summary(summary, epoch * total_batch + i)
            # Compute average loss
            avg_cost += c / total_batch
            if (i % 10 == 0):
                print("i:", i, "   current c:", c, "   ave_cost:", avg_cost)
        # Display logs per epoch step
        # if (epoch + 1) % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost=",
              "{:.9f}".format(avg_cost))

        # Periodically evaluate on the test set
        if epoch % 1 == 0:
            batchs = mnist.train.next_batch(minibatch)
            print("test accuracy %g" % sess.run(accuracy,
                                                feed_dict={
                                                    x: mnist.test.images,
                                                    y_: mnist.test.labels,
                                                    keep_prob: 1.0
                                                }))
"""
A simple feed-forward neural network with two hidden layers.
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset object
mnist = input_data.read_data_sets("/home/xiaonan/Dataset/mnist/", one_hot=True)

# Placeholders for data and labels
x = tf.placeholder(tf.float32, shape=[None, 784], name='x_input')
y = tf.placeholder(tf.float32, shape=[None, 10], name='y_input')

# Hyperparameters
batch_size = 200
learning_rate = 0.1
learning_rate_decay = 0.999
max_steps = 30000  # maximum number of training steps

# Weights, biases, and the current-training-step variable
weight1 = tf.Variable(initial_value=tf.random_normal([784, 500], stddev=0.1, dtype=tf.float32))
bias1 = tf.Variable(tf.constant(0.1, shape=[500]))
weight2 = tf.Variable(initial_value=tf.random_normal([500, 10], stddev=0.1, dtype=tf.float32))
bias2 = tf.Variable(tf.constant(0.1, shape=[10]))
current_train_step = tf.Variable(0, trainable=False)  # current training step; trainable=False keeps it out of gradient updates

# Network structure
layer1 = tf.nn.relu(tf.matmul(x, weight1) + bias1)
y_ = tf.matmul(layer1, weight2) + bias2

# Use cross-entropy as the loss to measure the gap between predicted and true labels
Example #43
def main(arg_in):
    print(arg_in)

    print(LOG_DIR)
    if os.path.exists(LOG_DIR):
        shutil.rmtree(LOG_DIR)

    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    data = mnist.train
    # generate translated Dataset
    i_r, i_c = data.images.shape
    l_r, l_c = data.labels.shape
    gen_images = np.ndarray((4 * i_r, i_c), dtype=np.float32)
    gen_labels = np.ndarray((4 * l_r, l_c))

    # Shift right
    gen_images[0:i_r, 1:] = data.images[:, :-1]
    gen_labels[0:l_r, :] = data.labels
    for i in range(28):
        gen_images[0:i_r, i * 28] = 0

    # Shift left
    gen_images[i_r:2 * i_r, :-1] = data.images[:, 1:]
    gen_labels[i_r:2 * i_r, :] = data.labels
    for i in range(28):
        gen_images[i_r:2 * i_r, i * 28 + 27] = 0

    # Shift up
    gen_images[2 * i_r:3 * i_r, :-28] = data.images[:, 28:]
    gen_labels[2 * i_r:3 * i_r, :] = data.labels
    gen_images[2 * i_r:3 * i_r, :28] = 0

    # Shift down
    gen_images[3 * i_r:4 * i_r, 28:] = data.images[:, :-28]
    gen_labels[3 * i_r:4 * i_r, :] = data.labels
    gen_images[3 * i_r:4 * i_r, :28] = 0

    # Convert back to pixel values
    gen_images = gen_images * 255
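    # (Note: the DataSet constructor below rescales float32 images back into
    # [0, 1], which is why the pixel values are multiplied by 255 first)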

    mnist_t = DataSet(gen_images, gen_labels, reshape=False, one_hot=True)


    # Input/Output
    x = tf.placeholder(tf.float32, shape=[None, 784], name='X')
    y_ = tf.placeholder(tf.float32, shape=[None, 10], name='labels')

    input_layer = tf.reshape(x, [-1, 28, 28, 1])
    tf.summary.image('x', input_layer)

    conv1 = tf.layers.conv2d(inputs=input_layer,
                             filters=32,
                             kernel_size=[5, 5],
                             padding="same",
                             activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    conv2 = tf.layers.conv2d(inputs=pool1,
                             filters=64,
                             kernel_size=[5, 5],
                             padding="same",
                             activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat,
                            units=1024,
                            activation=tf.nn.relu)
    tf_is_training = tf.placeholder_with_default(True, shape=())
    dropout = tf.layers.dropout(inputs=dense,
                                rate=0.5,
                                training=tf_is_training)
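    # placeholder_with_default keeps dropout active by default during training
    # while letting evaluation runs feed tf_is_training=False to disable it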
    logits = tf.layers.dense(inputs=dropout, units=10)

    with tf.name_scope('Softmax') as scope:
        # per-example softmax: normalize each row of logits
        softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis=1, keepdims=True)
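        # Equivalent to tf.nn.softmax(logits); written out here to make the
        # normalization explicit.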

    y_conv = logits

    with tf.name_scope('Cost') as scope:
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_,
                                                       logits=logits))
        tf.summary.scalar('cross_entropy', cross_entropy)

    with tf.name_scope('train') as scope:
        train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

    with tf.name_scope('accuracy') as scope:
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter(LOG_DIR)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        writer.add_graph(sess.graph)
        data = mnist_t
        # data = mnist.train
        print('Processing {} samples'.format(data.num_examples))
        for i in range(10000):
            batch = data.next_batch(100)
            if i % 100 == 0:
                test_batch = (mnist.test.images, mnist.test.labels)
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    tf_is_training: False
                })
                test_accuracy = accuracy.eval(
                    feed_dict={
                        x: test_batch[0],
                        y_: test_batch[1],
                        tf_is_training: False
                    })
                # saver = tf.train.Saver()
                # saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"), i)

                print('step {}, training accuracy {}, test accuracy:{}'.format(
                    i, train_accuracy, test_accuracy))
            if i % 10 == 0 and 0:  # debug print block, disabled by the 'and 0'
                tests = 1
                e_y = y_conv.eval(
                    feed_dict={
                        x: (batch[0])[:tests],
                        y_: (batch[1])[:tests],
                        tf_is_training: False
                    })
                a_t = (batch[1])[:tests]
                for j in range(len(e_y[0])):
                    print('{}: {:8.2f} - {:4.0f}'.format(
                        j, e_y[0][j], a_t[0][j]))
                print()
            if i % 5 == 0:
                s = sess.run(merged_summary,
                             feed_dict={
                                 x: batch[0],
                                 y_: batch[1],
                                 tf_is_training: False
                             })
                writer.add_summary(s, i)
            train_step.run(feed_dict={
                x: batch[0],
                y_: batch[1],
                tf_is_training: True
            })
        print('test accuracy {}'.format(
            accuracy.eval(feed_dict={
                x: mnist.test.images,
                y_: mnist.test.labels,
                tf_is_training: False
            })))
    writer.close()
    print("Done!!")
Exemple #44
def main(argv=None):
    mnist = input_data.read_data_sets(DATABASE_PATH)
    train(mnist)
    return 0
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.ops import resources

# Ignore all GPUs, tf random forest does not benefit from it.
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../data/mnist", one_hot=False)

# Parameters
learning_rate = 0.001
num_steps = 500 # Total steps to train
batch_size = 1024 # The number of samples per batch
num_classes = 10 # The 10 digits
num_features = 784 # Each image is 28x28 pixels
num_trees = 10
max_nodes = 1000
random_seed = 123

g = tf.Graph()
with g.as_default():
    
    tf.set_random_seed(random_seed)
    # Input and Target data
    X = tf.placeholder(tf.float32, shape=[None, num_features])
    # For random forest, labels must be integers (the class id)
    Y = tf.placeholder(tf.int32, shape=[None])
    X_resized = tf.reshape(X, (-1, 28, 28, 1))
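    # A plausible continuation (assumed; the snippet is cut off here): build
    # the forest from the declared hyperparameters with tensor_forest's
    # standard graph builders.
    hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                          num_features=num_features,
                                          num_trees=num_trees,
                                          max_nodes=max_nodes).fill()
    forest_graph = tensor_forest.RandomForestGraphs(hparams)
    train_op = forest_graph.training_graph(X, Y)
    loss_op = forest_graph.training_loss(X, Y)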
Exemple #46
import tensorflow as tf
import numpy as np
import numpy.random as rd
from tensorflow.examples.tutorials.mnist import input_data
import time
import pickle

mnist = input_data.read_data_sets("../datasets/MNIST", one_hot=True)

# Define the main hyper parameter accessible from the shell
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('n_epochs', 10,
                            'number of training epochs (55000 examples is one epoch)')
tf.app.flags.DEFINE_integer('batch', 10, 'batch size')
tf.app.flags.DEFINE_integer('print_every', 100, 'print every k steps')
tf.app.flags.DEFINE_integer('n1', 300,
                            'Number of neurons in the first hidden layer')
tf.app.flags.DEFINE_integer('n2', 100,
                            'Number of neurons in the second hidden layer')
#
tf.app.flags.DEFINE_float(
    'p01', .01, 'Proportion of connected synapses at initialization')
tf.app.flags.DEFINE_float(
    'p02', .03, 'Proportion of connected synapses at initialization')
tf.app.flags.DEFINE_float(
    'p0out', .3, 'Proportion of connected synapses at initialization')
tf.app.flags.DEFINE_float('l1', 1e-5, 'l1 regularization coefficient')
tf.app.flags.DEFINE_float('gdnoise', 1e-5, 'gradient noise coefficient')
tf.app.flags.DEFINE_float('lr', 0.5, 'Learning rate')
Exemple #47
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                          logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")

optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss, name="training_op")

correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

init = tf.global_variables_initializer()
saver = tf.train.Saver()

from tensorflow.examples.tutorials.mnist import input_data
tf.logging.set_verbosity(tf.logging.ERROR)  # suppress deprecation warning messages
mnist = input_data.read_data_sets("/tmp/data/")
tf.logging.set_verbosity(tf.logging.INFO)

X_train1 = mnist.train.images[mnist.train.labels < 5]
y_train1 = mnist.train.labels[mnist.train.labels < 5]
X_valid1 = mnist.validation.images[mnist.validation.labels < 5]
y_valid1 = mnist.validation.labels[mnist.validation.labels < 5]
X_test1 = mnist.test.images[mnist.test.labels < 5]
y_test1 = mnist.test.labels[mnist.test.labels < 5]
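# The six arrays above keep only the digits 0-4, e.g. to pre-train a network
# on half the classes before transferring to the remaining ones.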

n_epochs = 1000
batch_size = 20

max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
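# The three variables above are presumably early-stopping bookkeeping: stop
# once validation loss fails to improve for max_checks_without_progress checks.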
Exemple #48
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data



import os
os.environ["CUDA_VISIBLE_DEVICES"]="0" #for training on gpu

data = input_data.read_data_sets('data/DIGITS', one_hot=True)

label_dict = { 0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9'}

# Reshape training and testing image
train_X = data.train.images.reshape(-1, 28, 28, 1)
test_X = data.test.images.reshape(-1, 28, 28, 1)

train_y = data.train.labels
test_y = data.test.labels

training_iters = 200 
learning_rate = 0.001 
batch_size = 128

# MNIST data input (img shape: 28*28)
n_input = 28

# MNIST total classes (0-9 digits)
n_classes = 10
Exemple #49
def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    test_model(mnist)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 15:43:23 2018

@author: LQH
"""

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.InteractiveSession(config=config)

mnist = input_data.read_data_sets("E:\Python\MNIST\mnist", one_hot=True)
print("train images shape", mnist.train.images.shape)
print("train labels shape", mnist.train.labels.shape)

def get_weights(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def get_biases(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

X = tf.placeholder(tf.float32, shape=[None, 784])
Y = tf.placeholder(tf.float32, shape=[None, 10])

#fc1
w_fc1 = get_weights([784, 1024])
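# A plausible continuation (assumed; the snippet is truncated here): finish
# the first fully connected layer.
b_fc1 = get_biases([1024])
h_fc1 = tf.nn.relu(tf.matmul(X, w_fc1) + b_fc1)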
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/sentiment_nn/", one_hot=True)

# one hot vector
'''
0 = [1,0,0,0,0,0,0,0,0,0]
1 = [0,1,0,0,0,0,0,0,0,0]
2 = [0,0,1,0,0,0,0,0,0,0]
'''

n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500

n_classes = 10
batch_size = 100

# height x width = 784
x = tf.placeholder(dtype='float', shape=[None, 784])
y = tf.placeholder(dtype='float')


def neural_network_model(data):
    """
    Takes input sentiment_nn and returns output of the output layer
    :param data:
    :return:
    """
    hidden_1_layer = {
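        # (assumed completion; the original snippet is cut off mid-dictionary)
        'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}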
Exemple #52
def main():
    mnist = input_data.read_data_sets("../data/", one_hot=True)
    backward(mnist)
import tensorflow as tf
import cv2
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(train_dir="data", one_hot=True)
batch_size = 1000
test_x, test_y = mnist.test.next_batch(batch_size)
# input_image = "./data/111.jpg"
# gray = cv2.cvtColor(cv2.imread(test_x), cv2.COLOR_BGR2GRAY)
# size = gray.shape
# temp = cv2.resize(gray, (28, 28))
# cv2.imshow('image', temp)
# temp = np.reshape(temp, (-1, 784))
with tf.Session() as sess:
    new_saver = tf.train.import_meta_graph('./model/mnist.ckpt-200.meta')
    new_saver.restore(sess, tf.train.latest_checkpoint('./model'))
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name("x:0")
    y_pre = graph.get_tensor_by_name("y_label:0")
    label = sess.run(y_pre, feed_dict={x: test_x})
    num = tf.equal(label, tf.argmax(test_y, 1))
    # sum of correct predictions over the 1000-sample batch, divided by 10 to
    # express it as a percentage
    correct_prediction = tf.reduce_sum(tf.cast(num, tf.float32)) / 10
    accuracy = sess.run(correct_prediction, feed_dict={x: test_x})
    print(accuracy)


def main():
    #Read MNIST dataset
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    #Create The Model
    #----------------
    #Define input tensor
    #--Each entry of the tensor is a pixel intensity between 0 and 1
    #--x = [number of images, width*height of image]
    x = tf.placeholder(tf.float32, shape=[None, 784], name='modelInput')

    #Define loss and optimizer tensor
    #--This tensor holds the actual distribution for the labels of each image.
    #--y_ = [number of images, number of classes]
    y_ = tf.placeholder(tf.float32, [None, 10], "inputLabels")

    #Define weights tensor
    #--This tensor holds the information about how pixel intensity indicates a certain class.
    #  For a pixel of high intensity, the weight is positive if it is evidence in favor of the
    #  image being in some class and negative if it is not.
    #--W = [width*height of image, number of classes]
    W = tf.Variable(tf.zeros([784, 10]), name='modelWeights')

    #Define bias tensor
    #--This tensor holds the bias information; extra evidence to represent
    #  that some things are more likely independent of the input.
    #--b = [number of classes]
    b = tf.Variable(tf.zeros([10]), name='modelBias')

    #Define softmax output of model
    #--Softmax serves as an activation function that normalizes the evidence into
    #  a probability distribution that an image is in some class
    #--y = [number of images, number of classes]
    logits = tf.matmul(x, W) + b
    y = tf.nn.softmax(logits, name='modelOutput')
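    #--For intuition (illustrative numbers, not from the original): evidence
    #  scores [2.0, 1.0, 0.1] become probabilities [0.659, 0.242, 0.099],
    #  which sum to 1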

    #Define Cross Entropy
    #----------------------------------
    #--Cross entropy is a measure of how inefficient our predictions are at describing
    #  the actual labels of the images.
    #--The fused op expects raw logits, not softmax probabilities
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
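    #--Illustrative check (assumed numbers): with one-hot label [0, 1, 0] and
    #  predicted distribution [0.1, 0.7, 0.2], cross entropy = -log(0.7) ≈ 0.357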

    #Train the model by minimizing cross entropy
    #--Minimizing cross entropy (the loss) makes the model better at
    #  predicting the correct label
    train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
        cross_entropy)

    #Train Model
    #-----------
    sess = tf.Session()
    init = tf.global_variables_initializer()

    sess.run(init)
    saver = tf.train.Saver()

    #Determine if the predicted class matches the actual label
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

    #Determine the percentage of images our model correctly predicted
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    #Run the training loop for TRAIN_STEPS + 1 steps
    for i in range(TRAIN_STEPS + 1):
        #Run training step for batch of 100 images
        batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        #Print accuracy and loss
        print('Training Step:' + str(i) + '  Accuracy = ' + str(
            sess.run(accuracy, {
                x: mnist.test.images,
                y_: mnist.test.labels
            }) * 100) + "%" + '	 Loss = ' +
              str(sess.run(cross_entropy, {
                  x: batch_xs,
                  y_: batch_ys
              })))
        #Save learned weights of model to checkpoint file
        if i % 5 == 0:
            out = saver.save(sess,
                             SAVED_MODEL_PATH + MODEL_NAME + '.ckpt',
                             global_step=i)

    #Print final accuracy of model
    print('Final Accuracy: ' + str(
        sess.run(accuracy,
                 feed_dict={
                     x: mnist.test.images,
                     y_: mnist.test.labels
                 })))

    #Save model definition
    tf.train.write_graph(sess.graph_def, SAVED_MODEL_PATH,
                         MODEL_NAME + '.pbtxt')
    tf.train.write_graph(sess.graph_def,
                         SAVED_MODEL_PATH,
                         MODEL_NAME + '.pb',
                         as_text=False)

    #Freeze the graph
    #----------------
    #Input graph is our saved model defined above
    input_graph = SAVED_MODEL_PATH + MODEL_NAME + '.pb'
    #Use default graph saver
    input_saver = ""
    #Input file is a binary file
    input_binary = True
    #Checkpoint file to merge with graph definition
    input_checkpoint = SAVED_MODEL_PATH + MODEL_NAME + '.ckpt-' + str(
        TRAIN_STEPS)
    #Output nodes in model
    output_node_names = 'modelOutput'
    restore_op_name = 'save/restore_all'
    filename_tensor_name = 'save/Const:0'
    #Output path
    output_graph = SAVED_MODEL_PATH + 'frozen_' + MODEL_NAME + '.pb'
    clear_devices = True
    initializer_nodes = ""
    #Freeze
    freeze_graph.freeze_graph(
        input_graph,
        input_saver,
        input_binary,
        input_checkpoint,
        output_node_names,
        restore_op_name,
        filename_tensor_name,
        output_graph,
        clear_devices,
        initializer_nodes,
    )
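    #A minimal sketch (assumed, not part of the original) of loading the frozen
    #graph back for inference:
    #    with tf.gfile.GFile(output_graph, 'rb') as f:
    #        graph_def = tf.GraphDef()
    #        graph_def.ParseFromString(f.read())
    #    tf.import_graph_def(graph_def, name='')
    #    y_out = tf.get_default_graph().get_tensor_by_name('modelOutput:0')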
Exemple #55
def main(argv):
    mnist = input_data.read_data_sets('../MNIST_data/', one_hot=True)
    # mnist.train.next_batch()

    # Build the auto encoder
    auto_encoder = tf.estimator.Estimator(
        model_fn=sparase_autoencoder,
        model_dir='../log/SAE',
        params={
            'encoder_units': [],
            'encoder_result_units': 200,
            'decoder_units': [],
        }
    )

    # Train the model
    auto_encoder.train(
        input_fn=lambda: input_fn('train', mnist.train.images, mnist.train.images, batch_size=128),
        steps=3000
    )

    # # Evaluate the model
    # eval_result = auto_encoder.evaluate(
    #     input_fn=lambda : input_fn(
    #         function='eval',
    #         features=mnist.test.images[:20],
    #         labels=mnist.test.images[:20],
    #         batch_size=20
    #     )
    # )

    # Build mnist classifier
    mnist_classifier = tf.estimator.Estimator(
        model_fn=pic_classifier,
        model_dir='../log/classifier',
        params={
            'units': [128],
            'n_classes': 10,
        }
    )

    # Get training input data from previous auto encoder
    classifier_gen = auto_encoder.predict(
        input_fn=lambda: input_fn('predict', mnist.train.images, batch_size=mnist.train.images.shape[0])
    )


    # Train classifier
    mnist_classifier.train(
        input_fn=lambda: input_fn_gen('train', classifier_gen, mnist.train.labels, batch_size=128),
        steps=3000
    )

    # Get test data
    classifier_gen_test = auto_encoder.predict(
        input_fn=lambda: input_fn('predict', mnist.test.images, batch_size=mnist.test.images.shape[0])
    )

    # Evaluate classifier
    classifier_eval_result = mnist_classifier.evaluate(
        input_fn=lambda: input_fn_gen('eval', classifier_gen_test, mnist.test.labels, mnist.test.labels.shape[0])
    )

    print('\nTest accuracy: {accuracy:0.3f}\n'.format(**classifier_eval_result))
from __future__ import division, print_function, absolute_import

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=False)

# # Visualize decoder setting
# # Parameters
# learning_rate = 0.01
# training_epochs = 5
# batch_size = 256
# display_step = 1
examples_to_show = 10
#
# # Network Parameters
# n_input = 784  # MNIST data input (img shape: 28*28)
#
# # tf Graph input (only pictures)
# X = tf.placeholder("float", [None, n_input])
#
# # hidden layer settings
# n_hidden_1 = 256 # 1st layer num features
# n_hidden_2 = 128 # 2nd layer num features
# weights = {
#     'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
#     'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
#     'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
def main():
    mnist = input_data.read_data_sets(mhp.MNIST_DATA_PATH, one_hot=True)
    backward(mnist)
def main(_):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir,
                                      one_hot=True,
                                      validation_size=10000)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
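    # The 1/sqrt(fan_in/2) scaling below is He-style initialization, which
    # keeps ReLU activations well-conditioned as depth grows (a reading of the
    # code, not stated by the author)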
    W_2 = tf.Variable(tf.random_normal([784, 100]) / tf.sqrt(784.0 / 2))
    b_2 = tf.Variable(tf.random_normal([100]))
    z_2 = tf.matmul(x, W_2) + b_2
    a_2 = tf.nn.relu(z_2)

    W_3 = tf.Variable(tf.random_normal([100, 100]) / tf.sqrt(100.0 / 2))
    b_3 = tf.Variable(tf.random_normal([100]))
    z_3 = tf.matmul(a_2, W_3) + b_3
    a_3 = tf.nn.relu(z_3)

    W_4 = tf.Variable(tf.random_normal([100, 100]) / tf.sqrt(100.0 / 2))
    b_4 = tf.Variable(tf.random_normal([100]))
    z_4 = tf.matmul(a_3, W_4) + b_4
    a_4 = tf.nn.relu(z_4)

    W_5 = tf.Variable(tf.random_normal([100, 10]) / tf.sqrt(100.0))
    b_5 = tf.Variable(tf.random_normal([10]))
    z_5 = tf.matmul(a_4, W_5) + b_5
    a_5 = tf.sigmoid(z_5)

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])

    tf.add_to_collection(tf.GraphKeys.WEIGHTS, W_2)
    tf.add_to_collection(tf.GraphKeys.WEIGHTS, W_3)
    tf.add_to_collection(tf.GraphKeys.WEIGHTS, W_4)
    tf.add_to_collection(tf.GraphKeys.WEIGHTS, W_5)
    regularizer = tf.contrib.layers.l2_regularizer(scale=5.0 / 50000)
    reg_term = tf.contrib.layers.apply_regularization(regularizer)
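    # reg_term adds L2 weight decay over the collected weights; scale =
    # 5.0 / 50000 reads as lambda / n_train, with 50,000 training images left
    # after the 10,000-image validation split (assumed reading)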

    loss = (tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=z_5)) +
            reg_term)

    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    correct_prediction = tf.equal(tf.argmax(a_5, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Train
    best = 0
    for epoch in range(30):
        for _ in range(5000):
            batch_xs, batch_ys = mnist.train.next_batch(10)
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        # Test trained model
        accuracy_current_train = sess.run(accuracy,
                                          feed_dict={
                                              x: mnist.train.images,
                                              y_: mnist.train.labels
                                          })

        accuracy_current_validation = sess.run(accuracy,
                                               feed_dict={
                                                   x: mnist.validation.images,
                                                   y_: mnist.validation.labels
                                               })

        print("Epoch %s: train: %s validation: %s" %
              (epoch, accuracy_current_train, accuracy_current_validation))
        best = max(best, accuracy_current_validation)

    # Test trained model
    print("best: %s" % best)
def nn_example():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Python optimisation variables
    learning_rate = 0.5
    epochs = 10
    batch_size = 100

    # declare the training data placeholders
    # input x - for 28 x 28 pixels = 784
    x = tf.placeholder(tf.float32, [None, 784])
    # now declare the output data placeholder - 10 digits
    y = tf.placeholder(tf.float32, [None, 10])

    # now declare the weights connecting the input to the hidden layer
    W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.03), name='W1')
    b1 = tf.Variable(tf.random_normal([300]), name='b1')
    # and the weights connecting the hidden layer to the output layer
    W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.03), name='W2')
    b2 = tf.Variable(tf.random_normal([10]), name='b2')

    # calculate the output of the hidden layer
    hidden_out = tf.add(tf.matmul(x, W1), b1)
    hidden_out = tf.nn.relu(hidden_out)

    # now calculate the output layer - in this case, let's use a softmax
    # activated output layer
    y_ = tf.nn.softmax(tf.add(tf.matmul(hidden_out, W2), b2))

    # now let's define the cost function which we are going to train the model on
    y_clipped = tf.clip_by_value(y_, 1e-10, 0.9999999)
    cross_entropy = -tf.reduce_mean(tf.reduce_sum(y * tf.log(y_clipped)
                                                  + (1 - y) * tf.log(1 - y_clipped), axis=1))
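    # clipping keeps both log terms finite when the softmax output saturates
    # at exactly 0 or 1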

    # add an optimiser
    optimiser = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)

    # finally setup the initialisation operator
    init_op = tf.global_variables_initializer()

    # define an accuracy assessment operation
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # add a summary to store the accuracy
    tf.summary.scalar('accuracy', accuracy)

    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter('C:\\Users\\Andy\\PycharmProjects')
    # start the session
    with tf.Session() as sess:
        # initialise the variables
        sess.run(init_op)
        total_batch = int(len(mnist.train.labels) / batch_size)
        for epoch in range(epochs):
            avg_cost = 0
            for i in range(total_batch):
                batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size)
                _, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})
                avg_cost += c / total_batch
            print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost))
            summary = sess.run(merged, feed_dict={x: mnist.test.images, y: mnist.test.labels})
            writer.add_summary(summary, epoch)

        print("\nTraining complete!")
        writer.add_graph(sess.graph)
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
Exemple #60
def get_mnist_data():
    mnist = input_data.read_data_sets('Data/MNIST', reshape=False)
    return mnist