def train_mnist_single_machine(num_epochs,
                               use_fake_data=False,
                               device=None,
                               manual_op_exec=False):
  """Train a ConvNet on MNIST.
  Args:
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.
    device: string or None. The covariance and inverse update ops are run on
      this device. If empty or None, the default device will be used.
      (Default: None)
    manual_op_exec: bool. If `True`, `minimize_loss_single_machine_manual`
      is called for training, which handles the inverse and covariance
      computations itself. This is shown only for illustrative purposes.
      Otherwise `minimize_loss_single_machine` is called, which relies on
      `PeriodicInvCovUpdateOpt` for op placement and execution.
  Returns:
    The accuracy of the model on the final minibatch of training data.
  """
  from tensorflow.data import Iterator

  # Load a dataset.
  print ("Loading MNIST into memory.")
  tf.logging.info("Loading MNIST into memory.")
  iter_train_handle, output_types, output_shapes = mnist.load_mnist_as_iterator(
      num_epochs,
      args.batch_size,
      train=True,
      use_fake_data=use_fake_data,
      flatten_images=False)
  # The inflated epoch count just keeps the validation iterator from being
  # exhausted (and causing early termination) before training finishes.
  iter_val_handle, _, _ = mnist.load_mnist_as_iterator(
      10000 * num_epochs,
      10000,
      train=False,
      use_fake_data=use_fake_data,
      flatten_images=False)

  handle = tf.placeholder(tf.string, shape=[])
  iterator = Iterator.from_string_handle(
    handle, output_types, output_shapes)
  next_batch = iterator.get_next()
  (examples, labels) = next_batch

  # Build a ConvNet.
  layer_collection = kfac.LayerCollection()

  loss, accuracy = build_model(
      examples, labels, num_labels=10, layer_collection=layer_collection,
      register_layers_manually=_USE_MANUAL_REG)
  if not _USE_MANUAL_REG:
    layer_collection.auto_register_layers()

  # Without setting allow_soft_placement=True there will be problems when
  # the optimizer tries to place certain ops like "mod" on the GPU (which isn't
  # supported).
  config = tf.ConfigProto(allow_soft_placement=True)

  # Fit model.
  return minimize_loss_single_machine(
      handle, iter_train_handle, iter_val_handle, loss, accuracy,
      layer_collection, device=device, session_config=config)
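
# A minimal driver for train_mnist_single_machine (not part of the original
# source). The function above reads `args.batch_size` from a module-level
# flags/argparse namespace, so a hypothetical `args` is defined here; the
# kfac/mnist helpers and _USE_MANUAL_REG are assumed to come from the
# surrounding example module.
if __name__ == "__main__":
  import argparse
  parser = argparse.ArgumentParser()
  parser.add_argument("--batch_size", type=int, default=64)
  args = parser.parse_args()

  final_acc = train_mnist_single_machine(num_epochs=5,
                                         use_fake_data=True,
                                         device=None)
  tf.logging.info("Final minibatch accuracy: %f", final_acc)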
Example #2
    def loaddata(self):
        """
        Loads tf.data input pipelines for the train, val, and test splits; a
        feedable iterator handle lets the active pipeline be switched at run
        time (a usage sketch follows this method).
        """
        # scale factor for converting uint8 pixels to floats in [0, 1]
        scale = (1.0 / 255.0)
        # per-pixel mean image used for mean centering
        mean = np.load("DataCollection/Pong-v0/mean.npy")
        # Build multi-threaded pipelines from TFRecords; feedable iterators let us switch between them at run time.
        filenames = glob.glob('DataCollection/Pong-v0/'+'train'+'/*.tfrecords')
        datasettrain = tf.data.TFRecordDataset(filenames)
        filenames = glob.glob('DataCollection/Pong-v0/' + 'val' + '/*.tfrecords')
        datasetval = tf.data.TFRecordDataset(filenames)
        filenames = glob.glob('DataCollection/Pong-v0/' + 'test' + '/*.tfrecords')
        datasettest = tf.data.TFRecordDataset(filenames)
        # parse the image and action data and restore their shapes
        datasettrain = datasettrain.map(map_func=self.parse_function, num_parallel_calls=4)
        # repeat so the dataset can be iterated over for multiple epochs
        datasettrain = datasettrain.repeat()
        # shuffle with a fixed-size buffer (queue-like behaviour)
        datasettrain = datasettrain.shuffle(buffer_size=1000)
        #making batches
        datasettrain = datasettrain.batch(32)
        #string handle for train
        iter_train_handle = datasettrain.make_one_shot_iterator().string_handle()

        datasetval = datasetval.map(map_func=self.parse_function, num_parallel_calls=4)
        datasetval = datasetval.repeat()
        datasetval = datasetval.shuffle(buffer_size=1000)  # specify queue size buffer
        datasetval = datasetval.batch(32)
        # string handle for val
        iter_val_handle = datasetval.make_one_shot_iterator().string_handle()

        datasettest = datasettest.map(map_func=self.parse_function, num_parallel_calls=4)
        datasettest = datasettest.repeat()
        datasettest = datasettest.shuffle(buffer_size=1000)  # specify queue size buffer
        datasettest = datasettest.batch(32)
        # string handle for test
        iter_test_handle = datasettest.make_one_shot_iterator().string_handle()
        #handle to indicate which queue we want to switch to or iterate over
        handle = tf.placeholder(tf.string, shape=[])
        # tf.data.Iterator avoids needing a separate `from tensorflow.data import Iterator`
        iterator = tf.data.Iterator.from_string_handle(handle, datasettrain.output_types, datasettrain.output_shapes)
        #get data tensors
        s_t_batch, a_t_batch, x_t_1_batch = iterator.get_next()
        mean_const = tf.constant(mean, dtype=tf.float32)
        # mean-center and scale to [0, 1]; s_t stacks 4 frames, so the
        # single-frame mean is tiled along the channel axis
        s_t_batch = (s_t_batch - tf.tile(mean_const, [1, 1, 4])) * scale
        x_t_1_batch = (x_t_1_batch - mean_const) * scale
        self.s_t_batch = s_t_batch
        self.a_t_batch = a_t_batch
        self.x_t_1_batch = x_t_1_batch
        self.iter_train_handle = iter_train_handle
        self.iter_val_handle = iter_val_handle
        self.iter_test_handle = iter_test_handle
        self.handle = handle
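
    # Sketch (not from the original class) of how the handles prepared by
    # loaddata() are typically consumed: resolve each one-shot iterator's
    # string handle once per session, then feed the chosen handle into
    # self.handle so iterator.get_next() draws from that pipeline.
    def run_steps_sketch(self, sess, num_steps, split='train'):
        handle_train, handle_val, handle_test = sess.run(
            [self.iter_train_handle, self.iter_val_handle, self.iter_test_handle])
        active = {'train': handle_train, 'val': handle_val,
                  'test': handle_test}[split]
        for _ in range(num_steps):
            s_t, a_t, x_t_1 = sess.run(
                [self.s_t_batch, self.a_t_batch, self.x_t_1_batch],
                feed_dict={self.handle: active})
            # ...feed s_t / a_t / x_t_1 into the model's train or eval ops here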
Example #3
import tensorflow as tf
from tensorflow.data import Dataset, Iterator

dataset_train = Dataset.range(10)
dataset_val = Dataset.range(90, 100)

iter_train_handle = dataset_train.make_one_shot_iterator().string_handle()
iter_val_handle = dataset_val.make_one_shot_iterator().string_handle()

handle = tf.placeholder(tf.string, shape=[])
iterator = Iterator.from_string_handle(handle, dataset_train.output_types,
                                       dataset_train.output_shapes)
next_batch = iterator.get_next()

with tf.train.MonitoredTrainingSession() as sess:
    handle_train, handle_val = sess.run([iter_train_handle, iter_val_handle])

    for step in range(10):
        print('train', sess.run(next_batch, feed_dict={handle: handle_train}))

        if step % 3 == 0:
            print('val', sess.run(next_batch, feed_dict={handle: handle_val}))
Example #4
def training():

	batch_size = 10
	noise_type = 'gaussian'
	noise_proportion = 0.2
	noise_mean = 0
	noise_std = 1
	noise_lam = 1
	noise_std_range = [1,5]
	noise_lam_range = [1,5]
	loss_type = 'l2_loss'
	study_rate = 1e-5

	current_dir = Path('.')

	train_true, __, test_true = load_data(dataset='mias',
		DIR=current_dir)

	train_true = train_true.repeat().batch(batch_size)
	train_true = train_true.prefetch(
		buffer_size=tf.data.experimental.AUTOTUNE)

	test_true = test_true.repeat().batch(batch_size)
	test_true = test_true.prefetch(
		buffer_size=tf.data.experimental.AUTOTUNE)

	iter_train_handle = train_true.make_one_shot_iterator().string_handle()
	iter_val_handle = test_true.make_one_shot_iterator().string_handle()

	handle = tf.placeholder(tf.string, shape=[])
	iterator = tf.data.Iterator.from_string_handle(handle,
		train_true.output_types,
		train_true.output_shapes)

	next_batch = iterator.get_next()

	noise_args = {'proportion':noise_proportion}

	if noise_type == 'random':
		noise_fn = random_noise
		noise_args['std_range'] = noise_std_range
		noise_args['lam_range'] = noise_lam_range
	elif noise_type == 'poisson':
		noise_fn = poisson_noise
		noise_args['lam'] = noise_lam
	else:
		noise_fn = gaussian_noise
		noise_args['mean'] = noise_mean
		noise_args['std'] = noise_std


	true_img = tf.placeholder(tf.uint8, 
		shape=[batch_size, 64, 64, 1])

	noised_img = noise_fn(**noise_args,
		image=true_img)

	model_input = tf.cast(noised_img,
		dtype=tf.float32)

	denoised_img = QAE.build_QAE(model_input)

	if loss_type == 'l2_loss':
		train_loss = l2_loss(
			tf.cast(true_img,
				dtype=tf.float32),
			denoised_img)
		# val_loss = l2_loss(
		# 	tf.cast(test_true_img,
		# 		dtype=tf.float32),
		# 	test_denoised_img)

	total_train_loss = train_loss

	optimizer = tf.train.AdamOptimizer(
		learning_rate=study_rate).minimize(
		total_train_loss)

	tf.summary.scalar('train_l2_loss',train_loss)
	# tf.summary.scalar('val_l2_loss',val_loss)


	tf.summary.scalar('total_train_loss',
		total_train_loss)

	merged_summary = tf.summary.merge_all()
	train_writer = tf.summary.FileWriter(
		str(current_dir/'train_data'))  # FileWriter expects a string logdir


	init_vars = tf.group(
		tf.global_variables_initializer(),
		tf.local_variables_initializer())

	saver = tf.train.Saver()


	with tf.Session() as sess:
		global_step = tf.train.get_global_step()

		handle_train, handle_val = sess.run(
			[iter_train_handle, iter_val_handle])

		sess.run(init_vars)

		for step in range(500):
			train_true_img = sess.run(next_batch,
				feed_dict={handle: handle_train})
			test_true_img = sess.run(next_batch,
				feed_dict={handle: handle_val})

			_ = sess.run(optimizer, 
				feed_dict={true_img:train_true_img})

			t_summ = sess.run(merged_summary,
				feed_dict={true_img:train_true_img})

			t_loss = sess.run(total_train_loss,
				feed_dict={true_img:train_true_img})

			train_writer.add_summary(t_summ,step)

			print('Iter:{}, Training Loss {}'.format(
				step, t_loss))

			if step % 20 == 0:
				# The original source is truncated here; presumably a matplotlib
				# figure (e.g. fig, axes = plt.subplots(...)) visualizing the
				# denoised output was created. Left as a no-op placeholder.
				pass

		print('done')
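
# The helpers used above (load_data, QAE.build_QAE, the noise functions and
# l2_loss) come from the surrounding project and are not shown in this example.
# As an illustration only, l2_loss is assumed here to be a plain pixel-wise
# squared-error reduction; a minimal stand-in could look like this:
def l2_loss(reference, reconstruction):
	# mean squared error between the clean and denoised image batches
	return tf.reduce_mean(tf.square(reference - reconstruction))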