Example #1
def evaluate():
	"""Eval CIFAR-10 for a number of steps."""
	with tf.Graph().as_default():
		# Get images and labels for CIFAR-10.
		eval_data = FLAGS.eval_data == 'test'
		images, labels = cifar10.inputs(eval_data=eval_data)
		# Build a Graph that computes the logits predictions from the
		# inference model.
		logits = cifar10.inference(images)
		# Calculate predictions.
		top_k_op = tf.nn.in_top_k(logits, labels, 1)
		# Restore the moving average version of the learned variables for eval.
		variable_averages = tf.train.ExponentialMovingAverage(
				cifar10.MOVING_AVERAGE_DECAY)
		variables_to_restore = {}
		for v in tf.all_variables():
			if v in tf.trainable_variables():
				restore_name = variable_averages.average_name(v)
			else:
				restore_name = v.op.name
			variables_to_restore[restore_name] = v
		saver = tf.train.Saver(variables_to_restore)
		while True:
			eval_once(saver, top_k_op)
			if FLAGS.run_once:
				break
			time.sleep(FLAGS.eval_interval_secs)
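
Several examples on this page (for instance #1, #6, and #8) call an eval_once helper with the signature eval_once(saver, top_k_op) without showing it. Below is a minimal sketch of such a helper in the style of the official CIFAR-10 tutorial; the checkpoint_dir, num_examples, and batch_size flags are assumptions:

import math
import numpy as np
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS  # assumed to define checkpoint_dir, num_examples, batch_size

def eval_once(saver, top_k_op):
    """Restore the latest checkpoint and compute precision @ 1 once."""
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restore the (moving-average) variables from the checkpoint.
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
            return

        # Start the input queue runners so cifar10.inputs() can produce batches.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            num_iter = int(math.ceil(FLAGS.num_examples / float(FLAGS.batch_size)))
            true_count = 0  # counts correct top-1 predictions
            total_sample_count = num_iter * FLAGS.batch_size
            for _ in range(num_iter):
                predictions = sess.run([top_k_op])
                true_count += np.sum(predictions)
            precision = true_count / float(total_sample_count)
            print('precision @ 1 = %.3f' % precision)
        finally:
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)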
Example #2
def evaluate():
    """Eval CIFAR-10 for a number of steps."""
    with tf.Graph().as_default() as g:
        # Get images and labels for CIFAR-10.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = cifar10.inputs(eval_data=eval_data)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = cifar10.inference(images)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # define predict function
        predict_function = tf.argmax(logits, 1)

        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

        predict()
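
Example #2 builds a predict_function op but never runs it, and the final predict() call is not defined in the snippet. A minimal sketch of what such a helper could look like, written here to take the saver and the argmax op as parameters (so the call would become predict(saver, predict_function)); FLAGS.checkpoint_dir is an assumption:

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS  # assumed to define checkpoint_dir

def predict(saver, predict_op):
    """Hypothetical helper: restore the latest checkpoint and run the argmax op once."""
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
            return
        # Queue runners feed the images that logits/predict_op depend on.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            predicted_classes = sess.run(predict_op)  # one batch of class indices
            print(predicted_classes)
        finally:
            coord.request_stop()
            coord.join(threads)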
Example #3
	def InterpBeadError(w1,b1, w2,b2, write = False, name = "00"):
		errors = []
		
		#xdat,ydat = generatecandidate4(.5, .25, .1, 1000)
		
		#xdat,ydat = mnist.train.next_batch(1000)
		
		#xdat = mnist.test.images
		#ydat = mnist.test.labels
		#xdat = np.array(xdat)
		#ydat = np.array(ydat)

		for tt in xrange(20):
			#print tt
			#accuracy = 0.
			t = tt/20.
			thiserror = 0

			#x0 = tf.placeholder("float", [None, n_input])
			#y0 = tf.placeholder("float", [None, n_classes])
			weights, biases = model_interpolate(w1,b1,w2,b2, t)
			#interp_model = multilayer_perceptron(w=weights, b=biases)
			interp_model = convnet(w=weights, b=biases)

			with interp_model.g.as_default():
				
				xdat, ydat = cifar10.inputs(eval_data='test')
				logit_test = interp_model.predict(xdat)
				top_k_op = tf.nn.in_top_k(logit_test, ydat, 1)
				pred = interp_model.predict(xdat)
				init = tf.initialize_all_variables()
				with tf.Session() as sess:
					sess.run(init)
					
					tf.train.start_queue_runners(sess=sess)
					
					num_iter = 20
					true_count = 0  # Counts the number of correct predictions.
					total_sample_count = num_iter * batch_size
					step = 0
					while step < num_iter:
						predictions = sess.run([top_k_op])
						true_count += np.sum(predictions)
						step += 1
					precision = true_count / total_sample_count
					print "Accuracy:", precision
					#,"\t",tt,weights[0][1][0],weights[0][1][1]
					thiserror = 1 - precision
					
			errors.append(thiserror)

		if write == True:
			with open("f" + str(name) + ".out",'w+') as f:
				for e in errors:
					f.write(str(e) + "\n")
		
		return max(errors), np.argmax(errors)
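
Example #3 relies on model_interpolate(w1, b1, w2, b2, t), which is not shown on this page. The natural reading is a convex combination of the two endpoint parameter sets at mixing weight t; a minimal sketch under that assumption, treating each parameter set as a dict of numpy arrays keyed by layer name (the actual container type in the source project is not shown):

import numpy as np

def model_interpolate(w1, b1, w2, b2, t):
    """Linearly interpolate two parameter sets: (1 - t) * first + t * second."""
    weights = {k: (1.0 - t) * np.asarray(w1[k]) + t * np.asarray(w2[k]) for k in w1}
    biases = {k: (1.0 - t) * np.asarray(b1[k]) + t * np.asarray(b2[k]) for k in b1}
    return weights, biases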
Example #4
def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default() as g:
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

    while True:
      for i in range(20):
        eval_once(saver, summary_writer, top_k_op, summary_op,i)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
Example #5
def evaluate():
    """Eval CIFAR-10 for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels for CIFAR-10.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = cifar10.inputs(eval_data=eval_data)
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = cifar10.inference(images)
        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                                graph_def=graph_def)
        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Example #6
def evaluate():
    """Eval CIFAR-10 for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels for CIFAR-10.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = cifar10.inputs(eval_data=eval_data)
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = cifar10.inference(images)
        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = {}
        for v in tf.all_variables():
            if v in tf.trainable_variables():
                restore_name = variable_averages.average_name(v)
            else:
                restore_name = v.op.name
            variables_to_restore[restore_name] = v
        saver = tf.train.Saver(variables_to_restore)
        while True:
            eval_once(saver, top_k_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Example #7
def main():
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)
    with tf.Session() as sess:
        # Build a Graph that computes the logits predictions from the
        # inference model.
        probabilities = tf.nn.softmax(cifar10.inference(images))

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))

            num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))

            submission = []
            true_labels = []

            step = 0
            while step < num_iter and not coord.should_stop():
                submission_batch, true_labels_batch = sess.run(
                    [probabilities, labels])
                submission.append(submission_batch)
                true_labels.append(true_labels_batch)
                step += 1

            submission = np.vstack(submission)
            true_labels = np.concatenate(true_labels)

        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)

    return submission, true_labels
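
Unlike the other examples, Example #7 returns the stacked softmax probabilities and the true labels instead of printing a precision value. A short usage sketch showing how top-1 accuracy could be computed from that return value (the call site itself is an assumption):

import numpy as np

probabilities, true_labels = main()           # shapes: (num_examples, 10) and (num_examples,)
predicted = np.argmax(probabilities, axis=1)  # most likely class per example
accuracy = np.mean(predicted == true_labels)
print('top-1 accuracy = %.3f' % accuracy)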
Example #8
def evaluate():
    images, labels = cifar10.inputs(eval_data=True)
    logits = cifar10.inference(images)
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    variable_averages = tf.train.ExponentialMovingAverage(cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = {}
    for v in tf.all_variables():
        if v in tf.trainable_variables():
            restore_name = variable_averages.average_name(v)
        else:
            restore_name = v.op.name
        variables_to_restore[restore_name] = v
    saver = tf.train.Saver(variables_to_restore)
    eval_once(saver, top_k_op)
Example #9
def evaluate():
    with tf.Graph().as_default() as g:
        # GET THE TEST IMAGES
        eval_data = FLAGS.eval_data == 'test'
        images, labels = cifar10.inputs(eval_data=eval_data)
        logits = cifar10.inference(images)
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        # SUMMARY FOR GRAPH
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Example #10
def evaluate():
    """Eval CIFAR-10 for a number of steps."""
    f = open('/mnt/eval_output.log', 'w')
    f.write("TrainingStep\tPrecision\n")
    f.close()
    with tf.Graph().as_default() as g:
        # Get images and labels for CIFAR-10.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = cifar10.inputs(eval_data=eval_data)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = cifar10.inference(images)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # Restore the moving average version of the learned variables for eval.
        #variable_averages = tf.train.ExponentialMovingAverage(
        #   cifar10.MOVING_AVERAGE_DECAY)
        #variables_to_restore = variable_averages.variables_to_restore()
        #saver = tf.train.Saver(variables_to_restore)
        saver = tf.train.Saver(tf.global_variables())

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)
        training_step = 0
        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op,
                      training_step)
            training_step = training_step + FLAGS.checkpointing_step
            if (training_step > FLAGS.trained_steps):
                break
            if FLAGS.run_once:
                break
Example #11
def visualize_excitations():
    ''' Restore a trained model, and run one of the visualizations. '''
    with tf.Graph().as_default():
        # Get images for CIFAR-10.
        eval_data = FLAGS.eval_data == 'test'
        images, _ = cifar10.inputs(eval_data=eval_data)

        # Get conv2 and pool2 responses
        _, conv2, pool2 = cifar10.inference(images)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                print('No checkpoint file found')
                return

            if FLAGS.excitation_layer == 'conv2':
                channels = np.asarray([0, 31,
                                       63])  # first, 31st, and last channels
                excitation_map = visualize_conv(sess,
                                                images,
                                                conv2,
                                                channels,
                                                half_receptive_field=5,
                                                accum_padding=0,
                                                stride=2,
                                                dst_height=96,
                                                num_images=FLAGS.num_examples)

            elif FLAGS.excitation_layer == 'pool2':
                neurons = np.asarray([
                    [0, 0, 0],  # top-left corner of first map
                    [5, 5, 63],  # bottom-right corner of last map
                    [3, 4, 5]
                ])  # in the middle of 5th map
                excitation_map = visualize_pooling(
                    sess,
                    images,
                    pool2,
                    neurons,
                    half_receptive_field=6,
                    accum_padding=0,
                    stride=4,
                    dst_height=96,
                    num_images=FLAGS.num_examples)

            else:
                raise Exception('add your own layers and parameters')

            excitation_map = cv2.cvtColor(excitation_map, cv2.COLOR_RGB2BGR)
            cv2.imshow('excitations', excitation_map)
            cv2.waitKey(-1)
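
Example #11 passes half_receptive_field, accum_padding, and stride to the visualization helpers; together they describe how a unit's position in conv2 or pool2 maps back to a patch of the input image. A minimal sketch of that mapping using the usual receptive-field arithmetic (the helper name, the sign convention for the padding term, and the 24-pixel default image size are assumptions):

def receptive_field_box(row, col, stride, accum_padding, half_receptive_field, image_size=24):
    """Map a unit at (row, col) in a feature map back to a box in the input image."""
    # Approximate center of the unit's receptive field in image coordinates.
    cy = row * stride - accum_padding
    cx = col * stride - accum_padding
    # Extend half_receptive_field pixels on each side, clipped to the image.
    y0, x0 = max(cy - half_receptive_field, 0), max(cx - half_receptive_field, 0)
    y1 = min(cy + half_receptive_field, image_size - 1)
    x1 = min(cx + half_receptive_field, image_size - 1)
    return y0, x0, y1, x1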
Example #12
FLAGS = tf.app.flags.FLAGS

# import cifar10 data
from tensorflow.models.image.cifar10 import cifar10
cifar10.maybe_download_and_extract()

# global variable to select which (and how many) GPU's to use
# (tensorflow can be hungry with resources if not properly controlled)
gpus_to_use = [3]

# network input (data and correct labels)
# x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
# y_ = tf.placeholder(tf.float32, shape=[None, 10])

train_images, train_labels = cifar10.distorted_inputs()
test_images, test_labels = cifar10.inputs(eval_data=True)

# select stream to use (train or test)
select_test = tf.placeholder(dtype=bool,shape=[],name='select_test')
x = tf.cond(
    select_test,
    lambda:test_images,
    lambda:train_images
)
y_ = tf.cond(
    select_test,
    lambda:test_labels,
    lambda:train_labels
)

# first convolutional layer
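
The snippet is cut off at the first convolutional layer. A minimal sketch of how such a layer could be written against the x tensor selected above, in the same old-TF style; the 5x5 filter size, 64 output channels, and variable names are assumptions, not the project's actual values:

# first convolutional layer (illustrative: 5x5 filters, 3 input channels -> 64 output channels)
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 3, 64], stddev=5e-2), name='W_conv1')
b_conv1 = tf.Variable(tf.constant(0.0, shape=[64]), name='b_conv1')
conv1 = tf.nn.conv2d(x, W_conv1, strides=[1, 1, 1, 1], padding='SAME')
h_conv1 = tf.nn.relu(tf.nn.bias_add(conv1, b_conv1))
pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')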
Example #13
                        checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                        saver.save(sess, checkpoint_path, global_step=step)
            except tf.errors.OutOfRangeError:
                print('Done training -- epoch limit reached')
            finally:
                # When done, ask the threads to stop.
                coord.request_stop()
            # Wait for threads to finish.
            coord.join(threads)
            sess.close()

    # -- evaluation phase : Eval CIFAR-10 for a number of steps --
    if args.eval:
        with tf.Graph().as_default() as g:
            # Get images and labels for CIFAR-10.
            images, labels = cifar10.inputs(eval_data=eval_data == 'test')
            # Build a Graph that computes the logits predictions from the inference model.
            logits = inference(images)
            # Calculate predictions.
            top_k_op = tf.nn.in_top_k(logits, labels, 1)
            averager = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
            # variables_to_restore(): returns the EMA shadow variable if the variable has an EMA, otherwise the variable itself.
            # args: moving_avg_variables: a list of variables whose moving averages are to be
            #       restored. If None, it defaults to tf.moving_average_variables() + tf.trainable_variables()
            variables_to_restore = averager.variables_to_restore()
            # create a saver for all the EMA variables to restore
            saver = tf.train.Saver(variables_to_restore)
            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.merge_all_summaries()
            summary_writer = tf.train.SummaryWriter(eval_dir, g)
            while True:
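
The snippet is truncated inside the evaluation loop. In the other evaluation examples on this page the body of that while True: loop follows the pattern below; the args.run_once and args.eval_interval_secs names are assumptions for this snippet, which reads its options from args rather than FLAGS:

while True:
    eval_once(saver, summary_writer, top_k_op, summary_op)
    if args.run_once:
        break
    time.sleep(args.eval_interval_secs)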
Example #14
File: snippet.py  Project: szabo92/gistable
def train():
    """Train CIFAR-10 for a number of steps."""
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            global_step = tf.Variable(0, trainable=False)

            # Get images and labels for CIFAR-10.
            images, labels = cifar10.distorted_inputs()
            images_eval, labels_eval = cifar10.inputs(eval_data=True)

            # Build a Graph that computes the logits predictions from the
            # inference model.
            logits = cifar10.inference(images)
            scope.reuse_variables()
            logits_eval = cifar10.inference(images_eval)

            # Calculate loss.
            loss = cifar10.loss(logits, labels)

            # For evaluation
            top_k = tf.nn.in_top_k(logits, labels, 1)
            top_k_eval = tf.nn.in_top_k(logits_eval, labels_eval, 1)

            # Add precision summary
            summary_train_prec = tf.placeholder(tf.float32)
            summary_eval_prec = tf.placeholder(tf.float32)
            tf.scalar_summary('precision/train', summary_train_prec)
            tf.scalar_summary('precision/eval', summary_eval_prec)

            # Build a Graph that trains the model with one batch of examples and
            # updates the model parameters.
            train_op = cifar10.train(loss, global_step)

            # Create a saver.
            saver = tf.train.Saver(tf.all_variables())

            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.merge_all_summaries()

            # Build an initialization operation to run below.
            init = tf.initialize_all_variables()

            # Start running operations on the Graph.
            sess = tf.Session(config=tf.ConfigProto(
                log_device_placement=FLAGS.log_device_placement))
            sess.run(init)

            # Start the queue runners.
            tf.train.start_queue_runners(sess=sess)

            summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                                    graph_def=sess.graph_def)

            for step in xrange(FLAGS.max_steps):
                start_time = time.time()
                _, loss_value = sess.run([train_op, loss])
                duration = time.time() - start_time

                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                if step % 10 == 0:
                    num_examples_per_step = FLAGS.batch_size
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = float(duration)

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), step, loss_value,
                                        examples_per_sec, sec_per_batch))

                EVAL_STEP = 10
                EVAL_NUM_EXAMPLES = 1024
                if step % EVAL_STEP == 0:
                    prec_train = evaluate_set(sess, top_k, EVAL_NUM_EXAMPLES)
                    prec_eval = evaluate_set(sess, top_k_eval,
                                             EVAL_NUM_EXAMPLES)
                    print('%s: precision train = %.3f' %
                          (datetime.now(), prec_train))
                    print('%s: precision eval  = %.3f' %
                          (datetime.now(), prec_eval))

                if step % 100 == 0:
                    summary_str = sess.run(summary_op,
                                           feed_dict={
                                               summary_train_prec: prec_train,
                                               summary_eval_prec: prec_eval
                                           })
                    summary_writer.add_summary(summary_str, step)

                # Save the model checkpoint periodically.
                if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                    checkpoint_path = os.path.join(FLAGS.train_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
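
Example #14 calls evaluate_set(sess, top_k, num_examples), which is not included in the snippet. A minimal sketch of what it might do, counting in_top_k hits over enough batches to cover num_examples, mirroring the precision loops used elsewhere on this page (the body is an assumption; FLAGS.batch_size comes from the surrounding module):

import math
import numpy as np

def evaluate_set(sess, top_k_op, num_examples):
    """Run the in_top_k op over roughly num_examples inputs and return precision @ 1."""
    num_iter = int(math.ceil(num_examples / float(FLAGS.batch_size)))
    true_count = 0
    for _ in range(num_iter):
        predictions = sess.run([top_k_op])
        true_count += np.sum(predictions)
    return true_count / float(num_iter * FLAGS.batch_size)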
Example #15
def train():
    print("\nSource code of training file {}:\n\n{}".format(__file__, open(__file__).read()))

    log('loading CIFAR')
    # Import data
    training_batch = cifar10.distorted_inputs()

    lm = LayerManager(forward_biased_estimate=False)
    batch = tf.Variable(0)

    with tf.name_scope('input'):
        fed_input_data = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
        fed_input_labels = tf.placeholder(tf.int32, [None])
        drop_probs = [tf.Variable(tf.constant(DEFAULT_KEEP_PROB, shape=[1, 1, 1, ], dtype=tf.float32), trainable=False, collections=['Dropout']) for _ in range(NUM_DROPOUT_LAYERS)]

    with tf.name_scope('posterior'):
        training_batch_error, _, _, _ = full_model(lm, drop_probs, *training_batch)
    training_merged = lm.summaries.merge_all_summaries()
    lm.is_training = False
    tf.get_variable_scope().reuse_variables()
    lm.summaries.reset()
    with tf.name_scope('test'):
        _, test_percent_error, _, _ = full_model(lm, drop_probs, *cifar10.inputs(eval_data=True))
    with tf.name_scope('forward'):
        _, _, forward_per_example_error, forward_incorrect_examples = full_model(lm, drop_probs, fed_input_data, fed_input_labels)

    def compute_test_percent_error():
        return numpy.mean([sess.run([test_percent_error]) for _ in range(int(numpy.ceil(FLAGS.num_test_examples / FLAGS.batch_size)))])

    saver = tf.train.Saver(tf.trainable_variables() + tf.get_collection('BatchNormInternal'))

    learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, batch, 5000, 0.8, staircase=True)

    train_step = tf.train.AdamOptimizer(learning_rate).minimize(training_batch_error, global_step=batch, var_list=lm.filter_factory.variables + lm.weight_factory.variables + lm.bias_factory.variables + lm.scale_factory.variables)

    fed_drop_probs = tf.placeholder(tf.float32, [None, None, None, None])
    update_drop_probs = [tf.assign(drop_prob, fed_drop_probs, validate_shape=False) for drop_prob in drop_probs]

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_variables(tf.get_collection('BatchNormInternal')))
        sess.run(tf.initialize_variables(tf.get_collection('Dropout')))

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        if TRAIN:
            train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)
            # test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
            try:
                log('starting training')
                for i in range(FLAGS.max_steps):
                    if i % 1000 == 999: # Do test set
                        err = compute_test_percent_error()
                        for j in range(NUM_DROPOUT_LAYERS):
                            sess.run([update_drop_probs[j]], feed_dict={fed_drop_probs: [[[[1.0]]]]})
                        det_err = compute_test_percent_error()
                        for j in range(NUM_DROPOUT_LAYERS):
                            sess.run([update_drop_probs[j]], feed_dict={fed_drop_probs: [[[[DEFAULT_KEEP_PROB]]]]})
                        log('batch %s: Random test classification error = %s%%, deterministic test classification error = %s%%' % (i, err, det_err))
                    if i % 100 == 99: # Record a summary
                        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                        run_metadata = tf.RunMetadata()
                        summary, _ = sess.run([training_merged, train_step],
                                              options=run_options,
                                              run_metadata=run_metadata)
                        train_writer.add_summary(summary, i)
                        train_writer.add_run_metadata(run_metadata, 'batch%d' % i)
                    else:
                        sess.run([train_step])
            finally:
                log('saving')
                saver.save(sess, FLAGS.train_dir, global_step=batch)
                log('done')
        else:
            restore_latest(saver, sess, '/tmp/derandomizing_dropout', suffix='-100000')

        if DERANDOMIZE_DROPOUT:
            # NUM_RUNS = 10
            # runs = []
            # for _ in range(NUM_RUNS):
            #     new_output_probs, = sess.run([forward_output], feed_dict={fed_input_data: mnist.train.images, fed_input_labels: mnist.train.labels})
            #     new_output = numpy.argmax(new_output_probs, 1)
            #     runs.append(new_output)
            #
            # all_runs = numpy.vstack(runs).T
            # entropy = numpy.array([scipy.stats.entropy(numpy.bincount(row), base=2.0) for row in all_runs])


            derandomized_drop_probs = [DEFAULT_KEEP_PROB * numpy.ones((1, HIDDEN_LAYER_SIZE)) for _ in range(NUM_DROPOUT_LAYERS)]

            num_tests_performed = 0

            for pass_count in range(1):
                for j in range(HIDDEN_LAYER_SIZE):
                    for i in range(NUM_DROPOUT_LAYERS):  # range(NUM_DROPOUT_LAYERS-1,-1,-1):
                        if derandomized_drop_probs[i][0, j] == 0.0 or derandomized_drop_probs[i][0, j] == 1.0:
                            continue
                        num_tests_performed += 1
                        for k in range(NUM_DROPOUT_LAYERS):
                            if k == i:
                                # curr_drop_probs = numpy.tile(derandomized_drop_probs[i], (BATCHES_PER_DERANDOMIZE_STEP*BATCH_SIZE, 1))
                                # to_randomize = HIDDEN_LAYER_SIZE - j - 1
                                # randperms = numpy.argsort(numpy.random.rand(BATCHES_PER_DERANDOMIZE_STEP*BATCH_SIZE, to_randomize), axis=1)
                                #
                                # to_keep = max(int(HIDDEN_LAYER_SIZE*DEFAULT_KEEP_PROB-derandomized_drop_probs[i][:j].sum()), 1)
                                # curr_drop_probs[:, j+1:] = (randperms < to_keep)


                                curr_drop_probs = (numpy.random.rand(BATCHES_PER_DERANDOMIZE_STEP*BATCH_SIZE, HIDDEN_LAYER_SIZE) < derandomized_drop_probs[i]).astype(numpy.float32)
                                curr_drop_probs[:, j] = 0.0
                                # curr_drop_probs[:, j+1:j+2] = 1.0
                                sess.run([update_drop_probs[i]], feed_dict={fed_drop_probs: curr_drop_probs})
                            else:
                                sess.run([update_drop_probs[k]], feed_dict={fed_drop_probs: numpy.random.rand(BATCHES_PER_DERANDOMIZE_STEP * BATCH_SIZE, HIDDEN_LAYER_SIZE) < derandomized_drop_probs[k]})

                        #indices = numpy.argmax(entropy[:, numpy.newaxis] + -numpy.log(-numpy.log(numpy.random.rand(entropy.shape[0], BATCHES_PER_DERANDOMIZE_STEP*BATCH_SIZE))), axis=0)

                        #  indices = [numpy.argmax(1000*entropy + -numpy.log(-numpy.log(numpy.random.rand(*entropy.shape)))) for _ in range(BATCHES_PER_DERANDOMIZE_STEP*BATCH_SIZE)]
                        # examples = mnist.train.images[indices, :]
                        # labels = mnist.train.labels[indices]
                        # Collect a bunch of 64-example batches together
                        examples, labels = [numpy.concatenate(things, axis=0) for things in zip(*[sess.run(training_batch) for _ in range(BATCHES_PER_DERANDOMIZE_STEP)])]

                        # Might want to use cross entropy, but why not not use percent error since we're not differentiating?
                        # Using "test" expressions so we can manually feed in data, but we are feeding training data (same data for obj0 and obj1)
                        err0, cross_entropies0 = sess.run([forward_incorrect_examples, forward_per_example_error], feed_dict={fed_input_data: examples, fed_input_labels: labels})
                        curr_drop_probs[:, j] = 1.0
                        # curr_drop_probs[:, j+1:] = (randperms < to_keep - 1)
                        # curr_drop_probs[:, j+1:j+2] = 0.0
                        sess.run([update_drop_probs[i]], feed_dict={fed_drop_probs: curr_drop_probs})
                        err1, cross_entropies1 = sess.run([forward_incorrect_examples, forward_per_example_error], feed_dict={fed_input_data: examples, fed_input_labels: labels})

                        # One-sided paired-sample t-test
                        cross_entropy_diff = cross_entropies0 - cross_entropies1
                        t = numpy.sqrt(BATCHES_PER_DERANDOMIZE_STEP * BATCH_SIZE)*cross_entropy_diff.mean()/cross_entropy_diff.std(ddof=1)
                        p = scipy.stats.t.sf(-t, df=BATCHES_PER_DERANDOMIZE_STEP * BATCH_SIZE - 1)

                        b = numpy.sum(err0 & ~err1)
                        c = numpy.sum(err1 & ~err0)
                        # if b + c < BINOMIAL_TEST_CUTOFF:
                        #     p = 0.5
                        #     stat_message = "too small"
                        # else:
                        #     # McNemar's test
                        #     if b + c >= CHI2_TEST_CUTOFF:
                        #         chi2 = (b-c)**2/(b+c)
                        #         p = scipy.stats.distributions.chi2.sf(chi2, df=1)  # Two-sided
                        #     else:
                        #         p = scipy.stats.binom_test([b,c]) - scipy.stats.binom.pmf(b, b+c, 0.5)  # Mid-p test
                        #     # Form one-sided p-value
                        #     if b > c:
                        #         p = 1-0.5*p
                        #     else:
                        #         p = 0.5*p
                        #     if b + c >= CHI2_TEST_CUTOFF:
                        #         stat_message = "p = %.4f, chi square test" % p
                        #     else:
                        #         stat_message = "p = %.4f, binomial mid-p test" % p

                        if p < SIGNIFICANCE_LEVEL:  # cross_entropies0.mean() <= cross_entropies1.mean():  # b <= c:
                            new_drop_prob = 0.0
                            neuron_status = "drop"
                        elif p > 1 - SIGNIFICANCE_LEVEL:
                            new_drop_prob = 1.0
                            neuron_status = "keep"
                        else:
                            new_drop_prob = DEFAULT_KEEP_PROB
                            neuron_status = "hmmm"

                        #log(neuron_status + ' L{} N{}: b + c = {}, {}'.format(i, j, b+c, stat_message))
                        log(neuron_status + ' P{} L{} N{}: b = {}, c = {}, p = {}'.format(pass_count, i, j, b, c, p))
                        derandomized_drop_probs[i][0, j] = new_drop_prob
                for i in range(NUM_DROPOUT_LAYERS):
                    num_dropped = (derandomized_drop_probs[i] == 0.0).sum()
                    num_kept = (derandomized_drop_probs[i] == 1.0).sum()
                    num_hmmm = HIDDEN_LAYER_SIZE - num_dropped - num_kept
                    sess.run([update_drop_probs[i]], feed_dict={fed_drop_probs: numpy.ceil(derandomized_drop_probs[i])})

                    log('layer {}: {} neurons dropped, {} kept, {} undecided'.format(i, num_dropped, num_kept, num_hmmm))
                log('Performed {} statistical tests'.format(num_tests_performed))
            log('saving')
            saver.save(sess, FLAGS.train_dir, global_step=batch+1)
            log('done')
        else:
            restore_latest(saver, sess, '/tmp/derandomizing_dropout', suffix='-100001')

        err = compute_test_percent_error()
        log('Test classification error = %s%%' % err)

        coord.request_stop()
        coord.join(threads)
        sess.close()
Example #16
		#print test_model.weights

		models.append(test_model)
		with test_model.g.as_default():
			
			global_step = tf.Variable(0, trainable=False)

			# Get images and labels for CIFAR-10.
			images, labels = cifar10.distorted_inputs()
			test_images, test_labels = cifar10.inputs(eval_data='test')

			# Build a Graph that computes the logits predictions from the
			# inference model.
			logits = test_model.predict(images)
			logit_test = test_model.predict(test_images)

			# Calculate loss.
			loss = cifar10.loss(logits, labels)

			# Build a Graph that trains the model with one batch of examples and
			# updates the model parameters.
			train_op = cifar10.train(loss, global_step)


			top_k_op = tf.nn.in_top_k(logit_test, test_labels, 1)
Example #17
		def SGDBead(self, bead, thresh, maxindex):
			
			finalerror = 0.
			
			#thresh = .05

			# Parameters
			learning_rate = 0.001
			training_epochs = 15
			batch_size = 100
			display_step = 1
			
			curWeights, curBiases = self.AllBeads[bead]
			#test_model = multilayer_perceptron(w=curWeights, b=curBiases)
			test_model = convnet(w=curWeights, b=curBiases)

			
			with test_model.g.as_default():

				global_step = tf.Variable(0, trainable=False)

				# Get images and labels for CIFAR-10.
				images, labels = cifar10.distorted_inputs()
				test_images, test_labels = cifar10.inputs(eval_data='test')

				# Build a Graph that computes the logits predictions from the
				# inference model.
				logits = test_model.predict(images)
				logit_test = test_model.predict(test_images)

				# Calculate loss.
				loss = cifar10.loss(logits, labels)

				# Build a Graph that trains the model with one batch of examples and
				# updates the model parameters.
				train_op = cifar10.train(loss, global_step)


				top_k_op = tf.nn.in_top_k(logit_test, test_labels, 1)


				# Build an initialization operation to run below.
				init = tf.initialize_all_variables()

				# Start running operations on the Graph.
				#sess = tf.Session(config=tf.ConfigProto(
				#    log_device_placement=FLAGS.log_device_placement))

				with tf.Session(config=tf.ConfigProto(
					log_device_placement=False)) as sess:
					sess.run(init)

					tf.train.start_queue_runners(sess=sess)

					step = 0
					stopcond = True
					while step < max_steps and stopcond:


						start_time = time.time()
						_, loss_value = sess.run([train_op, loss])
						duration = time.time() - start_time

						assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

						if step % 10 == 0:
							num_examples_per_step = batch_size
							examples_per_sec = num_examples_per_step / duration
							sec_per_batch = float(duration)

							format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
									  'sec/batch)')
							print (format_str % (datetime.now(), step, loss_value,
											 examples_per_sec, sec_per_batch))

						if step % 100 == 0:

							num_iter = int(math.ceil(num_examples / batch_size))
							true_count = 0  # Counts the number of correct predictions.
							total_sample_count = num_iter * batch_size
							stepp = 0
							while stepp < num_iter:
								predictions = sess.run([top_k_op])
								true_count += np.sum(predictions)
								stepp += 1


							# Compute precision @ 1.
							precision = true_count / total_sample_count
							print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))

							if precision > 1 - thresh:
								stopcond = False
								test_model.params = sess.run(test_model.weightslist), sess.run(test_model.biaseslist)
								self.AllBeads[bead]=test_model.params
								finalerror = 1 - precision
								print ("Final bead error: ",str(finalerror))
								
						step += 1        
				return finalerror