# Supervised fine-tuning entry points for the stacked autoencoder. The four
# main_supervised variants below differ in how they load data, how often they
# log, and what they persist when training ends. FLAGS, `sub` (the split
# keys), and the helpers used throughout (read_data_sets, loss_supervised,
# training, evaluation, fill_feed_dict, do_eval_summary, do_eval) are defined
# elsewhere in this module.
import time
from os.path import join as pjoin

import numpy as np
import tensorflow as tf


# Variant: reads per-subject splits via `sub`, logs every 5000 steps, and
# saves the fine-tuned weights and biases of each layer to .npy files.
def main_supervised(ae):
  with ae.session.graph.as_default():
    sess = ae.session
    input_pl = tf.placeholder(tf.float32,
                              shape=(FLAGS.batch_size, FLAGS.image_pixels),
                              name='input_pl')
    logits = ae.supervised_net(input_pl)

    data = read_data_sets(FLAGS.data_dir, sub['tr'], sub['te'], sub['val'],
                          one_hot=False)
    num_train = data.train.num_examples

    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=FLAGS.batch_size,
                                        name='target_pl')

    loss = loss_supervised(logits, labels_placeholder)
    train_op, global_step = training(loss, FLAGS.supervised_learning_rate)
    eval_correct = evaluation(logits, labels_placeholder)

    # Histogram summaries for every layer's weights and biases, tagged so
    # fine-tuning curves are distinguishable from pretraining ones.
    hist_summaries = [ae['biases{0}'.format(i + 1)]
                      for i in range(ae.num_hidden_layers + 1)]
    hist_summaries.extend([ae['weights{0}'.format(i + 1)]
                           for i in range(ae.num_hidden_layers + 1)])
    hist_summaries = [tf.summary.histogram(v.op.name + "_fine_tuning", v)
                      for v in hist_summaries]
    summary_op = tf.summary.merge(hist_summaries)
    summary_writer = tf.summary.FileWriter(pjoin(FLAGS.summary_dir,
                                                 'fine_tuning'),
                                           graph=sess.graph,
                                           flush_secs=FLAGS.flush_secs)

    # Initialize only the supervised output layer and the global step; the
    # pretrained hidden layers keep their weights.
    vars_to_init = ae.get_variables_to_init(ae.num_hidden_layers + 1)
    vars_to_init.append(global_step)
    sess.run(tf.variables_initializer(vars_to_init))

    steps = FLAGS.finetuning_epochs * num_train
    for step in range(steps):
      start_time = time.time()

      feed_dict = fill_feed_dict(data.train, input_pl, labels_placeholder)
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 5000 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_img_str = sess.run(
            tf.summary.image("training_images",
                             tf.reshape(input_pl,
                                        (FLAGS.batch_size, FLAGS.image_size,
                                         FLAGS.image_size, 1)),
                             max_outputs=FLAGS.batch_size),
            feed_dict=feed_dict)
        summary_writer.add_summary(summary_img_str)

      if (step + 1) % 5000 == 0 or (step + 1) == steps:
        train_sum = do_eval_summary("training_error", sess, eval_correct,
                                    input_pl, labels_placeholder, data.train)
        val_sum = do_eval_summary("validation_error", sess, eval_correct,
                                  input_pl, labels_placeholder,
                                  data.validation)
        test_sum = do_eval_summary("test_error", sess, eval_correct,
                                   input_pl, labels_placeholder, data.test)
        summary_writer.add_summary(train_sum, step)
        summary_writer.add_summary(val_sum, step)
        summary_writer.add_summary(test_sum, step)

    # Persist the fine-tuned weights and biases of each hidden layer.
    for i in range(FLAGS.num_hidden_layers):
      filters = sess.run(tf.identity(ae["weights" + str(i + 1)]))
      np.save(pjoin(FLAGS.chkpt_dir, "filters" + str(i + 1) + "ft"), filters)
      filters_biases = sess.run(tf.identity(ae["biases" + str(i + 1)]))
      np.save(pjoin(FLAGS.chkpt_dir, "biases" + str(i + 1) + "ft"),
              filters_biases)
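# For reference, a minimal sketch of the fill_feed_dict helper every variant
# in this file assumes (the real one lives elsewhere in this module; this
# version mirrors the TensorFlow MNIST-tutorial helper and is an assumption,
# hence the sketch name). It draws the next batch from a DataSet-style
# object and maps it onto the two placeholders.
def _fill_feed_dict_sketch(data_set, images_pl, labels_pl):
  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size)
  return {images_pl: images_feed, labels_pl: labels_feed}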
# Variant: loads data through the custom ExtractData loader (via
# read_data_sets_pretraining) instead of the stock reader, logs every 50
# steps, and additionally prints a test-set evaluation (do_eval) alongside
# the error summaries every 200 steps.
def main_supervised(ae):
  with ae.session.graph.as_default():
    sess = ae.session
    input_pl = tf.placeholder(tf.float32,
                              shape=(FLAGS.batch_size, FLAGS.image_pixels),
                              name='input_pl')
    logits = ae.supervised_net(input_pl)

    new_file_data = ExtractData()
    data = read_data_sets_pretraining(new_file_data.fetch_data_sets)
    num_train = data.train.num_examples

    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=FLAGS.batch_size,
                                        name='target_pl')

    loss = loss_supervised(logits, labels_placeholder)
    train_op, global_step = training(loss, FLAGS.supervised_learning_rate)
    eval_correct = evaluation(logits, labels_placeholder)

    hist_summaries = [ae['biases{0}'.format(i + 1)]
                      for i in range(ae.num_hidden_layers + 1)]
    hist_summaries.extend([ae['weights{0}'.format(i + 1)]
                           for i in range(ae.num_hidden_layers + 1)])
    hist_summaries = [tf.summary.histogram(v.op.name + "_fine_tuning", v)
                      for v in hist_summaries]
    summary_op = tf.summary.merge(hist_summaries)
    summary_writer = tf.summary.FileWriter(pjoin(FLAGS.summary_dir,
                                                 'fine_tuning'),
                                           graph=sess.graph,
                                           flush_secs=FLAGS.flush_secs)

    vars_to_init = ae.get_variables_to_init(ae.num_hidden_layers + 1)
    vars_to_init.append(global_step)
    sess.run(tf.variables_initializer(vars_to_init))

    steps = FLAGS.finetuning_epochs * num_train
    for step in range(steps):
      start_time = time.time()

      feed_dict = fill_feed_dict(data.train, input_pl, labels_placeholder)
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 50 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)

      if (step + 1) % 200 == 0 or (step + 1) == steps:
        train_sum = do_eval_summary("training_error", sess, eval_correct,
                                    input_pl, labels_placeholder, data.train)
        val_sum = do_eval_summary("validation_error", sess, eval_correct,
                                  input_pl, labels_placeholder,
                                  data.validation)
        test_sum = do_eval_summary("test_error", sess, eval_correct,
                                   input_pl, labels_placeholder, data.test)
        # Also print test-set precision to stdout.
        do_eval(sess, eval_correct, input_pl, labels_placeholder, data.test)
        summary_writer.add_summary(train_sum, step)
        summary_writer.add_summary(val_sum, step)
        summary_writer.add_summary(test_sum, step)
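# The custom loader above only needs to produce objects matching the
# interface the training loop touches: .train/.validation/.test splits, each
# exposing num_examples and next_batch(batch_size). A minimal sketch of that
# contract (names and details are assumptions, not the repo's actual
# ExtractData / read_data_sets_pretraining code):
class _DataSetSketch(object):
  def __init__(self, images, labels):
    self._images = images
    self._labels = labels
    self._index = 0

  @property
  def num_examples(self):
    return len(self._images)

  def next_batch(self, batch_size):
    # Simplified: assumes num_examples is a multiple of batch_size, as the
    # loops in this file enforce by iterating over whole batches only.
    start = self._index
    end = start + batch_size
    self._index = end % self.num_examples
    return self._images[start:end], self._labels[start:end]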
# Variant: same training loop against the stock reader (read_data_sets with
# only a data directory), logging status and image summaries every 100 steps
# and error summaries every 1000 steps; nothing is saved at the end.
def main_supervised(ae):
  with ae.session.graph.as_default():
    sess = ae.session
    input_pl = tf.placeholder(tf.float32,
                              shape=(FLAGS.batch_size, FLAGS.image_pixels),
                              name='input_pl')
    logits = ae.supervised_net(input_pl)

    data = read_data_sets(FLAGS.data_dir)
    num_train = data.train.num_examples

    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=FLAGS.batch_size,
                                        name='target_pl')

    loss = loss_supervised(logits, labels_placeholder)
    train_op, global_step = training(loss, FLAGS.supervised_learning_rate)
    eval_correct = evaluation(logits, labels_placeholder)

    hist_summaries = [ae['biases{0}'.format(i + 1)]
                      for i in range(ae.num_hidden_layers + 1)]
    hist_summaries.extend([ae['weights{0}'.format(i + 1)]
                           for i in range(ae.num_hidden_layers + 1)])
    hist_summaries = [tf.summary.histogram(v.op.name + "_fine_tuning", v)
                      for v in hist_summaries]
    summary_op = tf.summary.merge(hist_summaries)
    summary_writer = tf.summary.FileWriter(pjoin(FLAGS.summary_dir,
                                                 'fine_tuning'),
                                           graph=sess.graph,
                                           flush_secs=FLAGS.flush_secs)

    vars_to_init = ae.get_variables_to_init(ae.num_hidden_layers + 1)
    vars_to_init.append(global_step)
    sess.run(tf.variables_initializer(vars_to_init))

    steps = FLAGS.finetuning_epochs * num_train
    for step in range(steps):
      start_time = time.time()

      feed_dict = fill_feed_dict(data.train, input_pl, labels_placeholder)
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_img_str = sess.run(
            tf.summary.image("training_images",
                             tf.reshape(input_pl,
                                        (FLAGS.batch_size, FLAGS.image_size,
                                         FLAGS.image_size, 1)),
                             max_outputs=FLAGS.batch_size),
            feed_dict=feed_dict)
        summary_writer.add_summary(summary_img_str)

      if (step + 1) % 1000 == 0 or (step + 1) == steps:
        train_sum = do_eval_summary("training_error", sess, eval_correct,
                                    input_pl, labels_placeholder, data.train)
        val_sum = do_eval_summary("validation_error", sess, eval_correct,
                                  input_pl, labels_placeholder,
                                  data.validation)
        test_sum = do_eval_summary("test_error", sess, eval_correct,
                                   input_pl, labels_placeholder, data.test)
        summary_writer.add_summary(train_sum, step)
        summary_writer.add_summary(val_sum, step)
        summary_writer.add_summary(test_sum, step)
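# A minimal sketch of the do_eval_summary helper used above (the real one
# lives elsewhere in this module; this version and its name are
# assumptions). It counts correct predictions over whole batches of one
# split, converts the total to an error rate, and returns it as a Summary
# proto that FileWriter.add_summary accepts.
def _do_eval_summary_sketch(tag, sess, eval_correct, images_pl, labels_pl,
                            data_set):
  true_count = 0
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for _ in range(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set, images_pl, labels_pl)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  error = 1.0 - true_count / float(num_examples)
  return tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=error)])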
# Variant: expects one-hot labels (placeholder shaped
# (batch_size, num_classes)) and, after fine-tuning, runs the trained
# network over the test set and saves the collected logits to
# data/output1.npy.
def main_supervised(ae):
  output = []
  with ae.session.graph.as_default():
    sess = ae.session
    input_pl = tf.placeholder(tf.float32,
                              shape=(FLAGS.batch_size, FLAGS.image_pixels),
                              name='input_pl')
    logits = ae.supervised_net(input_pl)

    data = read_data_sets(FLAGS.data_dir)
    num_train = data.train.num_examples

    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=(FLAGS.batch_size,
                                               FLAGS.num_classes),
                                        name='target_pl')

    loss = loss_supervised(logits, labels_placeholder)
    train_op, global_step = training(loss, FLAGS.supervised_learning_rate)
    eval_correct = evaluation(logits, labels_placeholder)

    hist_summaries = [ae['biases{0}'.format(i + 1)]
                      for i in range(ae.num_hidden_layers + 1)]
    hist_summaries.extend([ae['weights{0}'.format(i + 1)]
                           for i in range(ae.num_hidden_layers + 1)])
    hist_summaries = [tf.summary.histogram(v.op.name + "_fine_tuning", v)
                      for v in hist_summaries]
    summary_op = tf.summary.merge(hist_summaries)
    summary_writer = tf.summary.FileWriter(pjoin(FLAGS.summary_dir,
                                                 'fine_tuning'),
                                           graph=sess.graph,
                                           flush_secs=FLAGS.flush_secs)

    vars_to_init = ae.get_variables_to_init(ae.num_hidden_layers + 1)
    vars_to_init.append(global_step)
    sess.run(tf.variables_initializer(vars_to_init))

    steps = FLAGS.finetuning_epochs * num_train
    print("total fine-tuning steps: ", steps)
    for step in range(steps):
      start_time = time.time()

      feed_dict = fill_feed_dict(data.train, input_pl, labels_placeholder)
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_img_str = sess.run(
            tf.summary.image("training_images",
                             tf.reshape(input_pl,
                                        (FLAGS.batch_size, FLAGS.image_size,
                                         FLAGS.image_size, 1)),
                             max_outputs=FLAGS.batch_size),
            feed_dict=feed_dict)
        summary_writer.add_summary(summary_img_str)

      if (step + 1) % 1000 == 0 or (step + 1) == steps:
        print("running evaluation at step ", step)
        train_sum = do_eval_summary("training_error", sess, eval_correct,
                                    input_pl, labels_placeholder, data.train)
        val_sum = do_eval_summary("validation_error", sess, eval_correct,
                                  input_pl, labels_placeholder,
                                  data.validation)
        test_sum = do_eval_summary("test_error", sess, eval_correct,
                                   input_pl, labels_placeholder, data.test)
        summary_writer.add_summary(train_sum, step)
        summary_writer.add_summary(val_sum, step)
        summary_writer.add_summary(test_sum, step)

    # Collect the network's logits over the test set, one whole batch at a
    # time, and save them for later analysis.
    steps_per_epoch = data.test.num_examples // FLAGS.batch_size
    print("steps_per_epoch: ", steps_per_epoch)
    for step in range(steps_per_epoch):
      feed_dict = fill_feed_dict(data.test, input_pl, labels_placeholder)
      pred = sess.run(logits, feed_dict=feed_dict)
      output.extend(pred)
    output = np.asarray(output)
    np.save('data/output1.npy', output)
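# A plausible entry point for these functions, assuming this module also
# defines the repo's usual unsupervised pretraining step (main_unsupervised
# is an assumed name for a helper that builds the stacked autoencoder,
# pretrains it layer by layer, and returns it):
if __name__ == '__main__':
  ae = main_unsupervised()  # greedy layer-wise pretraining (assumed helper)
  main_supervised(ae)       # supervised fine-tuning as defined above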