Example #1
def main_unsupervised():
    with tf.Graph().as_default() as g:
        sess = tf.Session()

        num_hidden = FLAGS.num_hidden_layers
        ae_hidden_shapes = [
            getattr(FLAGS, "hidden{0}_units".format(j + 1))
            for j in range(num_hidden)
        ]
        ae_shape = ([FLAGS.image_pixels] + ae_hidden_shapes +
                    [FLAGS.num_classes])

        ae = AutoEncoder(ae_shape, sess)

        data = read_data_sets_pretraining(FLAGS.data_dir,
                                          sub['tr'],
                                          sub['te'],
                                          sub['val'],
                                          one_hot=True)

        num_train = data.train.num_examples

        learning_rates = {
            j: getattr(FLAGS, "pre_layer{0}_learning_rate".format(j + 1))
            for j in range(num_hidden)
        }

        noise = {
            j: getattr(FLAGS, "noise_{0}".format(j + 1))
            for j in range(num_hidden)
        }

        for i in range(len(ae_shape) - 2):
            n = i + 1
            with tf.variable_scope("pretrain_{0}".format(n)):
                input_ = tf.placeholder(dtype=tf.float32,
                                        shape=(FLAGS.batch_size, ae_shape[0]),
                                        name='ae_input_pl')
                target_ = tf.placeholder(dtype=tf.float32,
                                         shape=(FLAGS.batch_size, ae_shape[0]),
                                         name='ae_target_pl')
                layer = ae.pretrain_net(input_, n)

                with tf.name_scope("target"):
                    target_for_loss = ae.pretrain_net(target_,
                                                      n,
                                                      is_target=True)

                loss = loss_x_entropy(layer, target_for_loss)
                train_op, global_step = training(loss, learning_rates[i], i)

                summary_dir = pjoin(FLAGS.summary_dir,
                                    'pretraining_{0}'.format(n))
                summary_writer = tf.summary.FileWriter(
                    summary_dir, graph=sess.graph, flush_secs=FLAGS.flush_secs)
                summary_vars = [
                    ae["biases{0}".format(n)], ae["weights{0}".format(n)]
                ]

                hist_summaries = [
                    tf.summary.histogram(v.op.name, v) for v in summary_vars
                ]
                hist_summaries.append(loss_summaries[i])
                summary_op = tf.summary.merge(hist_summaries)

                vars_to_init = ae.get_variables_to_init(n)
                vars_to_init.append(global_step)
                sess.run(tf.variables_initializer(vars_to_init))

                print("\n\n")
                print("| Training Step | Cross Entropy |  Layer  |   Epoch  |")
                print("|---------------|---------------|---------|----------|")

                # Build the image summary once, before the loop, so the graph
                # does not grow at every logging step.
                image_summary_op = tf.summary.image(
                    "training_images",
                    tf.reshape(input_, (FLAGS.batch_size, FLAGS.image_size,
                                        FLAGS.image_size, 1)),
                    max_outputs=FLAGS.batch_size)

                # each step consumes one minibatch, so iterate over batches,
                # not individual examples
                num_batches = (num_train + FLAGS.batch_size - 1) // FLAGS.batch_size
                for step in range(FLAGS.pretraining_epochs * num_batches):
                    feed_dict = fill_feed_dict_ae(data.train, input_, target_,
                                                  noise[i])

                    _, loss_value = sess.run([train_op, loss],
                                             feed_dict=feed_dict)

                    if step % 5000 == 0:
                        summary_str = sess.run(summary_op, feed_dict=feed_dict)
                        summary_writer.add_summary(summary_str, step)

                        summary_img_str = sess.run(image_summary_op,
                                                   feed_dict=feed_dict)
                        summary_writer.add_summary(summary_img_str)

                        output = "| {0:>13} | {1:13.4f} | Layer {2} | Epoch {3}  |"\
                                 .format(step, loss_value, n, step // num_train + 1)

                        print(output)

            filters = sess.run(ae["weights" + str(n)])
            np.save(pjoin(FLAGS.chkpt_dir, "filters" + str(n)), filters)
            filters_biases = sess.run(ae["biases" + str(n)])
            np.save(pjoin(FLAGS.chkpt_dir, "biases" + str(n)), filters_biases)
            if i == 0:
                filters = tile_raster_images(X=filters.T,
                                             img_shape=(FLAGS.image_size,
                                                        FLAGS.image_size),
                                             tile_shape=(10, 10),
                                             output_pixel_vals=False)
                filters = np.expand_dims(np.expand_dims(filters, 0), 3)
                image_var = tf.Variable(filters)
                image_filter = tf.identity(image_var)
                sess.run(tf.variables_initializer([image_var]))
                img_filter_summary_op = tf.summary.image(
                    "first_layer_filters", image_filter)
                summary_writer.add_summary(sess.run(img_filter_summary_op))
                summary_writer.flush()

    return ae
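
Helper sketch: every pretraining loop on this page calls a loss_x_entropy helper that the page does not show. Purely for orientation, here is a minimal sketch of a compatible implementation, assuming the layer outputs are sigmoid activations in (0, 1); the clipping constant is an illustrative choice, not taken from the original code.

import tensorflow as tf

def loss_x_entropy(output, target):
    """Hypothetical cross-entropy reconstruction loss matching the calls
    above; assumes `output` and `target` both lie in (0, 1)."""
    with tf.name_scope("xentropy_loss"):
        # clip to avoid log(0) on saturated units
        out = tf.clip_by_value(output, 1e-10, 1.0 - 1e-10)
        cross_entropy = (target * tf.log(out) +
                         (1.0 - target) * tf.log(1.0 - out))
        return -tf.reduce_mean(tf.reduce_sum(cross_entropy, 1),
                               name="xentropy_mean")
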
Example #2
def main_supervised(ae):
  with ae.session.graph.as_default():
    sess = ae.session
    input_pl = tf.placeholder(tf.float32, shape=(FLAGS.batch_size,
                                                 FLAGS.image_pixels),
                              name='input_pl')
    logits = ae.supervised_net(input_pl)

    # data = read_data_sets(FLAGS.data_dir)
    # num_train = data.train.num_examples
    new_file_data = ExtractData()
    data = read_data_sets_pretraining(new_file_data.fetch_data_sets)
    num_train = data.train.num_examples

    labels_placeholder = tf.placeholder(tf.int32,
                                        shape=FLAGS.batch_size,
                                        name='target_pl')

    loss = loss_supervised(logits, labels_placeholder)
    train_op, global_step = training(loss, FLAGS.supervised_learning_rate)
    eval_correct = evaluation(logits, labels_placeholder)

    hist_summaries = [ae['biases{0}'.format(i + 1)]
                      for i in range(ae.num_hidden_layers + 1)]
    hist_summaries.extend([ae['weights{0}'.format(i + 1)]
                           for i in range(ae.num_hidden_layers + 1)])

    hist_summaries = [tf.summary.histogram(v.op.name + "_fine_tuning", v)
                      for v in hist_summaries]
    summary_op = tf.summary.merge(hist_summaries)

    summary_writer = tf.summary.FileWriter(pjoin(FLAGS.summary_dir,
                                                 'fine_tuning'),
                                           graph=sess.graph,
                                           flush_secs=FLAGS.flush_secs)

    vars_to_init = ae.get_variables_to_init(ae.num_hidden_layers + 1)
    vars_to_init.append(global_step)
    sess.run(tf.variables_initializer(vars_to_init))

    steps = FLAGS.finetuning_epochs * num_train
    for step in range(steps):
      start_time = time.time()

      feed_dict = fill_feed_dict(data.train,
                                 input_pl,
                                 labels_placeholder)

      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)
     
      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 50 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.

        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)

      if (step + 1) % 200 == 0 or (step + 1) == steps:
        train_sum = do_eval_summary("training_error",
                                    sess,
                                    eval_correct,
                                    input_pl,
                                    labels_placeholder,
                                    data.train)

        val_sum = do_eval_summary("validation_error",
                                  sess,
                                  eval_correct,
                                  input_pl,
                                  labels_placeholder,
                                  data.validation)

        test_sum = do_eval_summary("test_error",
                                   sess,
                                   eval_correct,
                                   input_pl,
                                   labels_placeholder,
                                   data.test)

        do_eval(sess,
                eval_correct,
                input_pl,
                labels_placeholder,
                data.test)

        summary_writer.add_summary(train_sum, step)
        summary_writer.add_summary(val_sum, step)
        summary_writer.add_summary(test_sum, step)
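
Helper sketch: both entry points call a training helper that returns a (train_op, global_step) pair, and the pretraining loops additionally index a module-level loss_summaries dict. A minimal sketch under those assumptions; the plain gradient-descent optimizer is an illustrative choice, not confirmed by this page.

import tensorflow as tf

loss_summaries = {}  # filled per layer; merged in the pretraining loops

def training(loss, learning_rate, loss_key=None):
    """Hypothetical helper matching both call signatures seen above."""
    if loss_key is not None:
        # keep the scalar summary so the pretraining loop can merge it later
        loss_summaries[loss_key] = tf.summary.scalar(loss.op.name, loss)
    else:
        tf.summary.scalar(loss.op.name, loss)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op, global_step
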
def main_unsupervised():
  with tf.Graph().as_default() as g:
    sess = tf.Session()

    num_hidden = FLAGS.num_hidden_layers
    ae_hidden_shapes = [getattr(FLAGS, "hidden{0}_units".format(j + 1))
                        for j in range(num_hidden)]
    #ae_shape = [FLAGS.image_pixels] + ae_hidden_shapes + [FLAGS.num_classes]

    data = read_data_sets_pretraining(FLAGS.data_dir)
    num_train = data.train.num_examples # No. of training examples

    ae_shape = [data.train.items.shape[1]] + ae_hidden_shapes
    ae = AutoEncoder(ae_shape, sess)

    learning_rates = {j: getattr(FLAGS,
                                 "pre_layer{0}_learning_rate".format(j + 1))
                      for j in range(num_hidden)}

    noise = {j: getattr(FLAGS, "noise_{0}".format(j + 1))
             for j in range(num_hidden)}

    # for i in range(len(ae_shape) - 2):
    for i in range(len(ae_shape) - 1):
      n = i + 1
      with tf.variable_scope("pretrain_{0}".format(n)):
        input_ = tf.placeholder(dtype=tf.float32,
                                shape=(FLAGS.batch_size, ae_shape[0]),
                                name='ae_input_pl')
        target_ = tf.placeholder(dtype=tf.float32,
                                 shape=(FLAGS.batch_size, ae_shape[0]),
                                 name='ae_target_pl')
        layer = ae.pretrain_net(input_, n)

        with tf.name_scope("target"):
          target_for_loss = ae.pretrain_net(
              target_, n, is_target=True)  # signal that we want to reconstruct

        loss = loss_x_entropy(layer, target_for_loss)

        train_op, global_step = training(loss, learning_rates[i], i)

        # summarizing training variables
        summary_dir = pjoin(FLAGS.summary_dir, 'pretraining_{0}'.format(n))
        summary_writer = tf.summary.FileWriter(summary_dir,
                                               graph=sess.graph,
                                               flush_secs=FLAGS.flush_secs)
        summary_vars = [ae["biases{0}".format(n)], ae["weights{0}".format(n)]]

        hist_summaries = [tf.summary.histogram(v.op.name, v)
                          for v in summary_vars]
        hist_summaries.append(loss_summaries[i])
        summary_op = tf.summary.merge(hist_summaries)

        # training variables initialization
        vars_to_init = ae.get_variables_to_init(n)
        vars_to_init.append(global_step)
        sess.run(tf.variables_initializer(vars_to_init))

        print("\n\n")
        print("| Training Step | Cross Entropy |  Layer  |   Epoch  |")
        print("|---------------|---------------|---------|----------|")

        minibatches = int(math.ceil(num_train / FLAGS.batch_size))
        # for step in range(FLAGS.pretraining_epochs * num_train):
        for step in range(FLAGS.pretraining_epochs * minibatches):

          # feed_dict of input (with injected noise) and target
          feed_dict = fill_feed_dict_ae(data.train, input_, target_, noise[i])

          # one unsupervised training step; fetch the loss for reporting
          _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

          if step % 100 == 0:
          # if step % FLAGS.batch_size == 0:
            summary_str = sess.run(summary_op, feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, step)
            # image_summary_op = \
            #     tf.summary.image("training_images",
            #                      tf.reshape(input_,
            #                                 (FLAGS.batch_size,
            #                                  FLAGS.image_size,
            #                                  FLAGS.image_size, 1)),
            #                      max_outputs=FLAGS.batch_size)
            #
            # summary_img_str = sess.run(image_summary_op,
            #                            feed_dict=feed_dict)
            # summary_writer.add_summary(summary_img_str)

            output = "| {0:>13} | {1:13.4f} | Layer {2} | Epoch {3}  |"\
                     .format(step, loss_value, n, step // num_train + 1)

            print(output)

      # if i == 0:
      #   filters = sess.run(tf.identity(ae["weights1"]))
      #   np.save(pjoin(FLAGS.chkpt_dir, "filters"), filters)
      #   filters = tile_raster_images(X=filters.T,
      #                                img_shape=(FLAGS.image_size,
      #                                           FLAGS.image_size),
      #                                tile_shape=(10, 10),
      #                                output_pixel_vals=False)
      #   filters = np.expand_dims(np.expand_dims(filters, 0), 3)
      #   image_var = tf.Variable(filters)
      #   image_filter = tf.identity(image_var)
      #   sess.run(tf.variables_initializer([image_var]))
      #   img_filter_summary_op = tf.summary.image("first_layer_filters",
      #                                            image_filter)
      #   summary_writer.add_summary(sess.run(img_filter_summary_op))
      #   summary_writer.flush()

  return ae
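
Helper sketch: the pretraining loops feed a corrupted input against a clean target through fill_feed_dict_ae, which is not shown here. A rough sketch, assuming `noise` is a per-unit drop probability (masking noise, one common corruption for denoising autoencoders), that the data set exposes an MNIST-style next_batch, and that the module-level FLAGS is in scope; all of these are assumptions, not taken from this page.

import numpy as np

def fill_feed_dict_ae(data_set, input_pl, target_pl, noise=None):
    """Hypothetical helper: one clean minibatch as the target, a corrupted
    copy of it as the input."""
    input_feed, _ = data_set.next_batch(FLAGS.batch_size)
    target_feed = input_feed.copy()  # reconstruct the uncorrupted batch
    if noise:
        # masking noise: zero each unit independently with probability `noise`
        keep = np.random.binomial(1, 1.0 - noise, input_feed.shape)
        input_feed = input_feed * keep
    return {input_pl: input_feed, target_pl: target_feed}
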
Example #4
def main_unsupervised():
    with tf.Graph().as_default() as g:
        sess = tf.Session()

        num_hidden = FLAGS.num_hidden_layers
        ae_hidden_shapes = [
            getattr(FLAGS, "hidden{0}_units".format(j + 1))
            for j in range(num_hidden)
        ]
        #ae_shape = [FLAGS.image_pixels] + ae_hidden_shapes + [FLAGS.num_classes]

        data = read_data_sets_pretraining(FLAGS.data_dir)
        num_train = data.train.num_examples  # No. of training examples

        ae_shape = [data.train.items.shape[1]] + ae_hidden_shapes
        ae = AutoEncoder(ae_shape, sess)

        learning_rates = {
            j: getattr(FLAGS, "pre_layer{0}_learning_rate".format(j + 1))
            for j in range(num_hidden)
        }

        noise = {
            j: getattr(FLAGS, "noise_{0}".format(j + 1))
            for j in range(num_hidden)
        }

        # for i in range(len(ae_shape) - 2):
        for i in range(len(ae_shape) - 1):
            n = i + 1
            with tf.variable_scope("pretrain_{0}".format(n)):
                input_ = tf.placeholder(dtype=tf.float32,
                                        shape=(FLAGS.batch_size, ae_shape[0]),
                                        name='ae_input_pl')
                target_ = tf.placeholder(dtype=tf.float32,
                                         shape=(FLAGS.batch_size, ae_shape[0]),
                                         name='ae_target_pl')
                layer = ae.pretrain_net(input_, n)

                with tf.name_scope("target"):
                    target_for_loss = ae.pretrain_net(
                        target_, n,
                        is_target=True)  #signal that we want to reconstruct

                loss = loss_x_entropy(layer, target_for_loss)

                train_op, global_step = training(loss, learning_rates[i], i)

                # summarizing training variables
                summary_dir = pjoin(FLAGS.summary_dir,
                                    'pretraining_{0}'.format(n))
                summary_writer = tf.summary.FileWriter(
                    summary_dir,
                    graph=sess.graph,
                    flush_secs=FLAGS.flush_secs)
                summary_vars = [
                    ae["biases{0}".format(n)], ae["weights{0}".format(n)]
                ]

                hist_summaries = [
                    tf.summary.histogram(v.op.name, v) for v in summary_vars
                ]
                hist_summaries.append(loss_summaries[i])
                summary_op = tf.summary.merge(hist_summaries)

                # training variables initialization
                vars_to_init = ae.get_variables_to_init(n)
                vars_to_init.append(global_step)
                sess.run(tf.variables_initializer(vars_to_init))

                print("\n\n")
                print("| Training Step | Cross Entropy |  Layer  |   Epoch  |")
                print("|---------------|---------------|---------|----------|")

                minibatches = int(math.ceil(num_train / FLAGS.batch_size))
                # for step in range(FLAGS.pretraining_epochs * num_train):
                for step in range(FLAGS.pretraining_epochs * minibatches):

                    # feed_dict of input (with injected noise) and target
                    feed_dict = fill_feed_dict_ae(data.train, input_, target_,
                                                  noise[i])

                    _, loss_value = sess.run(
                        [train_op, loss],
                        feed_dict=feed_dict)  # unsupervised training and loss

                    if step % 100 == 0:
                        # if step % FLAGS.batch_size == 0:
                        summary_str = sess.run(summary_op, feed_dict=feed_dict)
                        summary_writer.add_summary(summary_str, step)
                        # image_summary_op = \
                        #     tf.summary.image("training_images",
                        #                      tf.reshape(input_,
                        #                                 (FLAGS.batch_size,
                        #                                  FLAGS.image_size,
                        #                                  FLAGS.image_size, 1)),
                        #                      max_outputs=FLAGS.batch_size)
                        #
                        # summary_img_str = sess.run(image_summary_op,
                        #                            feed_dict=feed_dict)
                        # summary_writer.add_summary(summary_img_str)

                        output = "| {0:>13} | {1:13.4f} | Layer {2} | Epoch {3}  |"\
                                 .format(step, loss_value, n, step // num_train + 1)

                        print(output)

            # if i == 0:
            #   filters = sess.run(tf.identity(ae["weights1"]))
            #   np.save(pjoin(FLAGS.chkpt_dir, "filters"), filters)
            #   filters = tile_raster_images(X=filters.T,
            #                                img_shape=(FLAGS.image_size,
            #                                           FLAGS.image_size),
            #                                tile_shape=(10, 10),
            #                                output_pixel_vals=False)
            #   filters = np.expand_dims(np.expand_dims(filters, 0), 3)
            #   image_var = tf.Variable(filters)
            #   image_filter = tf.identity(image_var)
            #   sess.run(tf.variables_initializer([image_var]))
            #   img_filter_summary_op = tf.summary.image("first_layer_filters",
            #                                            image_filter)
            #   summary_writer.add_summary(sess.run(img_filter_summary_op))
            #   summary_writer.flush()

    return ae
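
Usage sketch: taken together, the examples suggest a two-stage driver, unsupervised layer-wise pretraining followed by supervised fine-tuning. A minimal sketch, assuming main_unsupervised and main_supervised (as in Example #2) live in the same module:

if __name__ == '__main__':
    ae = main_unsupervised()  # greedy layer-wise pretraining of the stack
    main_supervised(ae)       # fine-tune the pretrained encoder with labels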