Code example #1
File: model.py Project: shubhampachori12110095/tfweb
def process_images(serialized_images):
    def decode(jpeg_str, central_fraction=0.875, image_size=299):
        decoded = tf.cast(tf.image.decode_jpeg(jpeg_str, channels=3),
                          tf.float32)
        cropped = tf.image.central_crop(decoded,
                                        central_fraction=central_fraction)
        resized = tf.squeeze(
            tf.image.resize_bilinear(tf.expand_dims(cropped, [0]),
                                     [image_size, image_size],
                                     align_corners=False), [0])
        resized.set_shape((image_size, image_size, 3))
        normalized = tf.subtract(tf.multiply(resized, 1.0 / 127.5), 1.0)

        return normalized

    def process(images, image_size=299):
        images = tf.map_fn(decode, images, dtype=tf.float32)

        return images

    images = process(serialized_images)

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, end_points = inception.inception_v3(images,
                                                    num_classes=num_classes,
                                                    is_training=False)

    features = tf.reshape(end_points['PreLogits'], [-1, 2048])
    class_predictions = tf.nn.sigmoid(logits)

    return features, class_predictions
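
A minimal usage sketch for process_images above, assuming the surrounding module already imports tensorflow as tf, tf.contrib.slim as slim, the slim inception nets, and defines num_classes (none of which appear in this excerpt); the checkpoint and image paths are illustrative:

serialized = tf.placeholder(tf.string, shape=[None])
features, class_predictions = process_images(serialized)

saver = tf.train.Saver(slim.get_model_variables('InceptionV3'))
with tf.Session() as sess:
    saver.restore(sess, '/path/to/inception_v3.ckpt')  # illustrative path
    jpeg_bytes = [tf.gfile.GFile('example.jpg', 'rb').read()]  # illustrative image
    feats, preds = sess.run([features, class_predictions],
                            feed_dict={serialized: jpeg_bytes})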
Code example #2
def build_inceptionv3_graph(images, endpoint, is_training, checkpoint,
                            reuse=False):
  """Builds an InceptionV3 model graph.

  Args:
    images: A 4-D float32 `Tensor` of batch images.
    endpoint: String, name of the InceptionV3 endpoint.
    is_training: Boolean, whether to build a training graph (True) or an inference graph (False).
    checkpoint: String, path to the pretrained model checkpoint.
    reuse: Boolean, whether or not we are reusing the embedder.
  Returns:
    inception_output: `Tensor` holding the InceptionV3 output.
    inception_variables: List of inception variables.
    init_fn: Function to initialize the weights from the checkpoint (None when reusing or not training).
  """
  with slim.arg_scope(inception.inception_v3_arg_scope()):
    _, endpoints = inception.inception_v3(
        images, num_classes=1001, is_training=is_training)
    inception_output = endpoints[endpoint]
    inception_variables = slim.get_variables_to_restore()
    inception_variables = [
        i for i in inception_variables if 'global_step' not in i.name]
    if is_training and not reuse:
      init_saver = tf.train.Saver(inception_variables)
      def init_fn(scaffold, sess):
        del scaffold
        init_saver.restore(sess, checkpoint)
    else:
      init_fn = None
    return inception_output, inception_variables, init_fn
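
For context, a hedged sketch of wiring the returned init_fn into a tf.train.Scaffold, which expects exactly the (scaffold, sess) signature used above; the endpoint name, placeholder shape, and checkpoint path are illustrative:

images = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
output, inception_vars, init_fn = build_inceptionv3_graph(
    images, 'Mixed_7c', is_training=True,
    checkpoint='/path/to/inception_v3.ckpt')  # illustrative path
scaffold = tf.train.Scaffold(init_fn=init_fn)
with tf.train.MonitoredTrainingSession(scaffold=scaffold) as sess:
    pass  # training loop goes here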
Code example #3
 def load(self):
     if self.session is None:
         if len(self.labelmap) != self.num_classes:
             logging.error(
                 "{} lines while the number of classes is {}".format(
                     len(self.labelmap), self.num_classes))
         self.label_dict = {}
         for line in tf.gfile.GFile(self.dict_path).readlines():
             words = [word.strip(' "\n') for word in line.split(',', 1)]
             self.label_dict[words[0]] = words[1]
         logging.warning(
             "Loading the network {} , first apply / query will be slower".
             format(self.name))
         config = tf.ConfigProto()
         config.gpu_options.per_process_gpu_memory_fraction = self.gpu_fraction
         g = tf.Graph()
         with g.as_default():
             self.input_image = tf.placeholder(tf.string)
             processed_image = inception_preprocess(self.input_image)
             with slim.arg_scope(inception.inception_v3_arg_scope()):
                 logits, end_points = inception.inception_v3(
                     processed_image,
                     num_classes=self.num_classes,
                     is_training=False)
             self.predictions = end_points[
                 'multi_predictions'] = tf.nn.sigmoid(
                     logits, name='multi_predictions')
             saver = tf_saver.Saver()
             self.session = tf.InteractiveSession(config=config)
             saver.restore(self.session, self.network_path)
Code example #4
def prep_graph():
    global predictions
    global labelmap
    global label_dict
    global sess
    global input_image
    global food_list
    food_list = []
    with open(food_names) as f:
        for x in f:
            food_list.append(x.rstrip())
    g = tf.Graph()
    with g.as_default():
        input_image = tf.placeholder(tf.string)
        processed_image = PreprocessImage(input_image)
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            logits, end_points = inception.inception_v3(processed_image,
                                                        num_classes=6012,
                                                        is_training=False)
        predictions = end_points['multi_predictions'] = tf.nn.sigmoid(
            logits, name='multi_predictions')
        init_op = control_flow_ops.group(
            variables.initialize_all_variables(),
            variables.initialize_local_variables(),
            data_flow_ops.initialize_all_tables())
        saver = tf_saver.Saver()
        sess = tf.Session()
        saver.restore(sess, checkpoint)
        labelmap, label_dict = LoadLabelMaps(6012, labelmap_file,
                                             label_dict_file)
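
A hedged sketch of how the globals prepared by prep_graph are typically consumed afterwards, mirroring the other examples on this page; the image path is illustrative and numpy is assumed to be imported as np:

prep_graph()
img_data = tf.gfile.FastGFile('dish.jpg', 'rb').read()  # illustrative image
scores = np.squeeze(sess.run(predictions, {input_image: img_data}))
for idx in scores.argsort()[-5:][::-1]:
    print('{} (score = {:.2f})'.format(label_dict.get(labelmap[idx], 'unknown'),
                                       scores[idx]))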
Code example #5
def main(args):
    if not os.path.exists(FLAGS.checkpoint):
        tf.logging.fatal(
            'Checkpoint %s does not exist. Have you downloaded it? See tools/download_data.sh',
            FLAGS.checkpoint)
    g = tf.Graph()
    with g.as_default():
        input_image = PreprocessImage(FLAGS.image_path[0])

        with slim.arg_scope(inception.inception_v3_arg_scope()):
            logits, end_points = inception.inception_v3(
                input_image, num_classes=FLAGS.num_classes, is_training=False)

        bottleneck = end_points['PreLogits']
        init_op = control_flow_ops.group(
            variables.initialize_all_variables(),
            variables.initialize_local_variables(),
            data_flow_ops.initialize_all_tables())
        saver = tf_saver.Saver()
        sess = tf.Session()
        saver.restore(sess, FLAGS.checkpoint)

        # Run the evaluation on the image
        bottleneck_eval = np.squeeze(sess.run(bottleneck))

    first = True
    for val in bottleneck_eval:
        if not first:
            sys.stdout.write(",")
        first = False
        sys.stdout.write('{:.3f}'.format(val))
    sys.stdout.write('\n')
Code example #6
def main(args):
  if not os.path.exists(FLAGS.checkpoint):
    tf.logging.fatal(
        'Checkpoint %s does not exist. Have you downloaded it? See tools/download_data.sh',
        FLAGS.checkpoint)
  g = tf.Graph()
  with g.as_default():
    input_image = PreprocessImage(FLAGS.image_path[0])

    with slim.arg_scope(inception.inception_v3_arg_scope()):
      logits, end_points = inception.inception_v3(
          input_image, num_classes=FLAGS.num_classes, is_training=False)

    bottleneck = end_points['PreLogits']
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer(),
                       tf.tables_initializer())
    saver = tf_saver.Saver()
    sess = tf.Session()
    saver.restore(sess, FLAGS.checkpoint)

    # Run the evaluation on the image
    bottleneck_eval = np.squeeze(sess.run(bottleneck))

  first = True
  for val in bottleneck_eval:
    if not first:
      sys.stdout.write(",")
    first = False
    sys.stdout.write('{:.3f}'.format(val))
  sys.stdout.write('\n')
Code example #7
File: temp.py Project: jasonkrone/birds-stn
def main(args):
    if not os.path.exists(FLAGS.checkpoint):
        tf.logging.fatal(
            'Checkpoint %s does not exist. Have you downloaded it? See tools/download_data.sh',
            FLAGS.checkpoint)
    g = tf.Graph()
    with g.as_default():
        input_image = tf.placeholder(tf.string)
        processed_image = PreprocessImage(input_image)

        with slim.arg_scope(inception.inception_v3_arg_scope()):
            logits, end_points = inception.inception_v3(
                processed_image, num_classes=FLAGS.num_classes, is_training=False)

        predictions = end_points['multi_predictions'] = tf.nn.sigmoid(
            logits, name='multi_predictions')

        sess = tf.Session()

        saver = tf_saver.Saver()

        logits_2 = layers.conv2d(
            end_points['PreLogits'],
            FLAGS.num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_final_1x1')

        logits_2 = array_ops.squeeze(logits_2, [1, 2], name='SpatialSqueeze_2')

        predictions_2 = end_points['multi_predictions_2'] = tf.nn.sigmoid(logits_2, name='multi_predictions_2')

        sess.run(tf.global_variables_initializer())

        saver.restore(sess, FLAGS.checkpoint)
Code example #8
    def classify(self, image=None, image_path=None):
        #image = tf.gfile.FastGFile(image_path, 'rb').read()
        image_data = tf.image.decode_jpeg(image, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image_data,
            inception.inception_v3.default_image_size,
            inception.inception_v3.default_image_size,
            is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)

        with slim.arg_scope(inception.inception_v3_arg_scope()):
            logits, end_points = inception.inception_v3(processed_images,
                                                        num_classes=1001,
                                                        is_training=False)

        # In order to get probabilities we apply softmax on the output.
        probabilities = tf.nn.softmax(logits)

        init_fn = slim.assign_from_checkpoint_fn(
            "C:\\Users\\emman\\PycharmProjects\\TensorWebApi\\models\\inception\\inception_v3.ckpt",
            slim.get_model_variables('InceptionV3'))

        init_fn(self.session)

        ops_to_copy = []  # avoid shadowing the built-in list
        for op in self.graph.get_operations():
            for i in [
                    "Conv2d_1a_3x3", "Conv2d_2a_3x3", "Conv2d_2b_3x3",
                    "MaxPool_3a_3x3", "Conv2d_3b_1x1", "Conv2d_4a_3x3",
                    "MaxPool_5a_3x3"
            ]:
                if i in str(op.name):
                    ops_to_copy.append(op)
                    break

        to_graph = tf.Graph()

        for i in ops_to_copy:
            cg.copy_op_to_graph(org_instance=i,
                                to_graph=to_graph,
                                variables="")

        self.graph = to_graph

        # for a in self.graph.get_operations():
        #    print(str(a.name))

        start_time = time.time()

        np_image, network_input, probabilities = self.session.run(
            [image_data, processed_image, probabilities])

        duration = time.time() - start_time
        print("Compute time: " + str(duration))

        probabilities = probabilities[0, 0:]

        return probabilities
Code example #9
def main(img_dir):
 
  if not os.path.exists(FLAGS.checkpoint):
    tf.logging.fatal(
        'Checkpoint %s does not exist. Have you downloaded it? See tools/download_data.sh',
        FLAGS.checkpoint)
  g = tf.Graph()
  with g.as_default():
    input_image = tf.placeholder(tf.string)
    processed_image = PreprocessImage(input_image)

    with slim.arg_scope(inception.inception_v3_arg_scope()):
      logits, end_points = inception.inception_v3(processed_image, num_classes=FLAGS.num_classes, is_training=False)

    predictions = end_points['multi_predictions'] = tf.nn.sigmoid(logits, name='multi_predictions')
    saver = tf_saver.Saver()
    sess = tf.Session()
    saver.restore(sess, FLAGS.checkpoint)
    
    # img_dir = sorted(glob.glob(os.path.join(FLAGS.image_folder_path, '*.jpg')))
    # sorted(img_dir, key = lambda d: d[-7: -4])   
   
  
  # Run the evaluation on the images
  image_path = os.path.join(FLAGS.image_folder_path, img_dir)

  # for i in range(len(img_dir)):
    # image_path = img_dir[i]
  if not os.path.exists(image_path):
    tf.logging.fatal('Input image does not exist %s', image_path)
  img_data = tf.gfile.FastGFile(image_path, "rb").read()
  print(image_path)
  predictions_eval = np.squeeze(sess.run(predictions, {input_image: img_data}))

  # Print top(n) results
  labelmap, label_dict = LoadLabelMaps(FLAGS.num_classes, FLAGS.labelmap, FLAGS.dict)

  top_k = predictions_eval.argsort()[-FLAGS.n:][::-1]
  label_confidence_dic = {}
  display_label_name = []
  display_score = []
  for idx in top_k:
    mid = labelmap[idx]
    display_name = label_dict.get(mid, 'unknown')
    score = predictions_eval[idx]
    label_confidence_dic[display_name] = score

    display_label_name.append(display_name)
    display_score.append(score)

    print('{}: {} - {} (score = {:.2f})'.format(idx, mid, display_name, score))

  # Build the result DataFrame once, after collecting all top-k rows.
  display_dict = {'name': display_label_name, 'score': display_score}
  display_df = pd.DataFrame(display_dict)
  return display_df
Code example #10
def inception_net(images, num_classes, for_training=False, reuse=False):
    """Build Inception v3 model architecture."""

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, endpoints = inception.inception_v3(images,
                                                   dropout_keep_prob=0.8,
                                                   num_classes=num_classes,
                                                   is_training=for_training,
                                                   reuse=reuse,
                                                   scope='InceptionV3')

    return logits, endpoints
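
A hedged sketch of how the reuse flag above is typically exercised to share the InceptionV3 weights between a training tower and an evaluation tower; the placeholder names are illustrative:

train_images = tf.placeholder(tf.float32, [None, 299, 299, 3])
eval_images = tf.placeholder(tf.float32, [None, 299, 299, 3])

train_logits, _ = inception_net(train_images, num_classes=1001, for_training=True)
eval_logits, _ = inception_net(eval_images, num_classes=1001, reuse=True)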
Code example #11
File: labeler.py Project: Vages/DeepLearningExam
def label(image_path,
          checkpoint="openimages_dataset/data/2016_08/model.ckpt",
          num_classes=6012,
          labelmap_path="openimages_dataset/data/2016_08/labelmap.txt",
          dict_path="openimages_dataset/dict.csv",
          threshold=0.5,
          rounding_digits=1):
    if not os.path.exists(checkpoint):
        tf.logging.fatal(
            'Checkpoint %s does not exist. Have you downloaded it? See tools/download_data.sh',
            checkpoint)
    g = tf.Graph()
    with g.as_default():
        input_image = PreprocessImage(image_path)

        with slim.arg_scope(inception.inception_v3_arg_scope()):
            logits, end_points = inception.inception_v3(
                input_image, num_classes=num_classes, is_training=False)

        predictions = end_points['multi_predictions'] = tf.nn.sigmoid(
            logits, name='multi_predictions')
        init_op = control_flow_ops.group(
            variables.initialize_all_variables(),
            variables.initialize_local_variables(),
            data_flow_ops.initialize_all_tables())
        saver = tf_saver.Saver()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        saver.restore(sess, checkpoint)

        # Run the evaluation on the image
        predictions_eval = np.squeeze(sess.run(predictions))

    # Print top(n) results
    labelmap, label_dict = LoadLabelMaps(num_classes, labelmap_path, dict_path)

    top_k = predictions_eval.argsort()[:][::-1]
    returned_labels = []
    for idx in top_k:
        mid = labelmap[idx]
        display_name = label_dict.get(mid, 'unknown')
        score = predictions_eval[idx]
        if score < threshold:
            if returned_labels:
                break
            else:
                threshold -= 0.1
                if threshold < 0.1:
                    break
        returned_labels.append((display_name, score))

    return returned_labels
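
A hedged usage sketch, assuming the default checkpoint and label files referenced above are present; the image path is illustrative:

for name, score in label('my_photo.jpg'):
    print('{}: {:.2f}'.format(name, score))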
Code example #12
def main(args):
    if not os.path.exists(FLAGS.checkpoint):
        tf.logging.fatal(
            'Checkpoint %s does not exist. Have you downloaded it? See tools/download_data.sh',
            FLAGS.checkpoint)
    g = tf.Graph()
    with g.as_default():
        input_image = tf.placeholder(tf.string)
        processed_image = PreprocessImage(input_image)

        with slim.arg_scope(inception.inception_v3_arg_scope()):
            logits, end_points = inception.inception_v3(
                processed_image,
                num_classes=FLAGS.num_classes,
                is_training=False)

        predictions = end_points['multi_predictions'] = tf.nn.sigmoid(
            logits, name='multi_predictions')
        init_op = control_flow_ops.group(
            variables.initialize_all_variables(),
            variables.initialize_local_variables(),
            data_flow_ops.initialize_all_tables())
        saver = tf_saver.Saver()
        sess = tf.Session()
        saver.restore(sess, FLAGS.checkpoint)

        # Run the evaluation on the images
        for image_path in FLAGS.image_path:
            if not os.path.exists(image_path):
                tf.logging.fatal('Input image does not exist %s',
                                 FLAGS.image_path[0])
            img_data = tf.gfile.FastGFile(image_path).read()
            print(image_path)
            predictions_eval = np.squeeze(
                sess.run(predictions, {input_image: img_data}))

            # Print top(n) results
            labelmap, label_dict = LoadLabelMaps(FLAGS.num_classes,
                                                 FLAGS.labelmap, FLAGS.dict)

            top_k = predictions_eval.argsort()[-FLAGS.n:][::-1]
            for idx in top_k:
                mid = labelmap[idx]
                display_name = label_dict.get(mid, 'unknown')
                score = predictions_eval[idx]
                print('{}: {} - {} (score = {:.2f})'.format(
                    idx, mid, display_name, score))
            print()
Code example #13
 def load(self):
     if self.session is None:
         logging.warning("Loading the network {} , first apply / query will be slower".format(self.name))
         config = tf.ConfigProto()
         config.gpu_options.per_process_gpu_memory_fraction = self.gpu_fraction
         network_path = os.path.abspath(__file__).split('annotator.py')[0]+'data/2016_08/model.ckpt'
         g = tf.Graph()
         with g.as_default():
             self.input_image = tf.placeholder(tf.string)
             processed_image = inception_preprocess(self.input_image)
             with slim.arg_scope(inception.inception_v3_arg_scope()):
                 logits, end_points = inception.inception_v3(processed_image, num_classes=self.num_classes, is_training=False)
             self.predictions = end_points['multi_predictions'] = tf.nn.sigmoid(logits, name='multi_predictions')
             saver = tf_saver.Saver()
             self.session = tf.InteractiveSession(config=config)
             saver.restore(self.session, network_path)
Code example #14
 def load(self):
     if self.session is None:
         logging.warning("Loading the network {} , first apply / query will be slower".format(self.name))
         config = tf.ConfigProto()
         config.gpu_options.per_process_gpu_memory_fraction = 0.15
         network_path = os.path.abspath(__file__).split('annotator.py')[0]+'data/2016_08/model.ckpt'
         g = tf.Graph()
         with g.as_default():
             self.input_image = tf.placeholder(tf.string)
             processed_image = inception_preprocess(self.input_image)
             with slim.arg_scope(inception.inception_v3_arg_scope()):
                 logits, end_points = inception.inception_v3(processed_image, num_classes=self.num_classes, is_training=False)
             self.predictions = end_points['multi_predictions'] = tf.nn.sigmoid(logits, name='multi_predictions')
             saver = tf_saver.Saver()
             self.session = tf.InteractiveSession(config=config)
             saver.restore(self.session, network_path)
Code example #15
File: model.py Project: sign4bb/sign_4_bb
    def _build_model(self):
        self.x_input = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
        self.y_input = tf.placeholder(tf.int64, shape=[None])

        with slim.arg_scope(inception_v3_arg_scope()):
            logits, _ = inception_v3(self.x_input, num_classes=1001,
                                     is_training= self.mode == 'train')

        self.pre_softmax = logits
        self.y_pred = tf.argmax(self.pre_softmax, 1)

        self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=self.y_input, logits=self.pre_softmax
        )
        self.xent = tf.reduce_sum(self.y_xent)

        self.correct_prediction = tf.equal(self.y_pred, self.y_input)

        self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
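
A hedged sketch of evaluating the model above on one batch, assuming an instance whose graph was built by _build_model and whose InceptionV3 weights have already been restored into the session; batch_images and batch_labels are illustrative numpy arrays:

# model: instance built via _build_model (assumption);
# batch_images / batch_labels: illustrative numpy arrays
with tf.Session() as sess:
    # ... restore the InceptionV3 checkpoint into sess here ...
    acc = sess.run(model.accuracy,
                   feed_dict={model.x_input: batch_images,   # float32, shape (N, 299, 299, 3)
                              model.y_input: batch_labels})  # int64, shape (N,)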
Code example #16
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import inception
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.framework import graph_util
slim = tf.contrib.slim

input_checkpoint = '/home/johnny/Documents/TF_CONFIG/finetune/resv2/model.ckpt'
output_file = 'inference_graph.pb'

g = tf.Graph()
with g.as_default():
    image = tf.placeholder(name='input',
                           dtype=tf.float32,
                           shape=[1, 299, 299, 3])
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, end_points = inception.inception_v3(image,
                                                    num_classes=6012,
                                                    is_training=False)
        predictions = tf.nn.sigmoid(logits, name='multi_predictions')
        saver = tf_saver.Saver()
        input_graph_def = g.as_graph_def()
        sess = tf.Session()
        saver.restore(sess, input_checkpoint)

        output_node_names = "multi_predictions"
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            input_graph_def,  # The graph_def is used to retrieve the nodes
            output_node_names.split(","))  # The output node names select the useful nodes

        # The original snippet is cut off at this point; a typical next step
        # writes the frozen graph to the output_file defined above:
        with tf.gfile.GFile(output_file, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
Code example #17
def train(dataset, epochs, batch_size, figsize=(5, 5)):

    gen_samples, train_accuracies, test_accuracies = [], [], []
    steps = 0
    z_dim = 100

    with tf.Graph().as_default():

        ###### placeholders

        raw_input = tf.placeholder(dtype=tf.float32, shape=(64, 64, 3))
        raw_inputs_x = tf.placeholder(dtype=tf.float32,
                                      shape=(None, 299, 299, 3))

        inputs_real = tf.placeholder(tf.float32, (None, 64, 64, 3),
                                     name='input_real')

        inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')

        y = tf.placeholder(tf.int32, (None), name='y')

        label_mask = tf.placeholder(tf.int32, (None), name='label_mask')

        input_inception_real = tf.placeholder(tf.float32, (None, 8, 8, 2048),
                                              name='input_inception_real')
        input_inception_fake = tf.placeholder(tf.float32, (None, 8, 8, 2048),
                                              name='input_inception_fake')

        drop_rate = tf.placeholder_with_default(.5, (), "drop_rate")

        lr_rate = 0.0003
        num_classes = 10
        learning_rate = tf.Variable(lr_rate, trainable=False)
        sample_z = np.random.normal(0, 1, size=(50, z_dim))  # z_dim defined above (z_size is not defined in this excerpt)

        g_out = get_g_out(input_z=inputs_z, output_dim=real_size[2], alpha=0.2)

        loss_results = model_loss(inputs_real,
                                  inputs_z,
                                  real_size[2],
                                  y,
                                  num_classes,
                                  label_mask=label_mask,
                                  alpha=0.2,
                                  drop_rate=drop_rate,
                                  x_inception_r=input_inception_real,
                                  x_inception_f=input_inception_fake)

        d_loss, g_loss, correct, masked_correct, samples, pred_class = loss_results

        d_opt, g_opt, shrink_lr = model_opt(d_loss,
                                            g_loss,
                                            learning_rate,
                                            beta1=0.5)

        with slim.arg_scope(inception.inception_v3_arg_scope()):
            logits_inception, _ = inception.inception_v3_base(
                raw_inputs_x, final_endpoint='Mixed_7c')  # Mixed_7c

        init_fn = slim.assign_from_checkpoint_fn(
            os.path.join(checkpoints_dir, 'inception_v3.ckpt'),
            slim.get_model_variables('InceptionV3'))

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            init_fn(sess)
            for e in range(epochs):
                print("Epoch", e)

                t1e = time.time()
                num_examples = 0
                num_correct = 0
                for x, _y, _label_mask in dataset.batches(batch_size):
                    assert 'int' in str(_y.dtype)
                    steps += 1
                    num_examples += _label_mask.sum()

                    # Sample random noise for G
                    batch_z = np.random.normal(0, 1, size=(x.shape[0], z_dim))

                    # Run optimizers
                    t1 = time.time()

                    gen_samples_out = sess.run(g_out,
                                               feed_dict={
                                                   inputs_real: x,
                                                   inputs_z: batch_z
                                               })

                    processed_batch_r = np.empty((0, 299, 299, 3))
                    processed_image = inception_preprocessing.preprocess_image(
                        raw_input, height=299, width=299, is_training=False)
                    # real samples
                    for i in range(x.shape[0]):
                        temp = sess.run(processed_image,
                                        feed_dict={raw_input: x[i, :, :, :]})
                        processed_batch_r = np.append(
                            processed_batch_r,
                            np.reshape(temp, newshape=(1, 299, 299, 3)),
                            axis=0)

                    # fake samples
                    processed_batch_f = np.empty((0, 299, 299, 3))
                    for i in range(x.shape[0]):
                        temp = sess.run(
                            processed_image,
                            feed_dict={raw_input: gen_samples_out[i, :, :, :]})
                        processed_batch_f = np.append(
                            processed_batch_f,
                            np.reshape(temp, newshape=(1, 299, 299, 3)),
                            axis=0)

                    final_op_r = sess.run(
                        logits_inception,
                        feed_dict={raw_inputs_x: processed_batch_r})
                    final_op_f = sess.run(
                        logits_inception,
                        feed_dict={raw_inputs_x: processed_batch_f})

                    _, _, _correct = sess.run(
                        [d_opt, g_opt, masked_correct],
                        feed_dict={
                            inputs_real: x,
                            inputs_z: batch_z,
                            y: _y,
                            label_mask: _label_mask,
                            input_inception_real: final_op_r,
                            input_inception_fake: final_op_f
                        })

                    t2 = time.time()
                    num_correct += _correct

                sess.run([shrink_lr])

                train_accuracy = num_correct / float(num_examples)

                print("\t\tClassifier train accuracy: ", train_accuracy)

                num_examples = 0
                num_correct = 0
                for x, _y in dataset.batches(batch_size, which_set="valid"):
                    assert 'int' in str(_y.dtype)
                    num_examples += x.shape[0]

                    processed_batch_r = np.empty((0, 299, 299, 3))
                    processed_image = inception_preprocessing.preprocess_image(
                        raw_input, height=299, width=299, is_training=False)
                    # real samples
                    for i in range(x.shape[0]):
                        temp = sess.run(processed_image,
                                        feed_dict={raw_input: x[i, :, :, :]})
                        processed_batch_r = np.append(
                            processed_batch_r,
                            np.reshape(temp, newshape=(1, 299, 299, 3)),
                            axis=0)

                    final_op_r = sess.run(
                        logits_inception,
                        feed_dict={raw_inputs_x: processed_batch_r})

                    _correct, = sess.run(
                        [correct],
                        feed_dict={
                            inputs_real: x,
                            y: _y,
                            drop_rate: 0.,
                            input_inception_real: final_op_r
                        })
                    num_correct += _correct

                test_accuracy = num_correct / float(num_examples)
                print("\t\tClassifier test accuracy", test_accuracy)
                print("\t\tStep time: ", t2 - t1)
                t2e = time.time()
                print("\t\tEpoch time: ", t2e - t1e)

                train_accuracies.append(train_accuracy)
                test_accuracies.append(test_accuracy)

                gen_sample = sess.run(g_out, feed_dict={inputs_z: sample_z})
                gen_samples.append(gen_sample)

            y_predictions = []
            y_target = []
            num_examples = 0
            num_correct = 0
            for x, _y in dataset.batches(batch_size, which_set="test"):
                num_examples += x.shape[0]
                processed_batch_r = np.empty((0, 299, 299, 3))
                processed_image = inception_preprocessing.preprocess_image(
                    raw_input, height=299, width=299, is_training=False)

                # real samples
                for i in range(x.shape[0]):
                    temp = sess.run(processed_image,
                                    feed_dict={raw_input: x[i, :, :, :]})
                    processed_batch_r = np.append(processed_batch_r,
                                                  np.reshape(temp,
                                                             newshape=(1, 299,
                                                                       299,
                                                                       3)),
                                                  axis=0)

                final_op_r = sess.run(
                    logits_inception,
                    feed_dict={raw_inputs_x: processed_batch_r})

                _correct, _y_pred, = sess.run(
                    [correct, pred_class],
                    feed_dict={
                        inputs_real: x,
                        y: _y,
                        drop_rate: 0.,
                        input_inception_real: final_op_r
                    })
                num_correct += _correct

                y_predictions.append(_y_pred)
                y_target.append(_y)
            test_accuracy = num_correct / float(num_examples)
            print('Testing...')
            print("\t\tClassifier test accuracy", test_accuracy)
            print("\t\tStep time: ", t2 - t1)
            t2e = time.time()
            print("\t\tEpoch time: ", t2e - t1e)

    return train_accuracies, test_accuracies, gen_samples, y_predictions, y_target
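
A minimal invocation sketch, assuming a dataset object exposing the batches() iterator for the train, valid, and test splits that the loop above relies on:

train_acc, test_acc, samples, y_pred, y_true = train(dataset, epochs=25, batch_size=64)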