Example #1
  def testRaiseValueErrorWithInvalidDepthMultiplier(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    with self.assertRaises(ValueError):
      _ = inception_v3.inception_v3(inputs, num_classes, depth_multiplier=-0.1)
    with self.assertRaises(ValueError):
      _ = inception_v3.inception_v3(inputs, num_classes, depth_multiplier=0.0)
Example #2
  def build_inception_graph(self):
    """Builds an inception graph and add the necessary input & output tensors.

      To use other Inception models, modify this file. The preprocessing must
      also be modified accordingly.

      See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
      details about InceptionV3.

    Returns:
      input_jpeg: A placeholder for jpeg string batch that allows feeding the
                  Inception layer with image bytes for prediction.
      inception_embeddings: The embeddings tensor.
    """

    # These constants are set by Inception v3's expectations.
    height = 299
    width = 299
    channels = 3

    image_str_tensor = tf.placeholder(tf.string, shape=[None])

    # The CloudML Prediction API always "feeds" the Tensorflow graph with
    # dynamic batch sizes e.g. (?,).  decode_jpeg only processes scalar
    # strings because it cannot guarantee a batch of images would have
    # the same output size.  We use tf.map_fn to give decode_jpeg a scalar
    # string from dynamic batches.
    def decode_and_resize(image_str_tensor):
      """Decodes jpeg string, resizes it and returns a uint8 tensor."""
      image = tf.image.decode_jpeg(image_str_tensor, channels=channels)
      # Note resize expects a batch_size, but tf.map_fn suppresses that index,
      # thus we have to expand then squeeze.  Resize returns float32 in the
      # range [0, uint8_max].
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(
          image, [height, width], align_corners=False)
      image = tf.squeeze(image, squeeze_dims=[0])
      image = tf.cast(image, dtype=tf.uint8)
      return image

    image = tf.map_fn(
        decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)
    # convert_image_dtype also scales [0, uint8_max] -> [0, 1).
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)

    # Then shift images to [-1, 1) for Inception.
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)

    # Build Inception layers, which expect a tensor of type float from [-1, 1)
    # and shape [batch_size, height, width, channels].
    with slim.arg_scope(inception.inception_v3_arg_scope()):
      _, end_points = inception.inception_v3(image, is_training=False)

    inception_embeddings = end_points['PreLogits']
    inception_embeddings = tf.squeeze(
        inception_embeddings, [1, 2], name='SpatialSqueeze')
    return image_str_tensor, inception_embeddings
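A minimal usage sketch for the graph above, assuming the method lives on a wrapper class named Model and that checkpoint_path points to an Inception v3 checkpoint file (both names are assumptions for illustration, not part of the original example):

import tensorflow as tf
from tensorflow.contrib import slim

checkpoint_path = 'inception_v3.ckpt'  # assumed checkpoint location
model = Model()  # hypothetical class exposing build_inception_graph()
input_jpeg, embeddings = model.build_inception_graph()

# Restore only the InceptionV3 variables created by the graph above.
saver = tf.train.Saver(slim.get_model_variables('InceptionV3'))
with tf.Session() as sess:
    saver.restore(sess, checkpoint_path)
    with open('image.jpg', 'rb') as f:
        jpeg_bytes = f.read()
    # The placeholder has shape [None], so feed a batch (here of size 1).
    vecs = sess.run(embeddings, feed_dict={input_jpeg: [jpeg_bytes]})
    print(vecs.shape)  # (1, 2048): the squeezed PreLogits embedding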
Example #3
  def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000

    train_inputs = random_ops.random_uniform(
        (train_batch_size, height, width, 3))
    inception_v3.inception_v3(train_inputs, num_classes)
    eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception_v3.inception_v3(
        eval_inputs, num_classes, is_training=False, reuse=True)
    predictions = math_ops.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEqual(output.shape, (eval_batch_size,))
Example #4
  def testLogitsNotSqueezed(self):
    num_classes = 25
    images = random_ops.random_uniform([1, 299, 299, 3])
    logits, _ = inception_v3.inception_v3(
        images, num_classes=num_classes, spatial_squeeze=False)

    with self.test_session() as sess:
      variables.global_variables_initializer().run()
      logits_out = sess.run(logits)
      self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
Example #5
  def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    _, end_points = inception_v3.inception_v3(inputs, num_classes)

    endpoint_keys = [
        key for key in end_points.keys()
        if key.startswith('Mixed') or key.startswith('Conv')
    ]

    _, end_points_with_multiplier = inception_v3.inception_v3(
        inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)

    for key in endpoint_keys:
      original_depth = end_points[key].get_shape().as_list()[3]
      new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
      self.assertEqual(2.0 * original_depth, new_depth)
Example #6
  def _image_to_vec(image_str_tensor):

    def _decode_and_resize(image_tensor):
      """Decodes jpeg string, resizes it and returns a uint8 tensor."""

      # These constants are set by Inception v3's expectations.
      height = 299
      width = 299
      channels = 3

      image_tensor = tf.where(tf.equal(image_tensor, ''), IMAGE_DEFAULT_STRING, image_tensor)

      # Fork on whether the image_tensor value is a file path or a base64-encoded string.
      slash_positions = tf.equal(tf.string_split([image_tensor], delimiter="").values, '/')
      is_file_path = tf.cast(tf.count_nonzero(slash_positions), tf.bool)

      # The following two functions are required for tf.cond. Note that we cannot replace them
      # with inline lambdas: according to the TF docs, with an inline lambda both branches of
      # the condition would be executed. The workaround is to use a function call.
      def _read_file():
        return tf.read_file(image_tensor)

      def _decode_base64():
        return tf.decode_base64(image_tensor)

      image = tf.cond(is_file_path, lambda: _read_file(), lambda: _decode_base64())
      image = tf.image.decode_jpeg(image, channels=channels)
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
      image = tf.squeeze(image, squeeze_dims=[0])
      image = tf.cast(image, dtype=tf.uint8)
      return image

    # The CloudML Prediction API always "feeds" the Tensorflow graph with
    # dynamic batch sizes e.g. (?,).  decode_jpeg only processes scalar
    # strings because it cannot guarantee a batch of images would have
    # the same output size.  We use tf.map_fn to give decode_jpeg a scalar
    # string from dynamic batches.
    image = tf.map_fn(_decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # "gradients_[feature_name]" will be used for computing integrated gradients.
    image = tf.identity(image, name='gradients_' + feature_name)
    image = tf.subtract(image, 0.5)
    inception_input = tf.multiply(image, 2.0)

    # Build Inception layers, which expect a tensor of type float from [-1, 1)
    # and shape [batch_size, height, width, channels].
    with tf.contrib.slim.arg_scope(inception_v3_arg_scope()):
      _, end_points = inception_v3(inception_input, is_training=False)

    embeddings = end_points['PreLogits']
    inception_embeddings = tf.squeeze(embeddings, [1, 2], name='SpatialSqueeze')
    return inception_embeddings
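Example 6 references two module-level names that are not shown: IMAGE_DEFAULT_STRING (the string substituted for empty inputs) and feature_name (used to name the gradients tensor). The sketch below is only a guess at how the surrounding module might define them and call the function; the default image path and the import are assumptions:

import base64

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import (
    inception_v3, inception_v3_arg_scope)  # assumed import used by _image_to_vec

feature_name = 'image'  # assumed name of this image column

# Assumed default: a small JPEG encoded with the web-safe base64 alphabet so
# that tf.decode_base64 can handle it when the incoming string is empty.
with open('default.jpg', 'rb') as f:
    IMAGE_DEFAULT_STRING = base64.urlsafe_b64encode(f.read()).decode('ascii')

image_str_tensor = tf.placeholder(tf.string, shape=[None])
embeddings = _image_to_vec(image_str_tensor)  # builds the Inception graph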
Example #7
  def testBuildClassificationNetwork(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    logits, end_points = inception_v3.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('Predictions' in end_points)
    self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                         [batch_size, num_classes])
Example #8
  def testHalfSizeImages(self):
    batch_size = 5
    height, width = 150, 150
    num_classes = 1000

    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    logits, end_points = inception_v3.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    pre_pool = end_points['Mixed_7c']
    self.assertListEqual(pre_pool.get_shape().as_list(),
                         [batch_size, 3, 3, 2048])
Example #9
  def testEvaluation(self):
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000

    eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
    logits, _ = inception_v3.inception_v3(
        eval_inputs, num_classes, is_training=False)
    predictions = math_ops.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEqual(output.shape, (batch_size,))
Example #10
  def testUnknownBatchSize(self):
    batch_size = 1
    height, width = 299, 299
    num_classes = 1000

    inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
    logits, _ = inception_v3.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
    images = random_ops.random_uniform((batch_size, height, width, 3))

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch_size, num_classes))
Example #11
 def testUnknownImageShape(self):
   ops.reset_default_graph()
   batch_size = 2
   height, width = 299, 299
   num_classes = 1000
   input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
   with self.test_session() as sess:
     inputs = array_ops.placeholder(
         dtypes.float32, shape=(batch_size, None, None, 3))
     logits, end_points = inception_v3.inception_v3(inputs, num_classes)
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     pre_pool = end_points['Mixed_7c']
     feed_dict = {inputs: input_np}
     variables.global_variables_initializer().run()
     pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
     self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
Example #12
def main(_):
    # start_time = datetime.datetime.now()
    label_file_name = os.path.join(args.label_dir, "labels.txt")
    label_dict = get_class_label_dict(label_file_name)
    classes_num = len(label_dict)
    test_feeder = DataIterator(data_dir=args.dataset_path)
    total_size = len(test_feeder.labels)
    count = 0
    # get model from model registry
    model_path = Model.get_model_path(args.model_name)
    with tf.Session() as sess:
        test_images = test_feeder.input_pipeline(batch_size=args.batch_size)
        with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
            input_images = tf.placeholder(tf.float32, [args.batch_size, image_size, image_size, num_channel])
            logits, _ = inception_v3.inception_v3(input_images,
                                                  num_classes=classes_num,
                                                  is_training=False)
            probabilities = tf.argmax(logits, 1)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver = tf.train.Saver()
        saver.restore(sess, model_path)
        out_filename = os.path.join(args.output_dir, "result-labels.txt")
        with open(out_filename, "w") as result_file:
            i = 0
            while count < total_size and not coord.should_stop():
                test_images_batch = sess.run(test_images)
                file_names_batch = test_feeder.file_paths[i * args.batch_size:
                                                          min(test_feeder.size, (i + 1) * args.batch_size)]
                results = sess.run(probabilities, feed_dict={input_images: test_images_batch})
                new_add = min(args.batch_size, total_size - count)
                count += new_add
                i += 1
                for j in range(new_add):
                    result_file.write(os.path.basename(file_names_batch[j]) + ": " + label_dict[results[j]] + "\n")
                result_file.flush()
            coord.request_stop()
            coord.join(threads)

        # copy the file to artifacts
        shutil.copy(out_filename, "./outputs/")
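Example 12 depends on helpers defined elsewhere in that script (get_class_label_dict, DataIterator) and on Azure ML's Model.get_model_path. As a rough sketch of what get_class_label_dict might look like, assuming labels.txt holds one class name per line and the line number is the class index:

def get_class_label_dict(label_file_name):
    """Maps class index -> label string (assumed one label per line)."""
    label_dict = {}
    with open(label_file_name) as f:
        for index, line in enumerate(f):
            label_dict[index] = line.strip()
    return label_dict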
Example #13
  def build_graph(self):
    """Forms the core by building a wrapper around the inception graph.

      Here we add the necessary input & output tensors to decode jpegs,
      serialize embeddings, restore from checkpoint, etc.

      To use other Inception models, modify this file. Note that to use other
      models besides Inception, you should make sure input_shape matches
      their input. Resizing or other modifications may be necessary as well.
      See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
      details about InceptionV3.

    Returns:
      input_jpeg: A tensor containing raw image bytes as the input layer.
      embedding: The embeddings tensor, that will be materialized later.
    """

    input_jpeg = tf.placeholder(tf.string, shape=None)
    image = tf.image.decode_jpeg(input_jpeg, channels=self.CHANNELS)

    # Note resize expects a batch_size, but we are feeding a single image.
    # So we have to expand then squeeze.  Resize returns float32 in the
    # range [0, uint8_max]
    image = tf.expand_dims(image, 0)

    # convert_image_dtype also scales [0, uint8_max] -> [0, 1).
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.image.resize_bilinear(
        image, [self.HEIGHT, self.WIDTH], align_corners=False)

    # Then rescale range to [-1, 1) for Inception.
    image = tf.subtract(image, 0.5)
    inception_input = tf.multiply(image, 2.0)

    # Build Inception layers, which expect a tensor of type float from [-1, 1)
    # and shape [batch_size, height, width, channels].
    with slim.arg_scope(inception.inception_v3_arg_scope()):
      _, end_points = inception.inception_v3(inception_input, is_training=False)

    embedding = end_points['PreLogits']
    return input_jpeg, embedding
Example #14
  def testBuildEndPoints(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    _, end_points = inception_v3.inception_v3(inputs, num_classes)
    self.assertTrue('Logits' in end_points)
    logits = end_points['Logits']
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('AuxLogits' in end_points)
    aux_logits = end_points['AuxLogits']
    self.assertListEqual(aux_logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('Mixed_7c' in end_points)
    pre_pool = end_points['Mixed_7c']
    self.assertListEqual(pre_pool.get_shape().as_list(),
                         [batch_size, 8, 8, 2048])
    self.assertTrue('PreLogits' in end_points)
    pre_logits = end_points['PreLogits']
    self.assertListEqual(pre_logits.get_shape().as_list(),
                         [batch_size, 1, 1, 2048])
Example #15
  def _image_to_vec(image_str_tensor):

    def _decode_and_resize(image_str_tensor):
      """Decodes jpeg string, resizes it and returns a uint8 tensor."""

      # These constants are set by Inception v3's expectations.
      height = 299
      width = 299
      channels = 3

      image = tf.where(tf.equal(image_str_tensor, ''), IMAGE_DEFAULT_STRING, image_str_tensor)
      image = tf.decode_base64(image)
      image = tf.image.decode_jpeg(image, channels=channels)
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
      image = tf.squeeze(image, squeeze_dims=[0])
      image = tf.cast(image, dtype=tf.uint8)
      return image

    # The CloudML Prediction API always "feeds" the Tensorflow graph with
    # dynamic batch sizes e.g. (?,).  decode_jpeg only processes scalar
    # strings because it cannot guarantee a batch of images would have
    # the same output size.  We use tf.map_fn to give decode_jpeg a scalar
    # string from dynamic batches.
    image = tf.map_fn(_decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.subtract(image, 0.5)
    inception_input = tf.multiply(image, 2.0)

    # Build Inception layers, which expect a tensor of type float from [-1, 1)
    # and shape [batch_size, height, width, channels].
    with tf.contrib.slim.arg_scope(inception_v3_arg_scope()):
      _, end_points = inception_v3(inception_input, is_training=False)

    embeddings = end_points['PreLogits']
    inception_embeddings = tf.squeeze(embeddings, [1, 2], name='SpatialSqueeze')
    return inception_embeddings
Example #16
def main():
    processed_data = np.load(INPUT_DATA, allow_pickle=True)
    train_images = processed_data[0]
    train_labels = processed_data[1]
    validate_images = processed_data[2]
    validate_labels = processed_data[3]
    test_images = processed_data[4]
    test_labels = processed_data[5]
    print(
        'There are %d training examples, %d validation examples, and %d testing examples.'
        % (len(train_images), len(validate_images), len(test_images)))

    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='output_labels')

    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(
            inputs=images, num_classes=NUM_CLASSES)  # inspect the shapes of logits and _

    # Get the variables that need to be trained
    trainable_variables = get_trainable_variables()
    # Define the cross-entropy loss
    tf.losses.softmax_cross_entropy(onehot_labels=tf.one_hot(
        labels, NUM_CLASSES),
                                    logits=logits)
    total_loss = tf.losses.get_total_loss()
    # Define the training step
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(total_loss)

    # Compute the accuracy
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Define the function that loads the pretrained model
    load_func = slim.assign_from_checkpoint_fn(INCEPTION_V3,
                                               get_tuned_variables(),
                                               ignore_missing_vars=True)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        print('Loading tuned variables from ', INCEPTION_V3)
        load_func(sess)

        start = 0
        end = start + BATCH_SIZE
        for i in range(TRAINING_STEPS):
            _, loss = sess.run([train_step, total_loss],
                               feed_dict={
                                   images: train_images[start:end],
                                   labels: train_labels[start:end]
                               })
            if i % 30 == 0 or i + 1 == TRAINING_STEPS:
                saver.save(sess, MODEL_PATH, global_step=i)
                validation_accuracy = sess.run(accuracy,
                                               feed_dict={
                                                   images: validate_images,
                                                   labels: validate_labels
                                               })
                print('Step %d, validation accuracy is %f' %
                      (i, validation_accuracy))
            start = end
            if start == len(train_images):
                start = 0
            end = min(start + BATCH_SIZE, len(train_images))

        test_accuracy = sess.run(accuracy,
                                 feed_dict={
                                     images: test_images,
                                     labels: test_labels
                                 })
    print('Finally, test accuracy is ', test_accuracy)
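Several of the fine-tuning scripts here (Examples 16, 18, 21 and later ones) call helpers like get_trainable_variables() and get_tuned_variables() without showing them. The functions below are a minimal sketch of the usual pattern, assuming only the InceptionV3 Logits/AuxLogits scopes are retrained while everything else is restored from the checkpoint; the scope lists are assumptions, not taken from the original scripts:

import tensorflow as tf
from tensorflow.contrib import slim

# Scopes excluded from the checkpoint restore and retrained from scratch
# (assumed convention; adjust to whichever layers you actually want to tune).
CHECKPOINT_EXCLUDE_SCOPES = ['InceptionV3/Logits', 'InceptionV3/AuxLogits']
TRAINABLE_SCOPES = ['InceptionV3/Logits', 'InceptionV3/AuxLogits']


def get_tuned_variables():
    """Returns the variables to load from the pre-trained checkpoint."""
    variables_to_restore = []
    for var in slim.get_model_variables():
        excluded = any(
            var.op.name.startswith(scope) for scope in CHECKPOINT_EXCLUDE_SCOPES)
        if not excluded:
            variables_to_restore.append(var)
    return variables_to_restore


def get_trainable_variables():
    """Returns the variables the optimizer is allowed to update."""
    variables_to_train = []
    for scope in TRAINABLE_SCOPES:
        variables_to_train.extend(
            tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope))
    return variables_to_train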
Example #17
    def build_model(self):
        """
        :return:
        """
        """
        Helper Variables
        """
        self.global_step_tensor = tf.Variable(0,
                                              trainable=False,
                                              name='global_step')
        self.global_step_inc = self.global_step_tensor.assign(
            self.global_step_tensor + 1)
        self.global_epoch_tensor = tf.Variable(0,
                                               trainable=False,
                                               name='global_epoch')
        self.global_epoch_inc = self.global_epoch_tensor.assign(
            self.global_epoch_tensor + 1)
        """
        Inputs to the network
        """
        print("input to inception")
        with tf.variable_scope('inputs'):
            self.x, self.y = self.data_loader.get_input()
            self.is_training = tf.placeholder(tf.bool, name='Training_flag')
        tf.add_to_collection('inputs', self.x)
        tf.add_to_collection('inputs', self.y)
        tf.add_to_collection('inputs', self.is_training)
        """
        Network Architecture
        """

        print("network arch inception")
        with tf.variable_scope('network'):
            self.logits, end_points = inception_v3.inception_v3(
                inputs=self.x, num_classes=self.num_classes)
            #self.logits = tf.squeeze(self.logits, axis=[1, 2])

            print("network output inception")
            with tf.variable_scope('out'):
                # self.out = tf.squeeze(end_points['predictions'], axis=[1,2])
                self.out = tf.nn.softmax(self.logits, dim=-1)

            tf.add_to_collection('out', self.out)

            print("network output argmax inception")
            with tf.variable_scope('out_argmax'):
                self.out_argmax = tf.argmax(self.logits,
                                            axis=-1,
                                            output_type=tf.int64,
                                            name='out_argmax')
                # self.out_argmax = tf.squeeze(tf.argmax(self.out, 1), axis=[1])

                print("Arg Max Shape: ", self.out_argmax.shape)

        print("loss inception")
        with tf.variable_scope('loss-acc'):
            # one_hot_y = tf.one_hot(indices=self.y, depth=self.num_classes)

            self.loss = tf.losses.sparse_softmax_cross_entropy(
                labels=self.y, logits=self.logits)

            # probabilities = end_points['Predictions']

            # accuracy, accuracy_update = tf.metrics.accuracy(labels = one_hot_y, predictions = self.out_argmax)
            self.acc = tf.reduce_mean(
                tf.cast(tf.equal(self.y, self.out_argmax), tf.float32))

        with tf.variable_scope('train_step'):
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                self.train_step = self.optimizer.minimize(
                    self.loss, global_step=self.global_step_tensor)

        tf.add_to_collection('train', self.train_step)
        tf.add_to_collection('train', self.loss)
        tf.add_to_collection('train', self.acc)
Example #18
def main(argv=None):
    # Load the preprocessed data
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_examples = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print('%d training, %d validation, %d testing' %
          (n_training_examples, len(validation_labels), len(testing_labels)))

    # Define the inputs to Inception V3
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_image')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception v3 model (forward pass)
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES)

    # Get the variables that need to be trained
    trainable_variables = get_trainable_variables()
    # Loss function
    loss_fun = tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                               logits,
                                               weights=1.0)
    # Training step
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss())
    # To train only the final layer:
    # train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(tf.losses.get_total_loss(),
    #                                                               var_list=get_trainable_variables())

    # Accuracy
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    ckpt = tf.train.get_checkpoint_state(SAVE_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        # Load the previously trained parameters and continue training
        variables_to_restore = slim.get_model_variables()
        print('continue training from %s' % ckpt)
        step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        step = int(step)
        ckpt = ckpt.model_checkpoint_path
    else:
        # No previous checkpoint, so transfer the pre-trained variables first
        ckpt = CKPT_FILE
        variables_to_restore = get_tuned_variable()
        print('loading tuned variables from %s' % CKPT_FILE)
        step = 0

    # Function for loading the model
    load_fn = slim.assign_from_checkpoint_fn(ckpt,
                                             variables_to_restore,
                                             ignore_missing_vars=True)

    # Saver for the newly trained model
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initialize all parameters first
        init = tf.global_variables_initializer()
        sess.run(init)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(step + 1, step + 1 + STEPS):
            # Run training; only part of the parameters are updated
            _, loss_val = sess.run(
                [train_step, loss_fun],
                feed_dict={
                    images: training_images[start:end],
                    labels: training_labels[start:end]
                })

            # Log progress
            if i % 10 == 0:
                print('after %d train step, loss value is: %.4f' %
                      (i, loss_val))
            if (i % 30 == 0) or (i + 1 == STEPS):
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images: validation_images,
                                                   labels: validation_labels
                                               })
                print('Step %d Validation accuracy = %.1f%%' %
                      (i, validation_accuracy * 100.0))

            start = end
            if start == n_training_examples:
                start = 0

            end = start + BATCH
            if end > n_training_examples:
                end = n_training_examples

        # Evaluate accuracy on the test set
        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images,
                                     labels: testing_labels
                                 })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100.0))
Example #19
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.python.slim.nets import inception_v3

pretrain_model_dir = '/home/alex/Documents/pretrain_model/Inception/inception_v3/inception_v3.ckpt'

images = tf.Variable(initial_value=tf.random_uniform(shape=(5, 299, 299, 3),
                                                     minval=0,
                                                     maxval=3),
                     dtype=tf.float32)
class_num = tf.constant(value=5, dtype=tf.int32)
# is_training = True

# read net
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits, end_points = inception_v3.inception_v3(images)

if __name__ == "__main__":

    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    with tf.Session() as sess:
        # images, class_num = sess.run([images, class_num])
        sess.run(init)
        for var in tf.model_variables():
            print(var.name)

        # exclusion scope
        exclude_variable = ['InceptionV3/Logits', 'InceptionV3/AuxLogits']

        variable = slim.get_variables_to_restore(exclude=exclude_variable)
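Example 19 stops after computing the filtered variable list. To actually restore those variables from the checkpoint, the usual continuation inside the same `with tf.Session() as sess:` block would look roughly like the sketch below, reusing `variable` and `pretrain_model_dir` from above:

        # Build a restore function for the selected variables and run it in
        # the current session (ignoring variables missing from the checkpoint).
        init_fn = slim.assign_from_checkpoint_fn(
            pretrain_model_dir, variable, ignore_missing_vars=True)
        init_fn(sess)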
Example #20
                             padding='SAME'), bias))
        return result


print("\n\n\n")
print("program begin here:")

imgs_ = tf.placeholder(tf.float32, [299, 299, 3])
imgs = tf.reshape(imgs_, [-1, 299, 299, 3])
class_num = 1001
is_training_pl = False

g1 = tf.get_default_graph()
with g1.as_default():
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, end_points = inception_v3.inception_v3(
            imgs, num_classes=class_num, is_training=is_training_pl)
    variables = tf.contrib.framework.get_variables_to_restore()
    #print(variables)

    unpooling = tf.image.resize_images(end_points['Mixed_7c'], [15, 15])
    print(unpooling.get_shape().as_list())

    gavp = global_avg_pool(end_points['Mixed_7c'], name='Global_avg_pooling')

    variables_to_restore = [
        v for v in variables if v.name.split('/')[0] == 'InceptionV3'
    ]
    saver = tf.train.Saver(variables_to_restore)

g2 = tf.get_default_graph()
with g2.as_default():
Example #21
def main(_):

    training_images = []
    training_labels = []
    validation_images = []
    validation_labels = []
    testing_images = []
    testing_labels = []

    processed_data = []
    files = os.listdir(INPUT_DIR)  # list all files in the input directory
    for file in files:
        file_path = os.path.join(INPUT_DIR, file)
        proce_data = np.load(file_path)
        # Horizontally stack the arrays without adding a dimension, e.g.:
        # a = [[1,2,3],[4,5,6]]
        # b = [[1,1,1],[2,2,2]]
        # d = np.hstack((a,b))
        # d = array([[1, 2, 3, 1, 1, 1],[4, 5, 6, 2, 2, 2]])
        processed_data = np.hstack((processed_data, proce_data))

    training_images = processed_data[0]
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    n_training_image = len(training_images)
    print(" %d train_data, %d validation-data, %d test_data" %
          (n_training_image, len(validation_labels), len(testing_labels)))

    # Define the InceptionV3 inputs: images holds the input pictures, labels the label of each picture
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the InceptionV3 model structure
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASS)
    # Get the variables that need to be trained
    trainable_variables = get_trainable_variables()
    # Define the cross-entropy loss
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASS),
                                    logits,
                                    weights=1.0)
    # Define the training step
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss())
    # Compute the accuracy
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    # Define the model-loading function
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        # Load the model
        load_fn(sess)
        start = 0
        end = BATCH
        for i in range(STEPS):
            # Training step: updates only the specified subset of parameters
            # The data has already been shuffled
            sess.run(train_step,
                     feed_dict={
                         images: training_images[start:end],
                         labels: training_labels[start:end]
                     })
            if i % 30 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE_DIR, global_step=i)
                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images: validation_images,
                                                   labels: validation_labels
                                               })
                nowTime = datetime.datetime.now().strftime(
                    '%Y-%m-%d %H:%M:%S')  # current time
                print('%s step %d : Validation accuracy = %.1f%%' %
                      (nowTime, i, validation_accuracy * 100))
            start = end
            if start == n_training_image:
                start = 0
            end = start + BATCH
            if end > n_training_image:
                end = n_training_image

        # Evaluate accuracy on the test set
        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images,
                                     labels: testing_labels
                                 })
        print('%s Final test accuracy = %.1f%%' %
              (nowTime, test_accuracy * 100))
Example #22
def main(self):
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    training_labels = processed_data[1]
    testing_images = processed_data[2]
    testing_labels = processed_data[3]
    n_training_example = len(training_images)

    print(len(training_images))
    print(len(training_labels))
    print(len(testing_images))
    print(len(testing_labels))

    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES)

    trainable_variables = get_trainable_variables()

    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits,
                                    weights=1.0)

    train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss())  # could try swapping in Adam here

    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1),
                                      labels)  # axis=1: argmax along each row
        A = tf.reduce_mean(tf.losses.get_total_loss())
        B = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        evaluation_step = [A, B]
    # Define how to load the pretrained parameters
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    step_list = list(range(STEPS))  # [0, 1, 2, ..., 9]
    train_accuracy_list = []
    test_accuracy_list = []
    train_loss_list = []
    test_loss_list = []
    fig = plt.figure()  # create the figure for plotting
    ax1 = fig.add_subplot(2, 3, 1)  # subplot grid: rows, columns, position
    ax2 = fig.add_subplot(2, 3, 2)
    ax3 = fig.add_subplot(2, 3, 4)
    ax4 = fig.add_subplot(2, 3, 5)
    ax5 = fig.add_subplot(2, 3, 3)
    ax1.set_title('cnn_train_accuracy', fontsize=10, y=1.02)
    ax2.set_title('cnn_test_accuracy', fontsize=10, y=1.02)
    ax3.set_title('cnn_train_loss', fontsize=10, y=1.02)
    ax4.set_title('cnn_test_loss', fontsize=10, y=1.02)
    ax5.set_title('cnn_ROC', fontsize=10, y=1.02)
    # ax1.set_xlabel('steps')
    # ax2.set_xlabel('steps')
    # ax3.set_xlabel('steps')
    # ax4.set_xlabel('steps')

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        print('Loading tuned variables from %s' % CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(STEPS):

            train_accuracy = sess.run(evaluation_step,
                                      feed_dict={
                                          images: training_images[start:end],
                                          labels: training_labels[start:end]
                                      })
            train_loss = train_accuracy[0]
            train_acc = train_accuracy[1]

            sess.run(train_step,
                     feed_dict={
                         images: training_images[start:end],
                         labels: training_labels[start:end]
                     })

            test_accuracy = sess.run(evaluation_step,
                                     feed_dict={
                                         images: testing_images,
                                         labels: testing_labels
                                     })
            test_loss = test_accuracy[0]
            test_acc = test_accuracy[1]

            train_accuracy_list.append(train_acc * 100.0)
            train_loss_list.append(train_loss)
            test_accuracy_list.append(test_acc * 100.0)
            test_loss_list.append(test_loss)

            if i % 10 == 0:
                print('Step %d: train loss = %.3f' % (i, train_loss))
                print('Step %d: train accuracy = %.1f%%' %
                      (i, train_acc * 100.0))
                print('Step %d: Test loss = %.3f' % (i, test_loss))
                print('Step %d: Test accuracy = %.1f%%' %
                      (i, test_acc * 100.0))

            if i == 99:
                # saver.save(sess, TRAIN_FILE, global_step=i)

                predict = sess.run(tf.nn.softmax(logits),
                                   feed_dict={
                                       images: testing_images,
                                       labels: testing_labels
                                   })
                AUC_pro = predict[:, 0]
                probability = predict[:, 0] * 100  # 18 x 8 = 144 values
                TP = 0
                TN = 0
                FP = 0
                FN = 0
                FPR_list = []
                TPR_list = []
                for j in range(100):  # 101
                    for k in range(144):
                        if probability[k] <= j:  # predicted 1
                            if testing_labels[k] == 1:  # ground truth 1
                                TP = TP + 1
                            elif testing_labels[k] == 0:  # ground truth 0
                                FP = FP + 1
                        elif probability[k] > j:  # predicted 0
                            if testing_labels[k] == 1:  # ground truth 1
                                FN = FN + 1
                            elif testing_labels[k] == 0:  # ground truth 0
                                TN = TN + 1
                    FPR = FP / (FP + TN)
                    TPR = TP / (TP + FN)
                    FPR_list.append(FPR)
                    TPR_list.append(TPR)
                    TP = 0
                    TN = 0
                    FP = 0
                    FN = 0

                prediction_tensor = tf.convert_to_tensor(AUC_pro)
                label_tensor = tf.convert_to_tensor(testing_labels)
                auc_value, auc_op = tf.metrics.auc(label_tensor,
                                                   prediction_tensor,
                                                   num_thresholds=100)
                sess.run(tf.global_variables_initializer())
                sess.run(tf.local_variables_initializer())
                sess.run(auc_op)
                value = 1 - sess.run(auc_value)
                print("AUC:" + str(value))

            start = end
            if start == n_training_example:
                start = 0
            end = start + BATCH
            if end > n_training_example:
                end = n_training_example

        writer = tf.summary.FileWriter(
            'D:/python/deep-learning/MRI-2D/BMP-cnn/to/log',
            tf.get_default_graph())
        writer.close()
        fig.tight_layout()
        ax1.plot(step_list, train_accuracy_list)
        ax2.plot(step_list, test_accuracy_list)
        ax3.plot(step_list, train_loss_list)
        ax4.plot(step_list, test_loss_list)
        # ROC curve: x-axis is the false positive rate (FPR), y-axis the true positive rate (TPR)
        ax5.plot(FPR_list, TPR_list)
        plt.show()
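As an optional cross-check of the hand-rolled threshold loop in Example 22, the same ROC/AUC numbers can be obtained with scikit-learn. The lines below are only a sketch that could sit right after the AUC printout inside the `if i == 99:` block; they assume binary labels with class 1 as positive, so the positive-class score is 1 - AUC_pro (since AUC_pro scores class 0):

                from sklearn.metrics import roc_auc_score, roc_curve

                # Positive-class scores: invert the class-0 probability.
                positive_scores = 1.0 - AUC_pro
                sk_fpr, sk_tpr, _ = roc_curve(testing_labels, positive_scores)
                print("sklearn AUC: %.4f" % roc_auc_score(testing_labels, positive_scores))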
Example #23
def main(argv=None):
    # Load the preprocessed data
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]

    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]

    # Define the Inception-v3 inputs: images holds the input pictures, labels the label of each picture
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name="input-images")
    labels = tf.placeholder(tf.int64, [None], name="labels")

    # Define the Inception-v3 model. Google only provides the parameter values, so the model
    # structure has to be defined in this code. In theory the training and test models should be
    # distinguished, i.e. is_training=False should be used at test time, but the batch
    # normalization statistics of the pre-trained Inception-v3 model differ from the new data and
    # give very poor results, so the same model is used directly for testing as well. See the
    # GitHub issue https://github.com/tensorflow/models/issues/1314 for a more detailed explanation.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES)
    # Get the variables that need to be trained
    training_variables = get_trainable_variables()
    # Define the cross-entropy loss. Note that the regularization losses were already added to
    # the loss collection when the model was defined.
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits,
                                    weights=1.0)
    # Define the training step (minimize() can be restricted to the set of variables to optimize)
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss())
    # Compute the accuracy
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))
    # Define the function that loads the model
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tune_variables(),
                                             ignore_missing_vars=True)
    # Define the saver for the newly trained model
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initialize the variables that are not loaded from the checkpoint. This must happen
        # before the model is loaded, otherwise initialization would overwrite the loaded values.
        tf.global_variables_initializer().run()

        # Load the model pre-trained by Google
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(STEPS):
            # Run the training step; only the specified subset of parameters is updated
            sess.run(train_step,
                     feed_dict={
                         images: training_images[start:end],
                         labels: training_labels[start:end]
                     })
            # Log progress
            if i % 30 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images: validation_images,
                                                   labels: validation_labels
                                               })
                print('step %d : validation accuracy = %.1f%%' %
                      (i, validation_accuracy * 100.0))
                # The data was already shuffled during preprocessing, so the training data can simply be used in order
            start = end
            if start == n_training_example:
                start = 0
            end = start + BATCH
            if end > n_training_example:
                end = n_training_example

        # Finally, evaluate accuracy on the test data
        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images,
                                     labels: testing_labels
                                 })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100.0))
Example #24
train_set = cky.get_set(INPUT_DATA[0], n_train_set)
validation_set = cky.get_set(INPUT_DATA[1], n_validation_set)
test_set = cky.get_set(INPUT_DATA[2], n_test_set)
# Read the data; the training data is fetched in batches during training
with g2.as_default():
    validation_images, validation_labels = cky.get_data(
        validation_set, n_validation_set)
    test_images, test_labels = cky.get_data(test_set, n_test_set)

#print(tf.get_default_graph() is g1)
#print(test_labels)
#%%

# Define the Inception-v3 model
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits, _ = inception_v3.inception_v3(inputs=images, num_classes=N_CLASSES)

#%%

# Define the cross-entropy loss. Note that the regularization losses were already added to the loss collection when the model was defined.
tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                logits,
                                weights=1.0)
#%%

# Define the training step
train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
    tf.losses.get_total_loss())

#%%
Example #25
def main():
    # Load the preprocessed data.
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print(
        "%d training examples, %d validation examples and %d testing examples."
        % (n_training_example, len(validation_labels), len(testing_labels)))

    # Define the Inception-v3 inputs: images holds the input pictures,
    # labels holds the label of each picture.
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception-v3 model. Google only provides the parameter values, so the
    # model structure has to be defined in this code. Because the model uses dropout,
    # one model is needed for training and a separate one for testing.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES)

        logits1, _ = inception_v3.inception_v3(images,
                                               num_classes=N_CLASSES,
                                               is_training=False,
                                               reuse=True)

        logits2, _ = inception_v3.inception_v3(images,
                                               num_classes=N_CLASSES,
                                               reuse=True)

    trainable_variables = get_trainable_variables()

    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits,
                                    weights=1.0)
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss())

    # Compute the accuracy.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

        correct_prediction1 = tf.equal(tf.argmax(logits1, 1), labels)
        evaluation_step1 = tf.reduce_mean(
            tf.cast(correct_prediction1, tf.float32))

        correct_prediction2 = tf.equal(tf.argmax(logits2, 1), labels)
        evaluation_step2 = tf.reduce_mean(
            tf.cast(correct_prediction2, tf.float32))

    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    #
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initialize the variables that are not loaded from the checkpoint.
        init = tf.global_variables_initializer()
        sess.run(init)

        # Load the model pre-trained by Google.
        print('Loading tuned variables from %s' % CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(STEPS):
            sess.run(train_step,
                     feed_dict={
                         images: training_images[start:end],
                         labels: training_labels[start:end]
                     })

            if i % 30 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(
                    [evaluation_step, evaluation_step1, evaluation_step2],
                    feed_dict={
                        images: validation_images,
                        labels: validation_labels
                    })
                print('Step %d: Validation accuracy = %.1f%%' %
                      (i, validation_accuracy[0] * 100.0))
                print(validation_accuracy)

            start = end
            if start == n_training_example:
                start = 0

            end = start + BATCH
            if end > n_training_example:
                end = n_training_example

        # Finally, evaluate accuracy on the test data.
        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images,
                                     labels: testing_labels
                                 })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
Example #26
def main():
    # processed_data = np.load("preprocess/test_flower.npy", allow_pickle=True)
    # test_images = processed_data[0]
    # test_labels = processed_data[1]

    # load preprocessed data
    processed_data = np.load(INPUT_DATA, allow_pickle=True)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    # np.save("preprocess/training_flower.npy", np.asarray([training_images, training_labels]))
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    # np.save("preprocess/validation_flower.npy", np.asarray([validation_images, validation_labels]))
    test_images = processed_data[4]
    test_labels = processed_data[5]
    # np.save("preprocess/test_flower.npy", np.asarray([test_images, test_labels]))

    print(
        "%d training examples, %d validation examples and %d testing examples."
        % (n_training_example, len(validation_labels), len(test_labels)))

    # define inputs
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # define model
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images,
                                              num_classes=N_CLASSES,
                                              is_training=False)
    # get trainable variable
    trainable_variables = get_trainable_variables()
    # define cross entropy
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits,
                                    weights=1.0)
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss())

    # calc accuracy
    with tf.name_scope('evaluation'):
        prediction = tf.argmax(logits, 1)
        correct_answer = labels
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    # define func to load model
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    # define saver
    saver = tf.train.Saver()
    config = tf.ConfigProto(allow_soft_placement=True)
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # init
        init = tf.global_variables_initializer()
        sess.run(init)

        ckpt = tf.train.get_checkpoint_state(TRAIN_FILE)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            # load origin model
            print('loading tuned variables from %s' % CKPT_FILE)
            load_fn(sess)

        start = 0
        end = BATCH
        if TRAINING:
            for i in range(STEPS):
                sess.run(train_step,
                         feed_dict={
                             images: training_images[start:end],
                             labels: training_labels[start:end]
                         })

                if i % 20 == 0 or i + 1 == STEPS:
                    saver.save(sess, TRAIN_FILE, global_step=i)
                    validation_accuracy = sess.run(evaluation_step,
                                                   feed_dict={
                                                       images:
                                                       validation_images,
                                                       labels:
                                                       validation_labels
                                                   })
                    print('step %d: validation accuracy = %.1f%%' %
                          (i, validation_accuracy * 100.0))

                start = end
                if start == n_training_example:
                    start = 0

                end = start + BATCH
                if end > n_training_example:
                    end = n_training_example

            # test accuracy
            test_accuracy = sess.run(evaluation_step,
                                     feed_dict={
                                         images: test_images,
                                         labels: test_labels
                                     })
            print('final test accuracy = %.1f%%' % (test_accuracy * 100.0))
        else:
            while True:
                index = np.random.randint(0, len(test_labels) - 2)
                # test accuracy
                prediction_score, correct_answer_score = sess.run(
                    [prediction, correct_answer],
                    feed_dict={
                        images: test_images[index:index + 1],
                        labels: test_labels[index:index + 1]
                    })
                result = [(flower_label[x] + str(x)) for x in prediction_score]
                answer = [(flower_label[x] + str(x))
                          for x in correct_answer_score]
                # print(result)
                # print(answer)
                plt.imshow(test_images[index])
                print('test result: %s, correct answer: %s' % (result, answer))
                plt.show()
                time.sleep(3)
Example #27
def main():
    # Load the preprocessed data
    processed_data = np.load(input_data)
    train_images = processed_data[0]
    n_training_examples = len(train_images)
    train_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    test_images = processed_data[4]
    test_labels = processed_data[5]
    print(
        '%d training examples, %d validation examples and %d testing examples.'
        % (n_training_examples, len(validation_labels), len(test_labels)))

    # Define the Inception-v3 inputs: images holds the input pictures, labels the label of each picture
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception-v3 model. Google only provides the parameter values, so the model structure has to be defined here.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=n_classes)

    # Get the variables that need to be trained
    trainable_variables = get_trainable_variables()
    # Define the cross-entropy loss. Note that the regularization losses were already added to
    # the loss collection when the model was defined.
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, n_classes),
                                    logits,
                                    weights=1)
    # Define the training step (minimize() can be restricted to the set of variables to optimize)
    train_step = tf.train.RMSPropOptimizer(learning_rate).minimize(
        tf.losses.get_total_loss())

    # Compute the accuracy.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))  # tf.cast converts to float32.

    # Define the function that loads the pretrained model.
    load_fn = slim.assign_from_checkpoint_fn(ckpt_file,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    # Define a Saver for the newly trained model.
    saver = tf.train.Saver()  # tf.train.Saver is used to save the model.
    with tf.Session() as sess:
        # Initialize the variables that were not loaded from the checkpoint.
        # This must happen before loading the model, otherwise the initializer would overwrite the loaded values.
        init = tf.global_variables_initializer()
        sess.run(init)

        # Load the model that Google has already trained.
        print('Loading tuned variables from %s' % ckpt_file)
        load_fn(sess)

        start = 0
        end = batch
        for i in range(steps):
            # Run the training step. This does not update all parameters, only the specified subset.
            sess.run(train_step,
                     feed_dict={
                         images: train_images[start:end],
                         labels: train_labels[start:end]
                     })

            # Log progress.
            if i % 30 == 0 or i + 1 == steps:
                saver.save(sess, train_file, global_step=i)
                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images: validation_images,
                                                   labels: validation_labels
                                               })
                print('Step %d: Validation accuracy = %.1f%%' %
                      (i, validation_accuracy * 100.0))

            # The data was already shuffled during preprocessing, so the training set can simply be used in order.
            start = end
            if start == n_training_examples:
                start = 0

            end = start + batch
            if end > n_training_examples:
                end = n_training_examples

        # Finally, evaluate the accuracy on the test set.
        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: test_images,
                                     labels: test_labels
                                 })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
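Several of these snippets call get_tuned_variables() and get_trainable_variables() without showing their definitions. A minimal sketch of how such helpers are commonly written, assuming the usual CHECKPOINT_EXCLUDE_SCOPES / TRAINABLE_SCOPES constants (the scope lists below are assumptions for illustration, not taken from the original files):

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Hypothetical scope lists: only the final layers are excluded from loading and retrained.
CHECKPOINT_EXCLUDE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'
TRAINABLE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'


def get_tuned_variables():
    """Returns the pretrained variables to restore from the Google checkpoint."""
    exclusions = [scope.strip() for scope in CHECKPOINT_EXCLUDE_SCOPES.split(',')]
    variables_to_restore = []
    for var in slim.get_model_variables():
        if not any(var.op.name.startswith(exclusion) for exclusion in exclusions):
            variables_to_restore.append(var)
    return variables_to_restore


def get_trainable_variables():
    """Returns the variables under TRAINABLE_SCOPES that are actually updated."""
    scopes = [scope.strip() for scope in TRAINABLE_SCOPES.split(',')]
    variables_to_train = []
    for scope in scopes:
        variables_to_train.extend(
            tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope))
    return variables_to_train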
Ejemplo n.º 28
0
def main():
    # data_process.py stores the arrays in this order, so they are unpacked accordingly:
    # np.asarray([training_images, training_labels,
    #             validation_images, validation_labels,
    #             testing_images, testing_labels])
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    print(n_training_example)
    training_labels = processed_data[1]

    validation_images = processed_data[2]
    validation_labels = processed_data[3]

    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print(
        "%d training examples, %d validation examples and %d testing examples."
        % (n_training_example, len(validation_labels), len(testing_labels)))

    # Define the Inception-v3 inputs: images holds the input pictures, labels the label of each picture.
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES)

    # Get the variables that need to be trained.
    trainable_variables = get_trainable_variables()

    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits=logits,
                                    weights=1.0)
    total_loss = tf.losses.get_total_loss()
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(total_loss)

    # Compute the accuracy.
    with tf.name_scope('evaluation'):
        correction_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correction_prediction, tf.float32))

    # Define the loader for the Inception-v3 checkpoint pretrained by Google.
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        # Initialize the variables that are not loaded from the checkpoint.
        tf.global_variables_initializer().run()

        # Load the model that Google has already trained.
        print('Loading tuned variables from %s' % CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(STEPS):
            _, loss = sess.run(
                [train_step, total_loss],
                feed_dict={
                    images: training_images[start:end],
                    labels: training_labels[start:end]
                })

            if i % 30 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images: validation_images,
                                                   labels: validation_labels
                                               })
                print(
                    'Step %d: Training loss is %.1f Validation accuracy = %.1f%%'
                    % (i, loss, validation_accuracy * 100.0))

            start = end
            if start == n_training_example:
                start = 0
            end = start + BATCH
            if end > n_training_example:
                end = n_training_example
        # Evaluate the accuracy on the test data at the end.
        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images,
                                     labels: testing_labels
                                 })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
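These training scripts also rely on a handful of module-level constants that are defined outside the excerpts. A sketch of typical values; every path and hyperparameter below is an assumption for illustration, not taken from the original files:

# Hypothetical configuration used by the transfer-learning scripts above and below.
INPUT_DATA = '/path/to/flower_processed_data.npy'  # output of the preprocessing script
TRAIN_FILE = '/path/to/save_model/model'           # prefix for the fine-tuned checkpoints
CKPT_FILE = '/path/to/inception_v3.ckpt'           # Google's pretrained Inception-v3 checkpoint
LEARNING_RATE = 0.0001
STEPS = 300
BATCH = 32
N_CLASSES = 5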
Ejemplo n.º 29
0
def main():
    if not os.path.exists(TRAIN_FILE):
        os.makedirs(TRAIN_FILE)

    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    training_labels = processed_data[1]
    n_training_examples = len(training_labels)
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print('%d training numbers, %d validation numbers, %d testing numbers'
                    %(n_training_examples, len(validation_labels), len(testing_labels)))

    # Define the Inception-v3 inputs.
    images = tf.placeholder(tf.float32, [None, IMG_HEIGHT, IMG_WIDTH, CHANNELS], name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception-v3 model.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES, is_training=True)

    # Get the variables that need to be trained.
    trainable_variables = get_trainable_variables()   # passed to minimize() below via var_list
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES), logits, weights=1.0)
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(tf.losses.get_total_loss(), var_list=trainable_variables)

    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # code change here
    # exclude = CHECKPOINT_EXCLUDE_SCOPES.split(',')
    # tuned_var = slim.get_variables_to_restore(exclude=exclude)
    tuned_var = get_tuned_variables()
    load_fn = slim.assign_from_checkpoint_fn(
                        CKPT_FILE, tuned_var, ignore_missing_vars=True)  # ERROR: tensor shapes do not match

    saver = tf.train.Saver()
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)   # Initialize before loading the checkpoint.

        print('Loading tuned_variables from %s' % CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(STEPS):
            print('Steps:', i)   # code test
            sess.run(train_step, feed_dict = {
                                        images:training_images[start:end],
                                        labels:training_labels[start:end]})

            # Log progress.
            if i % 30 == 0 or i+1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(evaluation_step, feed_dict={
                                         images:validation_images, labels:validation_labels})
                print('Step:%d, validation accuracy = %.1f%%'%(i, validation_accuracy*100))

            start = end
            if start == n_training_examples:
                start = 0
            end = start + BATCH
            if end > n_training_examples:
                end = n_training_examples

        test_accuracy = sess.run(evaluation_step, feed_dict={
                                    images:testing_images, labels:testing_labels})
        print('the testing accuracy: %.1f%%' % (test_accuracy*100))
Ejemplo n.º 30
0
def main():
    # Load the preprocessed data.
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print(
        "%d training examples, %d validation examples and %d testing examples."
        % (n_training_example, len(validation_labels), len(testing_labels)))

    # Define the Inception-v3 inputs: images holds the input pictures, labels the label of each picture.
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception-v3 network structure.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES)

    # Get the variables that need to be trained.
    training_variables = get_trainable_variables()
    # Define the cross-entropy loss. The regularization losses were already added to the loss collection when the model was defined.
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits,
                                    weights=1.0)
    # Define the training step.
    total_loss = tf.losses.get_total_loss()
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(total_loss)

    # Compute the accuracy.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    # Define the function that loads the pretrained model.
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    # Saver for the newly trained model.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initialize the variables that are not loaded from the checkpoint. This must happen before loading, otherwise the loaded values would be overwritten.
        init = tf.global_variables_initializer()
        sess.run(init)

        # Load the model that Google has already trained.
        print('Loading tuned variables from %s' % CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(STEPS):
            # Run the training step. This does not update all parameters, only the specified subset.
            _, loss = sess.run([train_step, total_loss],
                               feed_dict={
                                   images: training_images[start:end],
                                   labels: training_labels[start:end]
                               })
            # Log progress.
            if i % 30 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)

                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images: validation_images,
                                                   labels: validation_labels
                                               })
                print(
                    'Step %d: Training loss is %.1f Validation accuracy = %.1f%%'
                    % (i, loss, validation_accuracy * 100.0))

            start = end
            if start == n_training_example:
                start = 0

            end = start + BATCH
            if end > n_training_example:
                end = n_training_example

        # Evaluate the accuracy on the test data at the end.
        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images,
                                     labels: testing_labels
                                 })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
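The start/end bookkeeping in these loops is a simple sliding window over the pre-shuffled arrays. The same logic, pulled out into a small standalone helper for clarity (a sketch, not part of the original scripts):

def sequential_batches(n_examples, batch_size, steps):
    """Yields (start, end) index pairs exactly as the training loops above compute them."""
    start, end = 0, batch_size
    for _ in range(steps):
        yield start, end
        start = end
        if start == n_examples:
            start = 0
        end = min(start + batch_size, n_examples)


# Example: 5 examples, batch size 2, 6 steps -> (0, 2), (2, 4), (4, 5), (0, 2), (2, 4), (4, 5)
print(list(sequential_batches(5, 2, 6)))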
Ejemplo n.º 31
0
    def build_inception_graph(self):
        """Builds an inception graph and add the necessary input & output tensors.

      To use other Inception models modify this file. Also preprocessing must be
      modified accordingly.

      See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
      details about InceptionV3.

    Returns:
      input_jpeg: A placeholder for jpeg string batch that allows feeding the
                  Inception layer with image bytes for prediction.
      inception_embeddings: The embeddings tensor.
    """

        # These constants are set by Inception v3's expectations.
        height = 299
        width = 299
        channels = 3

        image_str_tensor = tf.placeholder(tf.string, shape=[None])

        # The CloudML Prediction API always "feeds" the Tensorflow graph with
        # dynamic batch sizes e.g. (?,).  decode_jpeg only processes scalar
        # strings because it cannot guarantee a batch of images would have
        # the same output size.  We use tf.map_fn to give decode_jpeg a scalar
        # string from dynamic batches.
        def decode_and_resize(image_str_tensor):
            """Decodes jpeg string, resizes it and returns a uint8 tensor."""
            image = tf.image.decode_jpeg(image_str_tensor, channels=channels)
            # Note resize expects a batch_size, but tf.map_fn suppresses that index,
            # thus we have to expand then squeeze.  Resize returns float32 in the
            # range [0, uint8_max].
            image = tf.expand_dims(image, 0)
            image = tf.image.resize_bilinear(image, [height, width],
                                             align_corners=False)
            image = tf.squeeze(image, squeeze_dims=[0])
            image = tf.cast(image, dtype=tf.uint8)
            return image

        image = tf.map_fn(decode_and_resize,
                          image_str_tensor,
                          back_prop=False,
                          dtype=tf.uint8)
        # convert_image_dtype also scales [0, uint8_max] -> [0, 1).
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)

        # Then shift images to [-1, 1) for Inception.
        # Try-except to make the code compatible across sdk versions
        try:
            image = tf.subtract(image, 0.5)
            image = tf.multiply(image, 2.0)
        except AttributeError:
            image = tf.sub(image, 0.5)
            image = tf.mul(image, 2.0)

        # Build Inception layers, which expect a tensor of type float from [-1, 1)
        # and shape [batch_size, height, width, channels].
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            _, end_points = inception.inception_v3(image, is_training=False)

        inception_embeddings = end_points['PreLogits']
        inception_embeddings = tf.squeeze(inception_embeddings, [1, 2],
                                          name='SpatialSqueeze')
        return image_str_tensor, inception_embeddings
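The comment inside this example explains why decode_jpeg has to go through tf.map_fn: it only accepts a scalar string, while the serving input is a dynamic batch of strings. A small self-contained sketch of the same pattern (the JPEG bytes here are synthesized with tf.image.encode_jpeg just so the snippet can run without files):

import tensorflow as tf

# Two synthetic JPEG strings stand in for a batch of image bytes.
dummy_images = tf.cast(tf.random_uniform([2, 32, 32, 3], maxval=255), tf.uint8)
jpeg_strings = tf.map_fn(tf.image.encode_jpeg, dummy_images, dtype=tf.string)


def decode_one(jpeg):
    # decode_jpeg only handles a scalar string, hence the tf.map_fn wrapper below.
    img = tf.image.decode_jpeg(jpeg, channels=3)
    img = tf.image.resize_bilinear(tf.expand_dims(img, 0), [299, 299])
    return tf.cast(tf.squeeze(img, [0]), tf.uint8)


decoded_batch = tf.map_fn(decode_one, jpeg_strings, back_prop=False, dtype=tf.uint8)

with tf.Session() as sess:
    print(sess.run(decoded_batch).shape)  # (2, 299, 299, 3)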
Ejemplo n.º 32
0
def train_save_and_test_model():

    # Loading datasets
    print("Loading dataset.")
    dataset_np = np.load(INPUT_DATA)
    training_images, training_labels, num_training_images = dataset_np[
        0], dataset_np[1], len(dataset_np[0])
    validation_images, validation_labels, num_validation_images = dataset_np[
        2], dataset_np[3], len(dataset_np[2])
    testing_images, testing_labels, num_testing_images = dataset_np[
        4], dataset_np[5], len(dataset_np[4])
    print(
        "Dataset loaded successfully: %s training images, %s validation images and %s testing images!"
        % (num_training_images, num_validation_images, num_testing_images))

    # Create an inception-v3 model
    print("Start creating an inception-v3 model.")
    input_images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                                  name="input_images")
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):  # Default InceptionV3 arg scope
        logits, _ = inception_v3.inception_v3(input_images,
                                              num_classes=NUM_CLASSES)
    # All l2-regularization losses are added to the tf.GraphKeys.REGULARIZATION_LOSSES
    # collection when the inception-v3 model is created. You can print them with the following line.
    # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # Add graph for training
    labels_true = tf.placeholder(tf.int32, [None], name='labels_true')
    with tf.variable_scope("Loss"):
        softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy(
            tf.one_hot(labels_true, NUM_CLASSES), logits)
        loss = tf.add(
            softmax_cross_entropy_loss,
            tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)),
            name="loss")

    # Add graph for calculating accuracy
    with tf.variable_scope("Accuracy"):
        correct_predictions = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32),
                                       labels_true)
        accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))

    # Create an optimizer
    train_step_op = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(loss)

    # Create log for visualization by tensorboard
    print("Creating the inception-v3 model successfuly!")
    log_writer = tf.summary.FileWriter(LOG_DIR, graph=tf.get_default_graph())
    log_writer.close()

    # Training, evaluating and saving model
    saver = tf.train.Saver(max_to_keep=MAX_TO_KEEP)
    with tf.Session() as sess:

        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        # normalize dataset
        training_images = normalize_features(training_images, is_train=True)
        validation_images = normalize_features(validation_images,
                                               is_train=False)
        testing_images = normalize_features(testing_images, is_train=False)

        # print all trainable variables
        print("The trainable variables are as follows:")
        for trainable_variable in tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES):
            print(trainable_variable)

        print("Start training inception-v3.")
        best_accuracy = 0.0
        best_step = 0
        for i in range(1, TRAINING_STEPS + 1):

            images_batch, labels_batch = random_get_batch_data(
                training_images, training_labels, batch_size=BATCH_SIZE)

            sess.run(train_step_op,
                     feed_dict={
                         input_images: images_batch,
                         labels_true: labels_batch,
                     })

            if i % PRINT_EVERY_STEPS == 0 or i + 1 == TRAINING_STEPS:

                validation_accuracy = sess.run(accuracy,
                                               feed_dict={
                                                   input_images:
                                                   validation_images,
                                                   labels_true:
                                                   validation_labels,
                                               })
                now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                print("[%s] Step %d: Validation accuracy = %.3f%%" %
                      (now_time, i, validation_accuracy * 100))

                if validation_accuracy >= best_accuracy:
                    best_accuracy = validation_accuracy
                    best_step = i
                    print("It is the best model until now, and will be saved!")
                    saver.save(sess, CKPT_FILE, global_step=best_step)
        print("End training.")

        # Finally, test the best model on the testing dataset.
        saver.restore(sess, CKPT_FILE + '-%d' % (best_step))
        test_accuracy = sess.run(accuracy,
                                 feed_dict={
                                     input_images: testing_images,
                                     labels_true: testing_labels,
                                 })
        print(
            "Final test accuracy = %.3f%% (from the best model at %d step(s))."
            % (test_accuracy * 100, best_step))
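This example also relies on a random_get_batch_data() helper that is not shown. A minimal sketch of what it presumably does (name and signature taken from the call above; the implementation is an assumption):

import numpy as np


def random_get_batch_data(images, labels, batch_size):
    """Samples a random mini-batch from numpy arrays of images and labels."""
    indices = np.random.randint(0, len(images), size=batch_size)
    return images[indices], labels[indices]


# Tiny usage check with dummy arrays.
dummy_images = np.zeros((10, 299, 299, 3), dtype=np.float32)
dummy_labels = np.arange(10)
batch_images, batch_labels = random_get_batch_data(dummy_images, dummy_labels, batch_size=4)
print(batch_images.shape, batch_labels.shape)  # (4, 299, 299, 3) (4,)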
Ejemplo n.º 33
0
def main():
    # Load the preprocessed data.
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]

    validation_images = processed_data[2]
    validation_labels = processed_data[3]

    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print(
        "%d training examples, %d validation examples and %d testing examples."
        % (n_training_example, len(validation_labels), len(testing_labels)))

    # Define the Inception-v3 inputs: images holds the input pictures, labels the label of each picture.
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception-v3 model. Google only provides the parameter values, so the
    # model structure has to be defined here. In theory the training and testing models
    # should be distinguished (i.e. is_training=False at test time), but because the batch
    # normalization statistics in the pretrained Inception-v3 checkpoint differ from the
    # new data, the same model is used directly for testing as well.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images,
                                              num_classes=N_CLASSES,
                                              is_training=True)

    trainable_variables = get_trainable_variables()
    # Define the loss function and the training step.
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits,
                                    weights=1.0)
    total_loss = tf.losses.get_total_loss()
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(total_loss)

    # Compute the accuracy.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    # Define the loader for the Inception-v3 checkpoint pretrained by Google.
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    # Define the Saver for the newly trained model.
    saver = tf.train.Saver()

    with tf.Session() as sess:
        # Initialize the variables that are not loaded from the checkpoint.
        init = tf.global_variables_initializer()
        sess.run(init)

        # Load the model that Google has already trained.
        print('Loading tuned variables from %s' % CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(STEPS):
            _, loss = sess.run(
                [train_step, total_loss],
                feed_dict={
                    images: training_images[start:end],
                    labels: training_labels[start:end]
                })

            if i % 30 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)

                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images: validation_images,
                                                   labels: validation_labels
                                               })
                print(
                    'Step %d: Training loss is %.1f Validation accuracy = %.1f%%'
                    % (i, loss, validation_accuracy * 100.0))

            start = end
            if start == n_training_example:
                start = 0

            end = start + BATCH
            if end > n_training_example:
                end = n_training_example

        # Evaluate the accuracy on the test data at the end.
        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images,
                                     labels: testing_labels
                                 })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
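The comment in this example notes that evaluation should, in principle, use is_training=False. One common way to do that while sharing the already-loaded weights is to build a second tower that reuses the variables. A sketch that assumes the same images/labels placeholders and N_CLASSES defined above:

# Hedged sketch: an evaluation tower that reuses the training variables.
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    eval_logits, _ = inception_v3.inception_v3(images,
                                               num_classes=N_CLASSES,
                                               is_training=False,
                                               reuse=True)
eval_prediction = tf.equal(tf.argmax(eval_logits, 1), labels)
eval_accuracy = tf.reduce_mean(tf.cast(eval_prediction, tf.float32))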
Ejemplo n.º 34
0
def main(argv=None):
    input_data = '/Users/yxd/Downloads/flower_photos.npy'
    save_model_path = '/tmp/flower_model'
    # download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz
    ckpt_file = '/Users/yxd/Downloads/inception_v3.ckpt'

    learning_rate = 0.00001
    epochs = 10
    batch_size = 32
    classes = 5

    exclude_scopes = ['InceptionV3/Logits', 'InceptionV3/AuxLogits']
    trainable_scopes = ['InceptionV3/Logits', 'InceptionV3/AuxLogits']

    # Debug: print the contents of the Inception-v3 arg scope.
    for k, v in inception_v3.inception_v3_arg_scope().items():
        print(k)
        for k2, v2 in v.items():
            print("  " + k2)
            if isinstance(v2, dict):
                for k3, v3 in v2.items():
                    print("  " + k3 + ": " + str(v3))
            else:
                print(str(v2))
    npy_data = np.load(input_data)
    training_images = np.array(npy_data[0])
    training_labels = np.array(npy_data[1])
    validation_images = np.array(npy_data[2])
    validation_labels = np.array(npy_data[3])
    testing_images = np.array(npy_data[4])
    testing_labels = np.array(npy_data[5])
    n_training = len(training_images)
    print(np.array(training_images).shape)
    print(validation_images.shape)
    print(testing_images.shape)
    print(training_labels.shape)
    print(validation_labels.shape)
    print(testing_labels.shape)

    images = tf.placeholder(tf.float32, (None, 299, 299, 3),
                            name="input-images")
    labels = tf.placeholder(tf.int64, (None, ), name="input-labels")

    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=classes)
    trainable_variables = get_trainable_variables(trainable_scopes)
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, classes),
                                    logits,
                                    weights=1.0)
    optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(
        tf.losses.get_total_loss())

    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    load_fn = slim.assign_from_checkpoint_fn(
        ckpt_file,
        get_tuned_variables(exclude_scopes),
        ignore_missing_vars=True)

    saver = tf.train.Saver()

    with tf.Session() as s:
        tf.global_variables_initializer().run()
        print("loading tuned variables from %s" % ckpt_file)
        load_fn(s)
        global_step = 0
        for epoch in range(epochs):
            start = 0
            while start < training_images.shape[0]:
                end = min(start + batch_size, training_images.shape[0])
                s.run(optimizer,
                      feed_dict={
                          images: training_images[start:end],
                          labels: training_labels[start:end]
                      })
                global_step += 1
                start = end
                print("step " + str(global_step))
                if global_step % 5 == 0:
                    saver.save(s, save_model_path, global_step=global_step)
                    validation_acc = s.run(acc,
                                           feed_dict={
                                               images: validation_images,
                                               labels: validation_labels
                                           })
                    print("step %d:validation acc %f" %
                          (global_step, validation_acc))
            test_acc = s.run(acc,
                             feed_dict={
                                 images: testing_images,
                                 labels: testing_labels
                             })
            print("epoch %d:testing acc %f" % (epoch, test_acc))
Ejemplo n.º 35
0
def freeze_graph(model_dir, output_node_names):
    """Extract the sub graph defined by the output nodes and convert
    all its variables into constant

    Args:
        model_dir: the root folder containing the checkpoint state file
        output_node_names: a string, containing all the output node's names,
                            comma separated
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exist. Please specify an export "
            "directory: %s" % model_dir)

    if not output_node_names:
        print("You need to supply the name of a node to --output_node_names.")
        return -1

    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path

    # We determine the full filename of our frozen graph
    absolute_model_dir = "/".join(input_checkpoint.split('\\')[:-1])
    output_graph = absolute_model_dir + "/frozen_model.pb"

    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True

    # build graph

    x = tf.placeholder(dtype=tf.float32, shape=[1, 542, 718, 3], name='input')
    y = tf.placeholder(dtype=tf.float32, shape=[1, 2], name='label')

    x_preprocessed = preprocess(x)

    net, endpoints = inception_v3.inception_v3(inputs=x_preprocessed,
                                               num_classes=2,
                                               is_training=True,
                                               dropout_keep_prob=0.8)

    print(endpoints['Predictions'].graph == tf.get_default_graph())

    print("PREDICTION NAME : ", endpoints['Predictions'])

    saver = tf.train.Saver()

    # We start a session using a temporary fresh Graph
    with tf.Session(graph=endpoints['Predictions'].graph) as sess:
        # We import the meta graph in the current default Graph
        sess.run(tf.global_variables_initializer())

        # We restore the weights
        print(input_checkpoint)
        saver.restore(sess, input_checkpoint)

        # We use a built-in TF helper to export variables to constants
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            tf.get_default_graph().as_graph_def(
            ),  # The graph_def is used to retrieve the nodes
            output_node_names.split(
                ","
            )  # The output node names are used to select the useful nodes
        )

        # output_graph ="./frozen.pb"
        #
        # # Finally we serialize and dump the output graph to the filesystem
        # print("output path: {}".format(output_graph))
        # with tf.gfile.GFile(output_graph, "wb") as f:
        #     f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))

        # Optimize the graph for inference.

        # input_graph_def = tf.GraphDef()
        #
        # with tf.gfile.Open(output_graph, "r") as f:
        #     data = f.read()
        #     input_graph_def.ParseFromString(data)

        output_graph_def_opt = optimize_for_inference_lib.optimize_for_inference(
            output_graph_def,
            ["input"],  # an array of the input node(s)
            [output_node_names],  # an array of output nodes
            tf.float32.as_datatype_enum)

        # Save the optimized graph
        output_graph_opt_path = "./frozen.pb"  # output_graph.split(".")[0] + "_opt" + "." +  output_graph.split(".")[-1]
        with tf.gfile.FastGFile(output_graph_opt_path, "w") as f:
            f.write(output_graph_def_opt.SerializeToString())

    return output_graph_def
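This function calls a preprocess() helper that is not shown. A plausible sketch, assuming the usual Inception-v3 convention of resizing to 299x299 and scaling pixel values to [-1, 1) (this is an assumption for illustration, not the original helper):

def preprocess(x):
    """Sketch of the assumed preprocess(): resize to 299x299 and rescale to [-1, 1)."""
    x = tf.image.resize_bilinear(x, [299, 299], align_corners=False)
    x = tf.multiply(x, 1.0 / 255.0)   # [0, 255] -> [0, 1]
    x = tf.subtract(x, 0.5)
    x = tf.multiply(x, 2.0)           # [0, 1] -> [-1, 1)
    return x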
Ejemplo n.º 36
0
# Load the data and operate on it.
# Shuffle the data:
# state = np.random.get_state()
# np.random.shuffle(images_array)
# np.random.set_state(state)
# np.random.shuffle(labels)

# Define the input and output placeholders.
X = tf.placeholder(dtype=tf.float32,
                   shape=[None, 299, 299, 3],
                   name="input_image")
scores = tf.placeholder(dtype=tf.float32, shape=[None], name="scores")

with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits, _ = inception_v3.inception_v3(X, num_classes=classes)

# Get the trainable variables.
trainable_variables = get_trainable_variables()
# Use softmax cross-entropy.
'''Define the losses.'''
tf.losses.softmax_cross_entropy(tf.one_hot(redefined_labels_index, classes),
                                logits,
                                weights=1.0)
'''Define the optimizer.'''
train_step = tf.train.RMSPropOptimizer(learning_rate).minimize(
    tf.losses.get_total_loss())
'''Compute the accuracy.'''

with tf.name_scope('evaluation'):
    correct_prediction = tf.equal(tf.argmax(logits, 1), redefined_labels_index)
Ejemplo n.º 37
0
def main():
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]

    validation_images = processed_data[2]
    validation_labels = processed_data[3]

    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print(
        "%d training examples,%d validation examples and %d testing examples."
        % (n_training_example, len(validation_labels), len(testing_labels)))

    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_image')
    labels = tf.placeholder(tf.int64, [None], name='labels')
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images,
                                              num_classes=N_CLASSES,
                                              is_training=True)
    trainable_variables = get_trainable_variables()
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits,
                                    weights=1.0)
    total_loss = tf.losses.get_total_loss()
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(total_loss)

    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        print("loading tuned variables from %s" % CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(STEPS):
            _, loss = sess.run(
                [train_step, total_loss],
                feed_dict={
                    images: training_images[start:end],
                    labels: training_labels[start:end]
                })
            if i % 30 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images: validation_images,
                                                   labels: validation_labels
                                               })
                print(
                    "Step %d :Training loss is %.1f Validation accuracy = %.1f%%"
                    % (i, loss, validation_accuracy * 100.0))

            start = end
            if start == n_training_example:
                start = 0

            end = start + BATCH
            if end > n_training_example:
                end = n_training_example

        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images,
                                     labels: testing_labels
                                 })
        print("final test accuracy = %.1f%%" % (test_accuracy * 100))
Ejemplo n.º 38
0
def cnn_model_fn(features,labels,mode,params):
    """

    :param features:
    :param labels:
    :param mode:
    :param params:
    :return:
    """
    logits_name = 'predictions'
    labels = tf.one_hot(labels,params["num_classes"])

    # Load the inception_v3 model.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits,_ = inception_v3.inception_v3(inputs=features["image"],
                                             num_classes=params.get("num_classes"))

    # Prediction.
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions= {
            "class":tf.argmax(logits,1),
            "probabilities":tf.nn.softmax(logits,name="softmax_tensor")
        }
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions
        )

    # Get the variables that need to be trained.
    # trainable_variables = get_trainable_variable()
    # Define the loss.
    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels,logits=logits,weights=1.0)

    train_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits,1),tf.argmax(labels,1)),tf.float32))
    tf.summary.scalar("loss",loss)
    tf.summary.scalar("train_accuracy",train_accuracy)
    # Hook that saves model checkpoints.
    chpt_hook = tf.train.CheckpointSaverHook(
        checkpoint_dir = "./model/inception_v3/",
        save_secs=None,
        save_steps=20,
        saver=None,
        checkpoint_basename="trained_inception_v3_model.ckpt",
        scaffold=None,
        listeners=None
    )
    # tf.train.Scaffold()

    # Summary/logging hook.
    train_summary_hook = tf.train.SummarySaverHook(
        save_steps=20,
        save_secs=None,
        output_dir=LOG_DIR,
        summary_writer=None,
        scaffold=None,
        summary_op=tf.summary.merge_all()
    )
    # Training.
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Load parameters from an existing checkpoint.

        # print("restoring base ckpt")
        # exclusions = [scope.strip() for scope in CHECKPOINT_EXCLUDE_SCOPES.split(",")]
        # variables_to_restore = slim.get_variables_to_restore(exclude=exclusions)
        # # tf.train.init_from_checkpoint(MODEL_FILE,{v.name.split(":"):v for v in variables_to_restore})
        # tf.train.init_from_checkpoint(MODEL_FILE, {v.name.split(':')[0]: v for v in variables_to_restore})
        #
        # # tf.train.init_from_checkpoint(
        # #     ckpt_dir_or_file=MODEL_FILE,
        # #     assignment_map={params.get("checkpoint_scope"): params.get("checkpoint_scope")}  # 'OptimizeLoss/':'OptimizeLoss/'
        # # )
        #
        # print("restored base ckpt")

        # Train.
        global_step = tf.train.get_or_create_global_step()
        optimizer = tf.train.AdamOptimizer(params.get("learning_rate",0.0001))
        train_op = optimizer.minimize(loss=tf.losses.get_total_loss(),global_step=global_step)

        return tf.estimator.EstimatorSpec(
            mode=mode,
            train_op=train_op,
            loss=tf.losses.get_total_loss(),
            training_chief_hooks=[params.get("ckpt")], # 加载模型
            training_hooks=[chpt_hook,train_summary_hook] # 保存模型
        )

    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(labels=tf.argmax(labels, 1),
                                        predictions=tf.argmax(input=logits, axis=1),
                                        name='accuracy')
    }
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        eval_metric_ops=eval_metric_ops
    )
Ejemplo n.º 39
0
from tfoptests import persistor
from model_zoo.inception_v3 import save_dir, get_input

height = 299
width = 299
channels = 3

# Create graph
X = tf.placeholder(tf.float32,
                   shape=[None, height, width, channels],
                   name="input")
my_feed_dict = {}
my_feed_dict[X] = get_input("input")
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    net, end_points = inception_v3.inception_v3(X, num_classes=1001)
logits = end_points['Logits']
output = tf.nn.softmax(logits, name="output")
all_saver = tf.train.Saver()

# Execute graph
with tf.Session() as sess:
    all_saver.restore(
        sess,
        "/Users/susaneraly/SKYMIND/TFImport/TF_SOURCE_CODE/downloads_from_slim/inception_v3/inception_v3.ckpt"
    )
    prediction = output.eval(feed_dict=my_feed_dict)
    print(prediction)
    print(prediction.shape)
    print(np.sort(prediction.ravel()))
    tf.train.write_graph(
Ejemplo n.º 40
0
def main():
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print("%d training examples, %d validation examples and %d"
          "testing examples." % (
          n_training_example, len(validation_labels), len(testing_labels)))

    # Define the Inception-v3 inputs.
    images = tf.placeholder(tf.float32, [None, 299, 299, 3], 'input_images')
    labels = tf.placeholder(tf.int64, [None], 'labels')

    # Define the Inception-v3 model. The checkpoint only provides the parameter values,
    # so the corresponding model structure has to be defined here. Because the batch
    # normalization statistics in the pretrained Inception-v3 checkpoint differ from the
    # new data (which would give poor results), the same model is used directly for testing.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, N_CLASSES)

    # Get the variables that need to be trained.
    trainable_variables = get_trainable_variables()

    # Define the cross-entropy loss. The regularization losses were already added to the loss collection when the model was defined.
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES), logits)

    # Define the training step; minimize() is given the set of variables to optimize.
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss(), var_list=trainable_variables)

    # Compute the accuracy.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    # Define the function that loads the pretrained model.
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE, get_tuned_variables(),
                                             True)

    # Define the Saver for the newly trained model.
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        # Initialize all variables before loading, otherwise the loaded values would be overwritten.
        init = tf.global_variables_initializer()
        sess.run(init)

        # Load the pretrained model.
        print("Loading tuned variables from %s" % CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH
        for i in range(STEPS):
            # Run the training step. This does not update all parameters, only the specified subset.
            sess.run(train_step, {images: training_images[start:end],
                                  labels: training_labels[start:end]})

            # Log progress.
            if i % 30 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, i)
                validation_accuracy = sess.run(evaluation_step,
                                               {images: validation_images,
                                                labels: validation_labels})
                print("Step %d: Validation accuracy = %.1f%%" % (
                    i, validation_accuracy * 100.0))

            # The data was already shuffled during preprocessing, so the training data
            # can simply be used in order without a next_batch helper.
            start = end
            if start == n_training_example:
                start = 0
            end = min(start + BATCH, n_training_example)

        # After training, evaluate the accuracy on the test set.
        test_accuracy = sess.run(evaluation_step, {images: testing_images,
                                                   labels: testing_labels})
        print("Final test accuracy = %.1f%%" % (test_accuracy * 100.0))
Ejemplo n.º 41
0
checkpoint_file = '/home/jingang/Downloads/inception_V3/inception_v3.ckpt'

with tf.Session() as sess:
    decode_jpeg = tf.image.decode_jpeg("jpeginput", channels=3)
    if decode_jpeg.dtype != tf.float32:
        decode_jpeg = tf.image.convert_image_dtype(decode_jpeg,
                                                   dtype=tf.float32)
    image_ = tf.expand_dims(decode_jpeg, 0)
    image = tf.image.resize_bicubic(image_, [299, 299], align_corners=True)
    scaled_input_tensor = tf.scalar_mul((1.0 / 255), image)
    scaled_input_tensor = tf.subtract(scaled_input_tensor, 0.5)
    scaled_input_tensor = tf.multiply(scaled_input_tensor, 2.0)

    arg_scope = inception_v3.inception_v3_arg_scope()
    with slim.arg_scope(arg_scope):
        logits, end_points = inception_v3.inception_v3(
            inputs=scaled_input_tensor, is_training=False, num_classes=1001)
    save = tf.train.Saver()
    save.restore(sess, checkpoint_file)

    output_filename = '/home/jingang/Downloads/inception_V3/inception-v3-retrain.pb'
    graph = sess.graph
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, graph.as_graph_def(), ['InceptionV3/Predictions/Reshape_1'])
    with gfile.FastGFile(output_filename, 'wb') as f:
        f.write(output_graph_def.SerializeToString())
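A short sketch of reading the frozen graph back to check the exported prediction node; it reuses output_filename and the imports from the example above, and the check itself is an addition for illustration:

# Load the frozen GraphDef that was just written and look up the prediction tensor.
with gfile.FastGFile(output_filename, 'rb') as f:
    frozen_graph_def = tf.GraphDef()
    frozen_graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as g:
    tf.import_graph_def(frozen_graph_def, name='')
    predictions = g.get_tensor_by_name('InceptionV3/Predictions/Reshape_1:0')
    print(predictions)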
Ejemplo n.º 42
0
def main(argv=None):
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print(
        "%d training examples, %d validation examples and %d testing examples."
        % (n_training_example, len(validation_images), len(testing_images)))

    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception-v3 model. Google only provides the parameter values, so the model
    # structure has to be defined here. In theory the training and testing models should be
    # distinguished (is_training=False at test time), but because the batch_normalization
    # statistics in the pretrained Inception-v3 checkpoint differ from the new data (which
    # gives poor results), the same model is used directly for testing.
    # For a detailed explanation of this issue see: https://github.com/tensorflow/models/issues/1314
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images,
                                              num_classes=N_CLASSES)  #toget

        trainable_variables = get_trainble_variables()

        tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                        logits,
                                        weights=1.0)  #toget

        train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
            tf.losses.get_total_loss())

        with tf.name_scope('evaluation'):
            correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
            evaluation_step = tf.reduce_mean(
                tf.cast(correct_prediction, tf.float32))

        # Define the function that loads the pretrained model.
        load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                                 get_tuned_variables(),
                                                 ignore_missing_vars=True)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)

            print('Loading tuned variables from %s' % CKPT_FILE)
            load_fn(sess)  #toget

            start = 0
            end = BATCH
            for i in range(STEPS):
                sess.run(train_step,
                         feed_dict={
                             images: training_images[start:end],
                             labels: training_labels[start:end]
                         })

                if i % 30 == 0 or i + 1 == STEPS:
                    saver.save(sess, TRAIN_FILE, global_step=i)
                    validation_accuracy = sess.run(evaluation_step,
                                                   feed_dict={
                                                       images:
                                                       validation_images,
                                                       labels:
                                                       validation_labels
                                                   })
                    print('Step %d: Validation accuracy = %.1f%%' %
                          (i, validation_accuracy * 100.0))

                start = end
                if start == n_training_example:
                    start = 0
                end = start + BATCH
                if end > n_training_example:
                    end = n_training_example

            test_accuracy = sess.run(evaluation_step,
                                     feed_dict={
                                         images: testing_images,
                                         labels: testing_labels
                                     })
            print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
Ejemplo n.º 43
0
def main():
    # load data
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]

    print "%d training examples, %d validation examples and %d examples." % \
          (n_training_example, len(validatioan_labels), len(testing_labels))

    # define inception-v3 input
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # define inception-v3 model
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES)

    # get trainable variables
    trainable_variables = get_trainable_variables()

    # define cross entropy.
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits,
                                    weights=1.0)

    # define training process.
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss())

    # calc accuracy
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    # define model load function
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    # define function to save trained model
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # initial variables
        init = tf.global_variables_initializer()
        sess.run(init)

        # load trained google model
        print 'loading tuned variables from {}'.format(CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH

        validation_images_value = []
        print "Validation set num is: {}.".format(len(validation_images))
        for file_name in validation_images:
            # print "Get validation set {} value.".format(file_name)
            image_raw_data = gfile.FastGFile(file_name, 'rb').read()
            image = tf.image.decode_jpeg(image_raw_data)
            if image.dtype != tf.float32:
                image = tf.image.convert_image_dtype(image, dtype=tf.float32)
            image = tf.image.resize_images(image, [299, 299])
            image_value = sess.run(image)
            validation_images_value.append(image_value)

        for i in xrange(STEPS):
            # Resize pictures to 299*299 so that inception-v3 can process
            # them.
            training_images_value = []
            for file_name in training_images[start:end]:
                # print "Get training set {} value.".format(file_name)
                image_raw_data = gfile.FastGFile(file_name, 'rb').read()
                image = tf.image.decode_jpeg(image_raw_data)
                if image.dtype != tf.float32:
                    image = tf.image.convert_image_dtype(image,
                                                         dtype=tf.float32)
                image = tf.image.resize_images(image, [299, 299])
                image_value = sess.run(image)
                training_images_value.append(image_value)

            sess.run(train_step,
                     feed_dict={
                         images: training_images_value,
                         labels: training_labels[start:end]
                     })

            print "Training {} steps finished.".format(i)

            if i % 50 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(evaluation_step,
                                               feed_dict={
                                                   images:
                                                   validation_images_value,
                                                   labels: validation_labels
                                               })
                print "Step %d: validation accuracy = %.1f%%" % \
                      (i, validation_accuracy * 100.0)

            start = end
            if start == n_training_example:
                start = 0
            end = start + BATCH

            if end > n_training_example:
                end = n_training_example

        testing_images_value = []
        for file_name in testing_images:
            # print "Get testing set {} value.".format(file_name)
            image_raw_data = gfile.FastGFile(file_name, 'rb').read()
            image = tf.image.decode_jpeg(image_raw_data)
            if image.dtype != tf.float32:
                image = tf.image.convert_image_dtype(image, dtype=tf.float32)
            image = tf.image.resize_images(image, [299, 299])
            image_value = sess.run(image)
            testing_images_value.append(image_value)

        test_accuracy = sess.run(evaluation_step,
                                 feed_dict={
                                     images: testing_images_value,
                                     labels: testing_labels
                                 })

        print "Final test accuracy = %.1f%%" % (test_accuracy * 100)
Ejemplo n.º 44
0
validation_x = processed_data[2]
validation_y = processed_data[3]

testing_x = processed_data[4]
testing_y = processed_data[5]
print("traing sample:%d,Validation sample %d,Test sample %d"\
      %(n_train,len(validation_x),len(testing_x)))
with tf.name_scope("input"):
    images = tf.placeholder(tf.float32, [None, 299, 299, 3], name="image")

    label = tf.placeholder(tf.int64, [None], name='label')

with tf.name_scope("train"):
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, endpoint = inception.inception_v3(images,
                                                  num_classes=2,
                                                  is_training=True)
    trainable_variables = get_trainable_variables()

    xentropy = tf.losses.sparse_softmax_cross_entropy(logits=logits,
                                                      labels=label)
    loss = tf.reduce_mean(xentropy)
    optimizer = tf.train.AdamOptimizer()
    training_op = optimizer.minimize(loss)

with tf.name_scope("eval"):
    correct = tf.equal(tf.argmax(logits, 1), label)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

load_fn = slim.assign_from_checkpoint_fn(INCEPTION_V3_CHECKPOINT_PATH,get_tuned_variables(),\
                                         ignore_missing_vars=True)
def main(argv=None):
    # The training procedure has the following steps: 1) load the data, 2) define the network model, 3) define the loss function, 4) define the optimizer, 5) define the evaluation metric.

    # Load the preprocessed image data.
    processed_data = np.load(DATA_FILE)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    n_validation_example = len(validation_images)
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    n_testing_example = len(testing_images)
    testing_labels = processed_data[5]
    logger.info(
        '%d training examples, %d validation examples, %d testing examples.' %
        (n_training_example, n_validation_example, n_testing_example))

    # Define the input image and label placeholders.
    images = tf.placeholder(tf.float32, [None, 299, 299, 3],
                            name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception-v3 model. The checkpoint released by Google only
    # contains parameter values, so the inception_v3 model structure has to be
    # defined here. The batch-normalization statistics in the pretrained
    # Inception-v3 model differ from those of the new data, which would make
    # the training results poor, so a single model is used for both training
    # and testing instead of separate training and testing graphs.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes=N_CLASSES)

    # Get the variables that need to be trained.
    trainable_variables = get_trainable_variables()

    # Define the cross-entropy loss. The regularization losses on the
    # parameters were already added when the model was defined.
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, N_CLASSES),
                                    logits,
                                    weights=1.0)
    # Define the optimizer.
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        tf.losses.get_total_loss())

    # Compute the accuracy to evaluate the model.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(tf.cast(correct_prediction,
                                                 tf.float32))

    # Define the function that loads the pretrained checkpoint.
    load_fn = slim.assign_from_checkpoint_fn(CKPT_FILE,
                                             get_tuned_variables(),
                                             ignore_missing_vars=True)

    # Define a saver for persisting the fine-tuned model.
    saver = tf.train.Saver()

    # Start training.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Load Google's pretrained parameters.
        logger.info('Loading tuned variables from %s' % CKPT_FILE)
        load_fn(sess)

        start = 0
        end = BATCH_SIZE
        for i in range(STEPS):
            logger.info('Training step %d of %d...' % (i, STEPS))
            try:
                sess.run(train_step,
                         feed_dict={
                             images: training_images[start:end],
                             labels: training_labels[start:end]
                         })
            except Exception:
                logger.error('Training step failed', exc_info=True)

            # Log progress.
            if i % display_steps == 0 or i + 1 == STEPS:
                validation_acc = sess.run(evaluation_step,
                                          feed_dict={
                                              images: validation_images,
                                              labels: validation_labels
                                          })
                logger.info('Step %d of %d: validation accuracy = %.1f%%' %
                            (i, STEPS, validation_acc * 100.0))
                # Persist the model checkpoint.
                if i % save_steps == 0 or i + 1 == STEPS:
                    saver.save(sess, TRAIN_FILE_SAVE_PATH, global_step=i)
            # The data was already shuffled during preprocessing, so the
            # training data can simply be consumed in order here.
            start = end
            if start == n_training_example:
                start = 0
            # Advance the window from the (possibly reset) start so batches
            # keep the size BATCH_SIZE after wrapping around.
            end = start + BATCH_SIZE
            if end > n_training_example:
                end = n_training_example
        # Finally, evaluate accuracy on the test set.
        test_acc = sess.run(evaluation_step,
                            feed_dict={
                                images: testing_images,
                                labels: testing_labels
                            })
        logger.info('Final test acc = %.1f%%' % (test_acc * 100.0))
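
# A minimal sketch (not shown in the excerpt above): main() calls
# get_tuned_variables() and get_trainable_variables(), whose definitions are
# not included here. A common way to implement them for this kind of
# fine-tuning is to filter variables by scope name: restore everything except
# the final logit layers from the checkpoint, and train only those final
# layers. The scope constants below and the function bodies are illustrative
# assumptions; `tf` and `slim` are assumed to be the modules imported by the
# example.
CHECKPOINT_EXCLUDE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'
TRAINABLE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'


def get_tuned_variables():
    """Returns the pretrained variables to restore from the checkpoint."""
    exclusions = [s.strip() for s in CHECKPOINT_EXCLUDE_SCOPES.split(',')]
    variables_to_restore = []
    for var in slim.get_model_variables():
        if not any(var.op.name.startswith(exclusion)
                   for exclusion in exclusions):
            variables_to_restore.append(var)
    return variables_to_restore


def get_trainable_variables():
    """Returns the variables of the new classification layers to train."""
    variables_to_train = []
    for scope in [s.strip() for s in TRAINABLE_SCOPES.split(',')]:
        variables_to_train.extend(
            tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope))
    return variables_to_train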