Beispiel #1
0
 def testBuildNonExistingLayerLargeModel(self):
   """Verifies the large model omits layers that should not be constructed."""
   images = tf.random_uniform((5, 331, 331, 3))
   tf.train.create_global_step()
   with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
     pnasnet.build_pnasnet_large(images, 1000)
   trainable_names = [v.op.name for v in tf.trainable_variables()]
   # The first stem cell must exist; the pruned branch of stem cell 1 must not.
   self.assertIn('cell_stem_0/1x1/weights', trainable_names)
   self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights',
                    trainable_names)
Beispiel #2
0
 def testBuildNonExistingLayerLargeModel(self):
   """Builds PNASNet-Large and inspects which stem variables were created."""
   inputs = tf.random_uniform((5, 331, 331, 3))
   tf.train.create_global_step()
   with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
     pnasnet.build_pnasnet_large(inputs, 1000)
   names = []
   for variable in tf.trainable_variables():
     names.append(variable.op.name)
   self.assertIn('cell_stem_0/1x1/weights', names)
   self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', names)
Beispiel #3
0
def pnasnetfunrotate(input_imgs, reuse):
    """Builds PNASNet-Large over randomly rotated copies of the input.

    NOTE(review): the reshape to [1, image_size, image_size, 3] assumes
    `input_imgs` holds a single image -- confirm with the caller.
    Relies on module-level `batchsize` and `image_size`.
    """
    rotated_copies = []
    for _ in range(batchsize):
        angle = tf.random_uniform((), minval=-np.pi/4, maxval=np.pi/4)
        one_rotation = tf.contrib.image.rotate(input_imgs, angle)
        rotated_copies.append(
            tf.reshape(one_rotation, [1, image_size, image_size, 3]))

    inputarr = tf.concat(rotated_copies, axis=0)
    print(inputarr.get_shape())

    # Map pixel values into [-1, 1]: 2 * x - 1.
    preprocessed = tf.subtract(tf.multiply(inputarr, 2.0), 1.0)

    # Obtain the model's arg scope (layer defaults / namespace).
    arg_scope = pnasnet.pnasnet_large_arg_scope()

    with slim.arg_scope(arg_scope):
        with slim.arg_scope(
                [slim.conv2d, slim.batch_norm, slim.fully_connected,
                 slim.separable_conv2d],
                reuse=reuse):
            rotated_logits, end_points = pnasnet.build_pnasnet_large(
                preprocessed, num_classes=1001, is_training=False)
            prob = end_points['Predictions']
    return rotated_logits, prob
Beispiel #4
0
 def testBuildPreLogitsLargeModel(self):
     """Checks the pre-logits output when num_classes is None."""
     batch_size = 5
     height, width = 331, 331
     num_classes = None
     inputs = tf.random_uniform((batch_size, height, width, 3))
     tf.train.create_global_step()
     with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
         net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
     # Without classes, no logits/prediction heads are attached.
     self.assertNotIn('AuxLogits', end_points)
     self.assertNotIn('Predictions', end_points)
     self.assertTrue(net.op.name.startswith('final_layer/Mean'))
     self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])
Beispiel #5
0
 def testBuildPreLogitsLargeModel(self):
   """Large model built with num_classes=None exposes only pre-logits."""
   num_classes = None
   batch_size = 5
   height, width = 331, 331
   inputs = tf.random_uniform((batch_size, height, width, 3))
   tf.train.create_global_step()
   with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
     net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
   for absent in ('AuxLogits', 'Predictions'):
     self.assertFalse(absent in end_points)
   self.assertTrue(net.op.name.startswith('final_layer/Mean'))
   self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])
    def _extract_proposal_features(self, preprocessed_inputs, scope):
        """Extracts first stage RPN features.

        Runs the first half of PNASNet (up to Cell_7) in
        `align_feature_maps=True` mode, so every VALID padding becomes SAME
        padding and the produced feature maps stay aligned.

        Args:
          preprocessed_inputs: A [batch, height, width, channels] float32
            tensor representing a batch of images.
          scope: A scope name (unused).

        Returns:
          rpn_feature_map: A tensor with shape [batch, height, width, depth].
          end_points: A dictionary mapping feature extractor tensor names to
            tensors.

        Raises:
          ValueError: If `preprocessed_inputs` is not a 4-D tensor.
        """
        del scope

        input_shape = preprocessed_inputs.get_shape()
        if len(input_shape.as_list()) != 4:
            raise ValueError(
                '`preprocessed_inputs` must be 4 dimensional, got a '
                'tensor of shape %s' % input_shape)

        detection_scope = pnasnet_large_arg_scope_for_detection(
            is_batch_norm_training=self._train_batch_norm)
        with slim.arg_scope(detection_scope):
            with arg_scope(
                [slim.conv2d, slim.batch_norm, slim.separable_conv2d],
                    reuse=self._reuse_weights):
                _, end_points = pnasnet.build_pnasnet_large(
                    preprocessed_inputs,
                    num_classes=None,
                    is_training=self._is_training,
                    final_endpoint='Cell_7')

        # 'Cell_6' and 'Cell_7' both have depth 2160; Cell_7 is the last cell
        # before the second reduction, so concatenate them channel-wise.
        rpn_feature_map = tf.concat(
            [end_points['Cell_6'], end_points['Cell_7']], 3)

        # pnasnet.py drops the static batch dimension; re-attach it so
        # downstream shape inference keeps the batch size.
        static_batch = preprocessed_inputs.get_shape().as_list()[0]
        tail_shape = rpn_feature_map.get_shape().as_list()[1:]
        rpn_feature_map.set_shape([static_batch] + tail_shape)

        return rpn_feature_map, end_points
Beispiel #7
0
 def testOverrideHParamsLargeModel(self):
   """Overriding data_format to NCHW changes the Stem endpoint layout."""
   batch_size = 5
   height, width = 331, 331
   num_classes = 1000
   inputs = tf.random_uniform((batch_size, height, width, 3))
   tf.train.create_global_step()
   config = pnasnet.large_imagenet_config()
   config.set_hparam('data_format', 'NCHW')
   with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
     _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes,
                                                 config=config)
   # Channels-first layout: [N, C, H, W].
   self.assertListEqual(end_points['Stem'].shape.as_list(),
                        [batch_size, 540, 42, 42])
Beispiel #8
0
 def testOverrideHParamsLargeModel(self):
   """Checks that an hparam override (NCHW data_format) is honored."""
   num_classes = 1000
   batch_size = 5
   inputs = tf.random_uniform((batch_size, 331, 331, 3))
   tf.train.create_global_step()
   config = pnasnet.large_imagenet_config()
   config.set_hparam('data_format', 'NCHW')
   with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
     _, end_points = pnasnet.build_pnasnet_large(
         inputs, num_classes, config=config)
   stem_shape = end_points['Stem'].shape.as_list()
   self.assertListEqual(stem_shape, [batch_size, 540, 42, 42])
Beispiel #9
0
 def testNoAuxHeadLargeModel(self):
   """AuxLogits endpoint exists iff use_aux_head is enabled."""
   batch_size = 5
   height, width = 331, 331
   num_classes = 1000
   for use_aux_head in (True, False):
     tf.reset_default_graph()
     inputs = tf.random_uniform((batch_size, height, width, 3))
     tf.train.create_global_step()
     config = pnasnet.large_imagenet_config()
     # HParams stores the flag as an int.
     config.set_hparam('use_aux_head', int(use_aux_head))
     with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
       _, end_points = pnasnet.build_pnasnet_large(
           inputs, num_classes, config=config)
     self.assertEqual('AuxLogits' in end_points, use_aux_head)
  def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.

    Builds the first half of the PNASNet network (through Cell_7) with
    `align_feature_maps=True`, which turns all VALID paddings into SAME
    paddings so the feature maps stay aligned.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name (unused).

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth].
      end_points: A dictionary mapping feature extractor tensor names to
        tensors.

    Raises:
      ValueError: If `preprocessed_inputs` is not 4 dimensional.
    """
    del scope

    rank = len(preprocessed_inputs.get_shape().as_list())
    if rank != 4:
      raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
                       'tensor of shape %s' % preprocessed_inputs.get_shape())

    with slim.arg_scope(pnasnet_large_arg_scope_for_detection(
        is_batch_norm_training=self._train_batch_norm)):
      with arg_scope(
          [slim.conv2d, slim.batch_norm, slim.separable_conv2d],
          reuse=self._reuse_weights):
        _, end_points = pnasnet.build_pnasnet_large(
            preprocessed_inputs,
            num_classes=None,
            is_training=self._is_training,
            final_endpoint='Cell_7')

    # Both 'Cell_6' and 'Cell_7' have depth 2160; Cell_7 is the last cell
    # before the second reduction.
    rpn_feature_map = tf.concat(
        [end_points['Cell_6'], end_points['Cell_7']], axis=3)

    # pnasnet.py does not keep the static batch dimension; restore it so
    # later shape inference can rely on the batch size.
    batch_dim = preprocessed_inputs.get_shape().as_list()[0]
    rest_dims = rpn_feature_map.get_shape().as_list()[1:]
    rpn_feature_map.set_shape([batch_dim] + rest_dims)

    return rpn_feature_map, end_points
Beispiel #11
0
 def testNoAuxHeadLargeModel(self):
   """Toggles use_aux_head and checks for the AuxLogits endpoint."""
   num_classes = 1000
   input_shape = (5, 331, 331, 3)
   for use_aux_head in (True, False):
     tf.reset_default_graph()
     inputs = tf.random_uniform(input_shape)
     tf.train.create_global_step()
     config = pnasnet.large_imagenet_config()
     config.set_hparam('use_aux_head', int(use_aux_head))
     with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
       _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes,
                                                   config=config)
     self.assertEqual(use_aux_head, 'AuxLogits' in end_points)
Beispiel #12
0
def pnasnet_large(inputs, is_training, opts):
    """Builds PNASNet-Large with hyperparameters taken from `opts`.

    Args:
      inputs: Input image tensor.
      is_training: Whether the network is built for training.
      opts: Options object providing weight_decay, batch_norm_decay,
        batch_norm_epsilon, dropout_keep_prob, create_aux_logits and
        num_classes.

    Returns:
      The (logits, end_points) pair from pnasnet.build_pnasnet_large.
    """
    scope = pnasnet.pnasnet_large_arg_scope(
        weight_decay=opts.weight_decay,
        batch_norm_decay=opts.batch_norm_decay,
        batch_norm_epsilon=opts.batch_norm_epsilon)
    with slim.arg_scope(scope):
        config = pnasnet.large_imagenet_config()
        config.set_hparam('dense_dropout_keep_prob', opts.dropout_keep_prob)
        # HParams stores boolean flags as ints.
        config.set_hparam('use_aux_head', int(opts.create_aux_logits))

        return pnasnet.build_pnasnet_large(
            inputs,
            num_classes=opts.num_classes,
            is_training=is_training,
            config=config)
Beispiel #13
0
 def testBuildLogitsLargeModel(self):
   """Logits, AuxLogits and Predictions all have shape [batch, classes]."""
   batch_size = 5
   height, width = 331, 331
   num_classes = 1000
   inputs = tf.random_uniform((batch_size, height, width, 3))
   tf.train.create_global_step()
   with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
     logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
   expected = [batch_size, num_classes]
   self.assertListEqual(end_points['AuxLogits'].get_shape().as_list(),
                        expected)
   self.assertListEqual(logits.get_shape().as_list(), expected)
   self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                        expected)
Beispiel #14
0
 def testBuildLogitsLargeModel(self):
   """Builds the classifier head and verifies all head output shapes."""
   num_classes = 1000
   batch_size = 5
   inputs = tf.random_uniform((batch_size, 331, 331, 3))
   tf.train.create_global_step()
   with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
     logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
   for tensor in (end_points['AuxLogits'], logits,
                  end_points['Predictions']):
     self.assertListEqual(tensor.get_shape().as_list(),
                          [batch_size, num_classes])
Beispiel #15
0
    def extract_features(self, preprocessed_inputs):
        """Extract features from preprocessed inputs.

        Args:
          preprocessed_inputs: a [batch, height, width, channels] float tensor
            representing a batch of images.

        Returns:
          feature_maps: a list of tensors where the ith tensor has shape
            [batch, height_i, width_i, depth_i]
        """
        # Only the first self._num_layers entries of each spec are used.
        num_layers = self._num_layers
        feature_map_layout = {
            'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''][:num_layers],
            'layer_depth': [-1, -1, 512, 256, 256, 128][:num_layers],
            'use_explicit_padding': self._use_explicit_padding,
            'use_depthwise': self._use_depthwise,
        }

        with slim.arg_scope(
                pnasnet_large_arg_scope_for_detection(
                    is_batch_norm_training=self._is_training)):
            with slim.arg_scope(
                [slim.conv2d, slim.batch_norm, slim.separable_conv2d],
                    reuse=self._reuse_weights):
                # Optionally override the backbone hyperparams; otherwise use
                # a no-op context manager.
                with (slim.arg_scope(self._conv_hyperparams_fn())
                      if self._override_base_feature_extractor_hyperparams
                      else context_manager.IdentityContextManager()):
                    _, image_features = pnasnet.build_pnasnet_large(
                        ops.pad_to_multiple(preprocessed_inputs,
                                            self._pad_to_multiple),
                        num_classes=None,
                        is_training=self._is_training,
                        final_endpoint='Cell_11')
        with tf.compat.v1.variable_scope('SSD_feature_maps',
                                         reuse=self._reuse_weights):
            with slim.arg_scope(self._conv_hyperparams_fn()):
                feature_maps = feature_map_generators.multi_resolution_feature_maps(
                    feature_map_layout=feature_map_layout,
                    depth_multiplier=self._depth_multiplier,
                    min_depth=self._min_depth,
                    insert_1x1_conv=True,
                    image_features=image_features)

        return feature_maps.values()
Beispiel #16
0
def pnasnetfun(input_imgs, reuse):
    """Runs PNASNet-Large on a single image; returns (logits, probabilities)."""
    # Add a batch dimension and scale pixels into [-1, 1]: 2 * x - 1.
    batched = tf.expand_dims(input_imgs, 0)
    preprocessed = tf.subtract(tf.multiply(batched, 2.0), 1.0)

    # Obtain the model's arg scope (layer defaults / namespace).
    arg_scope = pnasnet.pnasnet_large_arg_scope()

    with slim.arg_scope(arg_scope):
        with slim.arg_scope(
                [slim.conv2d, slim.batch_norm, slim.fully_connected,
                 slim.separable_conv2d],
                reuse=reuse):
            logits, end_points = pnasnet.build_pnasnet_large(
                preprocessed, num_classes=1001, is_training=False)
            prob = end_points['Predictions']
    return logits, prob
  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    # Two layers come straight from PNASNet endpoints; the remaining four
    # are generated at the given depths.
    feature_map_layout = {
        'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''],
        'layer_depth': [-1, -1, 512, 256, 256, 128],
        'use_explicit_padding': self._use_explicit_padding,
        'use_depthwise': self._use_depthwise,
    }

    with slim.arg_scope(
        pnasnet_large_arg_scope_for_detection(
            is_batch_norm_training=self._is_training)):
      with slim.arg_scope(
          [slim.conv2d, slim.batch_norm, slim.separable_conv2d],
          reuse=self._reuse_weights):
        # Optionally override backbone hyperparams, else no-op context.
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams
              else context_manager.IdentityContextManager()):
          _, image_features = pnasnet.build_pnasnet_large(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              num_classes=None,
              is_training=self._is_training,
              final_endpoint='Cell_11')
    with tf.variable_scope('SSD_feature_maps', reuse=self._reuse_weights):
      with slim.arg_scope(self._conv_hyperparams_fn()):
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

    return feature_maps.values()
Beispiel #18
0
    def testAllEndPointsShapesLargeModel(self):
        """Checks the name and shape of every endpoint of the large model."""
        batch_size = 5
        height, width = 331, 331
        num_classes = 1000
        inputs = tf.random.uniform((batch_size, height, width, 3))
        tf.compat.v1.train.create_global_step()
        with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
            _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)

        def cell(h, w, depth):
            # Shorthand for a 4-D endpoint shape.
            return [batch_size, h, w, depth]

        endpoints_shapes = {
            'Stem': cell(42, 42, 540),
            'Cell_0': cell(42, 42, 1080),
            'Cell_1': cell(42, 42, 1080),
            'Cell_2': cell(42, 42, 1080),
            'Cell_3': cell(42, 42, 1080),
            'Cell_4': cell(21, 21, 2160),
            'Cell_5': cell(21, 21, 2160),
            'Cell_6': cell(21, 21, 2160),
            'Cell_7': cell(21, 21, 2160),
            'Cell_8': cell(11, 11, 4320),
            'Cell_9': cell(11, 11, 4320),
            'Cell_10': cell(11, 11, 4320),
            'Cell_11': cell(11, 11, 4320),
            'global_pool': [batch_size, 4320],
            # Classification heads.
            'AuxLogits': [batch_size, 1000],
            'Predictions': [batch_size, 1000],
            'Logits': [batch_size, 1000],
        }
        self.assertEqual(len(end_points), 17)
        self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
        for endpoint_name, expected_shape in endpoints_shapes.items():
            tf.compat.v1.logging.info(
                'Endpoint name: {}'.format(endpoint_name))
            self.assertIn(endpoint_name, end_points)
            self.assertListEqual(
                end_points[endpoint_name].get_shape().as_list(),
                expected_shape)
Beispiel #19
0
  def testAllEndPointsShapesLargeModel(self):
    """Verifies every endpoint of the large model has the expected shape."""
    batch_size = 5
    height, width = 331, 331
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    tf.train.create_global_step()
    with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
      _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)

    # Cells 0-3 keep the stem resolution; each reduction halves the spatial
    # size and doubles the depth.
    endpoints_shapes = {}
    endpoints_shapes['Stem'] = [batch_size, 42, 42, 540]
    for i in range(4):
      endpoints_shapes['Cell_%d' % i] = [batch_size, 42, 42, 1080]
    for i in range(4, 8):
      endpoints_shapes['Cell_%d' % i] = [batch_size, 21, 21, 2160]
    for i in range(8, 12):
      endpoints_shapes['Cell_%d' % i] = [batch_size, 11, 11, 4320]
    endpoints_shapes['global_pool'] = [batch_size, 4320]
    # Logits and predictions.
    for head in ('AuxLogits', 'Predictions', 'Logits'):
      endpoints_shapes[head] = [batch_size, 1000]

    self.assertEqual(len(end_points), 17)
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name in endpoints_shapes:
      tf.logging.info('Endpoint name: {}'.format(endpoint_name))
      expected_shape = endpoints_shapes[endpoint_name]
      self.assertIn(endpoint_name, end_points)
      self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                           expected_shape)
def main():
    """Loads PNASNet-Large, classifies the sample images and shows results."""
    slim = tf.contrib.slim

    tf.reset_default_graph()
    # Default input resolution of PNASNet-Large.
    image_size = pnasnet.build_pnasnet_large.default_image_size  # 331

    labels = imagenet.create_readable_names_for_imagenet_labels()

    sample_images = ['hy.jpg', 'mac.jpg', 'filename3.jpg', '72.jpg', 'ps.jpg']

    input_imgs = tf.placeholder(tf.float32, [None, image_size, image_size, 3])
    # Scale pixel values from [0, 255] into [-1, 1].
    x1 = 2 * (input_imgs / 255.0) - 1.0

    arg_scope = pnasnet.pnasnet_large_arg_scope()

    with slim.arg_scope(arg_scope):
        logit, end_points = pnasnet.build_pnasnet_large(x1,
                                                        num_classes=1001,
                                                        is_training=False)
        print(end_points)
        prob = end_points['Predictions']
        # Index of the most probable class for each image in the batch.
        y = tf.argmax(prob, axis=1)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        # NOTE(review): `checkepoint_file` is not defined in this function --
        # presumably a module-level checkpoint path (possibly a typo of
        # `checkpoint_file`); confirm before running.
        saver.restore(sess, checkepoint_file)

        def preimg(img):
            # Resize to the network input size; keep only the RGB channels.
            ch = 3
            print(img.mode)

            if img.mode == 'RGBA':
                ch = 4

            imgnp = np.asarray(img.resize((image_size, image_size)),
                               dtype=np.float32).reshape(
                                   image_size, image_size, ch)
            return imgnp[:, :, :3]

        batchImg = [
            preimg(Image.open(imgfilename)) for imgfilename in sample_images
        ]
        orgImg = [Image.open(imgfilename) for imgfilename in sample_images]

        yv, img_norm = sess.run([y, x1], feed_dict={input_imgs: batchImg})

        # print(yv, np.shape(yv))

        def showresult(yy, img_norm, img_org):
            # Display the original and the normalized network input side by side.
            plt.figure()
            p1 = plt.subplot(121)
            p2 = plt.subplot(122)

            p1.imshow(img_org)
            p1.axis('off')
            p1.set_title("organization image")

            p2.imshow((img_norm * 255).astype(np.uint8))
            p2.axis('off')
            p2.set_title("input image")

            plt.show()
            # print(yy)
            # print(labels[yy])

        for yy, img1, img2 in zip(yv, batchImg, orgImg):
            showresult(yy, img1, img2)
with open('中文标签.csv', 'r+') as f:  # open the Chinese label file
    labels = list(map(getone, list(f)))
    print(len(labels), type(labels), labels[:5])  # show the first labels

sample_images = ['hy.jpg', 'ps.jpg', '72.jpg']  # paths of images to test

input_imgs = tf.placeholder(tf.float32,
                            [None, image_size, image_size, 3])  # input placeholder

x1 = 2 * (input_imgs / 255.0) - 1.0  # normalize pixels to [-1, 1]

arg_scope = pnasnet.pnasnet_large_arg_scope()  # obtain the model's arg scope
with slim.arg_scope(arg_scope):
    logits, end_points = pnasnet.build_pnasnet_large(x1,
                                                     num_classes=1001,
                                                     is_training=False)
    prob = end_points['Predictions']
    y = tf.argmax(prob, axis=1)  # output node: index of the top class

checkpoint_file = r'./pnasnet-5_large_2017_12_13/model.ckpt'  # model path
saver = tf.train.Saver()  # Saver used to restore the pretrained weights
with tf.Session() as sess:  # create a session
    saver.restore(sess, checkpoint_file)  # load the model weights

    def preimg(img):  #定义图片预处理函数
        ch = 3
        if img.mode == 'RGBA':  #兼容RGBA图片
            ch = 4

        imgnp = np.asarray(img.resize((image_size, image_size)),
使用TF-Slim进行图像分类,还需要安装:TF-slim image models library,这个库不在TF的核心库中。
(1)git clone https://github.com/tensorflow/models
(2)cd models/research/slim
(3)测试是否可用:python -c "from nets import cifarnet; mynet = cifarnet.cifarnet"
注意:models/research/slim这个目录可以取出来放到单独放到一个地方直接使用。
可以查看源码目录中的各种*_test.py学习不同模型的使用
'''
import sys
# 我将slim文件夹放到了/Users/rensike/Files/temp/目录下
sys.path.append("/Users/rensike/Files/temp/slim")

import tensorflow as tf

slim = tf.contrib.slim
'''
获取pnasnet最后的输出层的变量名称
'''
from nets.nasnet import pnasnet

# Build PNASNet-Large once and print the name of the predictions op.
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
    logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
# Endpoints of the auxiliary and main classification heads.
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']

print(predictions.op.name)  # final_layer/predictions
Beispiel #23
0
    def _build_graph(self, input_tensor):
        """Builds the selected backbone network on top of `input_tensor`.

        Creates (or validates) the input tensor, dispatches on
        `self.model_name` to construct the matching slim model, records the
        checkpoint path, and collects the model's variables.

        Args:
            input_tensor: Optional input tensor; when None, a placeholder of
                shape `self.image_shape` is created.
        """
        with tf.name_scope('inputs'):
            if input_tensor is None:
                input_tensor = tf.placeholder(tf.float32,
                                              shape=self.image_shape,
                                              name='input_img')
            else:
                assert self.image_shape == tuple(input_tensor.shape.as_list())
            self.input_tensor = input_tensor

        # Dispatch on the model name. This was previously two disconnected
        # chains (the 'resnet_v2_152' branch restarted with `if`); merged
        # into a single if/elif chain for consistency -- behavior is
        # unchanged because the conditions are mutually exclusive.
        if self.model_name == 'vgg_16':
            self.ckpt_path = slim_models_path + "vgg_16.ckpt"
            from nets.vgg import vgg_16, vgg_arg_scope
            with slim.arg_scope(vgg_arg_scope()):
                self.output, self.outputs = vgg_16(
                    self.input_tensor,
                    num_classes=self.num_classes,
                    is_training=self.is_training,
                    global_pool=self.global_pool)

        elif self.model_name == 'resnet_v2_152':
            self.ckpt_path = slim_models_path + "resnet_v2_152.ckpt"
            from nets.resnet_v2 import resnet_v2_152, resnet_arg_scope
            with slim.arg_scope(resnet_arg_scope()):
                self.output, self.outputs = resnet_v2_152(
                    self.input_tensor,
                    num_classes=self.num_classes,
                    is_training=self.is_training,
                    global_pool=self.global_pool)

        elif self.model_name == 'resnet_v2_101':
            self.ckpt_path = slim_models_path + "resnet_v2_101.ckpt"
            from nets.resnet_v2 import resnet_v2_101, resnet_arg_scope
            with slim.arg_scope(resnet_arg_scope()):
                self.output, self.outputs = resnet_v2_101(
                    self.input_tensor,
                    num_classes=self.num_classes,
                    is_training=self.is_training,
                    global_pool=self.global_pool)

        elif self.model_name == 'resnet_v2_50':
            self.ckpt_path = slim_models_path + "resnet_v2_50.ckpt"
            from nets.resnet_v2 import resnet_v2_50, resnet_arg_scope
            with slim.arg_scope(resnet_arg_scope()):
                self.output, self.outputs = resnet_v2_50(
                    self.input_tensor,
                    num_classes=self.num_classes,
                    is_training=self.is_training,
                    global_pool=self.global_pool)

        elif self.model_name == 'InceptionV3':
            self.ckpt_path = slim_models_path + "inception_v3.ckpt"
            from nets.inception import inception_v3, inception_v3_arg_scope
            with slim.arg_scope(inception_v3_arg_scope()):
                self.output, self.outputs = inception_v3(
                    self.input_tensor,
                    num_classes=self.num_classes,
                    is_training=self.is_training)

        elif self.model_name == 'InceptionV4':
            self.ckpt_path = slim_models_path + "inception_v4.ckpt"
            from nets.inception import inception_v4, inception_v4_arg_scope
            with slim.arg_scope(inception_v4_arg_scope()):
                self.output, self.outputs = inception_v4(
                    self.input_tensor,
                    num_classes=self.num_classes,
                    is_training=self.is_training)

        elif self.model_name == 'pnasnet_large':
            self.ckpt_path = slim_models_path + "pnasnet_large_2.ckpt"
            from nets.nasnet.pnasnet import build_pnasnet_large, pnasnet_large_arg_scope
            # PNASNet is wrapped in an extra variable scope so its variables
            # live under a scope named after the model (the other models'
            # default scopes already match their model_name).
            with tf.variable_scope(self.model_name):
                with slim.arg_scope(pnasnet_large_arg_scope()):
                    self.output, self.outputs = build_pnasnet_large(
                        self.input_tensor,
                        num_classes=self.num_classes,
                        is_training=self.is_training)

        # Collect all variables related to this model.
        # NOTE(review): assumes each model's variable scope name equals
        # self.model_name -- confirm for newly added models.
        # self.model_weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.model_name+'/model')
        self.model_weights = slim.get_model_variables(self.model_name)