def pnasnetfunrotate(input_imgs, reuse):
    # Build a batch of randomly rotated copies of the input image.
    rotatedarr = []
    for i in range(batchsize):
        rotated = tf.contrib.image.rotate(
            input_imgs,
            tf.random_uniform((), minval=-np.pi / 4, maxval=np.pi / 4))
        rotatedarr.append(tf.reshape(rotated, [1, image_size, image_size, 3]))
    inputarr = tf.concat(rotatedarr, axis=0)
    print(inputarr.get_shape())
    preprocessed = tf.subtract(tf.multiply(inputarr, 2.0), 1.0)  # 2 * (input_imgs / 255.0) - 1.0
    arg_scope = pnasnet.pnasnet_large_arg_scope()  # get the model's arg scope
    with slim.arg_scope(arg_scope):
        with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.fully_connected,
                             slim.separable_conv2d], reuse=reuse):
            rotated_logits, end_points = pnasnet.build_pnasnet_large(
                preprocessed, num_classes=1001, is_training=False)
            prob = end_points['Predictions']
    return rotated_logits, prob
def testBuildNonExistingLayerLargeModel(self):
    """Tests that the model is built correctly without unnecessary layers."""
    inputs = tf.random_uniform((5, 331, 331, 3))
    tf.train.create_global_step()
    with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
        pnasnet.build_pnasnet_large(inputs, 1000)
    vars_names = [x.op.name for x in tf.trainable_variables()]
    self.assertIn('cell_stem_0/1x1/weights', vars_names)
    self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildPreLogitsLargeModel(self):
    batch_size = 5
    height, width = 331, 331
    num_classes = None
    inputs = tf.random_uniform((batch_size, height, width, 3))
    tf.train.create_global_step()
    with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
        net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
    self.assertFalse('AuxLogits' in end_points)
    self.assertFalse('Predictions' in end_points)
    self.assertTrue(net.op.name.startswith('final_layer/Mean'))
    self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])
def testOverrideHParamsLargeModel(self):
    batch_size = 5
    height, width = 331, 331
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    tf.train.create_global_step()
    config = pnasnet.large_imagenet_config()
    config.set_hparam('data_format', 'NCHW')
    with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
        _, end_points = pnasnet.build_pnasnet_large(
            inputs, num_classes, config=config)
    self.assertListEqual(
        end_points['Stem'].shape.as_list(), [batch_size, 540, 42, 42])
def testNoAuxHeadLargeModel(self):
    batch_size = 5
    height, width = 331, 331
    num_classes = 1000
    for use_aux_head in (True, False):
        tf.reset_default_graph()
        inputs = tf.random_uniform((batch_size, height, width, 3))
        tf.train.create_global_step()
        config = pnasnet.large_imagenet_config()
        config.set_hparam('use_aux_head', int(use_aux_head))
        with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
            _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes,
                                                        config=config)
        self.assertEqual('AuxLogits' in end_points, use_aux_head)
def pnasnet_large(inputs, is_training, opts):
    with slim.arg_scope(pnasnet.pnasnet_large_arg_scope(
            weight_decay=opts.weight_decay,
            batch_norm_decay=opts.batch_norm_decay,
            batch_norm_epsilon=opts.batch_norm_epsilon)):
        config = pnasnet.large_imagenet_config()
        config.set_hparam('dense_dropout_keep_prob', opts.dropout_keep_prob)
        config.set_hparam('use_aux_head', int(opts.create_aux_logits))
        return pnasnet.build_pnasnet_large(
            inputs,
            num_classes=opts.num_classes,
            is_training=is_training,
            config=config)
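A minimal sketch of how the wrapper above might be called; `opts` just needs the attributes the function reads, and the hyperparameter values shown here are assumptions for illustration, not values taken from the original code:

# Hedged sketch: call the wrapper with a plain options object.
from argparse import Namespace

opts = Namespace(
    weight_decay=4e-5,          # assumed value
    batch_norm_decay=0.9997,    # assumed value
    batch_norm_epsilon=0.001,   # assumed value
    dropout_keep_prob=0.5,      # assumed value
    create_aux_logits=True,
    num_classes=1001)

images = tf.placeholder(tf.float32, [None, 331, 331, 3])
logits, end_points = pnasnet_large(images, is_training=True, opts=opts)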
def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False):
    """Defines the default arg scope for the PNASNet Large for object detection.

    This provides a small edit to switch batch norm training on and off.

    Args:
      is_batch_norm_training: Boolean indicating whether to train with batch
        norm.

    Returns:
      An `arg_scope` to use for the PNASNet Large Model.
    """
    imagenet_scope = pnasnet.pnasnet_large_arg_scope()
    with arg_scope(imagenet_scope):
        with arg_scope([slim.batch_norm],
                       is_training=is_batch_norm_training) as sc:
            return sc
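The returned scope is opened like any other slim arg_scope; a minimal sketch, assuming `images` is an input tensor defined elsewhere:

# Sketch only: build the backbone inside the detection arg scope with
# batch norm statistics frozen. `images` is an assumed input tensor.
detection_scope = pnasnet_large_arg_scope_for_detection(
    is_batch_norm_training=False)
with slim.arg_scope(detection_scope):
    _, end_points = pnasnet.build_pnasnet_large(
        images, num_classes=None, is_training=False)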
def testBuildLogitsLargeModel(self):
    batch_size = 5
    height, width = 331, 331
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    tf.train.create_global_step()
    with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
        logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
    auxlogits = end_points['AuxLogits']
    predictions = end_points['Predictions']
    self.assertListEqual(auxlogits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertListEqual(predictions.get_shape().as_list(),
                         [batch_size, num_classes])
def pnasnetfun(input_imgs, reuse):
    preprocessed = tf.subtract(tf.multiply(tf.expand_dims(input_imgs, 0), 2.0),
                               1.0)  # 2 * (input_imgs / 255.0) - 1.0
    arg_scope = pnasnet.pnasnet_large_arg_scope()  # get the model's arg scope
    with slim.arg_scope(arg_scope):
        with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.fully_connected,
                             slim.separable_conv2d], reuse=reuse):
            logits, end_points = pnasnet.build_pnasnet_large(
                preprocessed, num_classes=1001, is_training=False)
            prob = end_points['Predictions']
    return logits, prob
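A sketch of how the `reuse` flag in `pnasnetfun` and `pnasnetfunrotate` is typically exercised, assuming both helpers live in the same script with `image_size` and `batchsize` defined as globals:

# Sketch only: the first call creates the PNASNet variables, the second
# call reuses them for the rotated branch instead of duplicating weights.
img = tf.placeholder(tf.float32, [image_size, image_size, 3])
logits, prob = pnasnetfun(img, reuse=False)
rotated_logits, rotated_prob = pnasnetfunrotate(img, reuse=True)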
def testAllEndPointsShapesLargeModel(self):
    batch_size = 5
    height, width = 331, 331
    num_classes = 1000
    inputs = tf.random.uniform((batch_size, height, width, 3))
    tf.compat.v1.train.create_global_step()
    with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
        _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
    endpoints_shapes = {
        'Stem': [batch_size, 42, 42, 540],
        'Cell_0': [batch_size, 42, 42, 1080],
        'Cell_1': [batch_size, 42, 42, 1080],
        'Cell_2': [batch_size, 42, 42, 1080],
        'Cell_3': [batch_size, 42, 42, 1080],
        'Cell_4': [batch_size, 21, 21, 2160],
        'Cell_5': [batch_size, 21, 21, 2160],
        'Cell_6': [batch_size, 21, 21, 2160],
        'Cell_7': [batch_size, 21, 21, 2160],
        'Cell_8': [batch_size, 11, 11, 4320],
        'Cell_9': [batch_size, 11, 11, 4320],
        'Cell_10': [batch_size, 11, 11, 4320],
        'Cell_11': [batch_size, 11, 11, 4320],
        'global_pool': [batch_size, 4320],
        # Logits and predictions
        'AuxLogits': [batch_size, 1000],
        'Predictions': [batch_size, 1000],
        'Logits': [batch_size, 1000],
    }
    self.assertEqual(len(end_points), 17)
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name in endpoints_shapes:
        tf.compat.v1.logging.info('Endpoint name: {}'.format(endpoint_name))
        expected_shape = endpoints_shapes[endpoint_name]
        self.assertIn(endpoint_name, end_points)
        self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                             expected_shape)
def testAllEndPointsShapesLargeModel(self):
    batch_size = 5
    height, width = 331, 331
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    tf.train.create_global_step()
    with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
        _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
    endpoints_shapes = {
        'Stem': [batch_size, 42, 42, 540],
        'Cell_0': [batch_size, 42, 42, 1080],
        'Cell_1': [batch_size, 42, 42, 1080],
        'Cell_2': [batch_size, 42, 42, 1080],
        'Cell_3': [batch_size, 42, 42, 1080],
        'Cell_4': [batch_size, 21, 21, 2160],
        'Cell_5': [batch_size, 21, 21, 2160],
        'Cell_6': [batch_size, 21, 21, 2160],
        'Cell_7': [batch_size, 21, 21, 2160],
        'Cell_8': [batch_size, 11, 11, 4320],
        'Cell_9': [batch_size, 11, 11, 4320],
        'Cell_10': [batch_size, 11, 11, 4320],
        'Cell_11': [batch_size, 11, 11, 4320],
        'global_pool': [batch_size, 4320],
        # Logits and predictions
        'AuxLogits': [batch_size, 1000],
        'Predictions': [batch_size, 1000],
        'Logits': [batch_size, 1000],
    }
    self.assertEqual(len(end_points), 17)
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name in endpoints_shapes:
        tf.logging.info('Endpoint name: {}'.format(endpoint_name))
        expected_shape = endpoints_shapes[endpoint_name]
        self.assertIn(endpoint_name, end_points)
        self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                             expected_shape)
'''
To classify images with TF-Slim you also need the TF-Slim image models
library, which is not part of the TensorFlow core:
(1) git clone https://github.com/tensorflow/models
(2) cd models/research/slim
(3) check that it works: python -c "from nets import cifarnet; mynet = cifarnet.cifarnet"
Note: the models/research/slim directory can be copied out and used on its
own from any location.
The various *_test.py files in the source tree show how to use the
different models.
'''
import sys
# I put the slim folder under /Users/rensike/Files/temp/
sys.path.append("/Users/rensike/Files/temp/slim")

import tensorflow as tf

slim = tf.contrib.slim

'''
Get the variable name of pnasnet's final output layer.
'''
from nets.nasnet import pnasnet

batch_size = 5
height, width = 331, 331
num_classes = 1000

inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
    logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)

auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']

print(predictions.op.name)  # final_layer/predictions
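Once the op name is known, the corresponding tensor can be fetched back from the default graph by name (note the `:0` output-index suffix); a small sketch:

# Sketch only: recover the predictions tensor from the default graph
# using the name printed above.
graph = tf.get_default_graph()
predictions = graph.get_tensor_by_name('final_layer/predictions:0')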
def getone(onestr):
    return onestr.replace(',', ' ')

with open('中文标签.csv', 'r+') as f:                  # open the label file
    labels = list(map(getone, list(f)))
    print(len(labels), type(labels), labels[:5])       # print the Chinese labels

sample_images = ['hy.jpg', 'ps.jpg', '72.jpg']         # paths of the test images

input_imgs = tf.placeholder(tf.float32,
                            [None, image_size, image_size, 3])  # input placeholder
x1 = 2 * (input_imgs / 255.0) - 1.0                    # normalize the images to [-1, 1]

arg_scope = pnasnet.pnasnet_large_arg_scope()          # get the model's arg scope
with slim.arg_scope(arg_scope):
    logits, end_points = pnasnet.build_pnasnet_large(x1, num_classes=1001,
                                                     is_training=False)
    prob = end_points['Predictions']
    y = tf.argmax(prob, axis=1)                        # output node with the top class

checkpoint_file = r'./pnasnet-5_large_2017_12_13/model.ckpt'  # checkpoint path
saver = tf.train.Saver()                               # saver used to load the model
with tf.Session() as sess:                             # create a session
    saver.restore(sess, checkpoint_file)               # load the model weights

    def preimg(img):                                   # image preprocessing function
        ch = 3
        if img.mode == 'RGBA':                         # handle RGBA images
            ch = 4
def _build_graph(self, input_tensor):
    with tf.name_scope('inputs'):
        if input_tensor is None:
            input_tensor = tf.placeholder(tf.float32, shape=self.image_shape,
                                          name='input_img')
        else:
            assert self.image_shape == tuple(input_tensor.shape.as_list())
        self.input_tensor = input_tensor

    if self.model_name == 'vgg_16':
        self.ckpt_path = slim_models_path + "vgg_16.ckpt"
        from nets.vgg import vgg_16, vgg_arg_scope
        with slim.arg_scope(vgg_arg_scope()):
            self.output, self.outputs = vgg_16(
                self.input_tensor, num_classes=self.num_classes,
                is_training=self.is_training, global_pool=self.global_pool)
    elif self.model_name == 'resnet_v2_152':
        self.ckpt_path = slim_models_path + "resnet_v2_152.ckpt"
        from nets.resnet_v2 import resnet_v2_152, resnet_arg_scope
        with slim.arg_scope(resnet_arg_scope()):
            self.output, self.outputs = resnet_v2_152(
                self.input_tensor, num_classes=self.num_classes,
                is_training=self.is_training, global_pool=self.global_pool)
    elif self.model_name == 'resnet_v2_101':
        self.ckpt_path = slim_models_path + "resnet_v2_101.ckpt"
        from nets.resnet_v2 import resnet_v2_101, resnet_arg_scope
        with slim.arg_scope(resnet_arg_scope()):
            self.output, self.outputs = resnet_v2_101(
                self.input_tensor, num_classes=self.num_classes,
                is_training=self.is_training, global_pool=self.global_pool)
    elif self.model_name == 'resnet_v2_50':
        self.ckpt_path = slim_models_path + "resnet_v2_50.ckpt"
        from nets.resnet_v2 import resnet_v2_50, resnet_arg_scope
        with slim.arg_scope(resnet_arg_scope()):
            self.output, self.outputs = resnet_v2_50(
                self.input_tensor, num_classes=self.num_classes,
                is_training=self.is_training, global_pool=self.global_pool)
    elif self.model_name == 'InceptionV3':
        self.ckpt_path = slim_models_path + "inception_v3.ckpt"
        from nets.inception import inception_v3, inception_v3_arg_scope
        with slim.arg_scope(inception_v3_arg_scope()):
            self.output, self.outputs = inception_v3(
                self.input_tensor, num_classes=self.num_classes,
                is_training=self.is_training)
    elif self.model_name == 'InceptionV4':
        self.ckpt_path = slim_models_path + "inception_v4.ckpt"
        from nets.inception import inception_v4, inception_v4_arg_scope
        with slim.arg_scope(inception_v4_arg_scope()):
            self.output, self.outputs = inception_v4(
                self.input_tensor, num_classes=self.num_classes,
                is_training=self.is_training)
    elif self.model_name == 'pnasnet_large':
        self.ckpt_path = slim_models_path + "pnasnet_large_2.ckpt"
        from nets.nasnet.pnasnet import build_pnasnet_large, pnasnet_large_arg_scope
        with tf.variable_scope(self.model_name):
            with slim.arg_scope(pnasnet_large_arg_scope()):
                self.output, self.outputs = build_pnasnet_large(
                    self.input_tensor, num_classes=self.num_classes,
                    is_training=self.is_training)

    # Collect all variables related to this model.
    # self.model_weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.model_name+'/model')
    self.model_weights = slim.get_model_variables(self.model_name)
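A sketch of how the `ckpt_path` and `model_weights` attributes set above are typically consumed to load pretrained weights; `model` stands for an instance of the class and `sess` for an existing tf.Session, both assumptions for illustration:

# Sketch only: restore the collected model variables from the checkpoint.
init_fn = slim.assign_from_checkpoint_fn(
    model.ckpt_path, model.model_weights, ignore_missing_vars=True)
init_fn(sess)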
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
    """Extracts second stage box classifier features.

    This function reconstructs the "second half" of the PNASNet
    network after the part defined in `_extract_proposal_features`.

    Args:
      proposal_feature_maps: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth]
        representing the feature map cropped to each proposal.
      scope: A scope name.

    Returns:
      proposal_classifier_features: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, height, width, depth]
        representing box classifier features for each proposal.
    """
    del scope

    # Number of used stem cells.
    num_stem_cells = 2

    # Note that we always feed into 2 layers of equal depth
    # where the first N channels corresponds to previous hidden layer
    # and the second N channels correspond to the final hidden layer.
    hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3)

    # Note that what follows is largely a copy of build_pnasnet_large() within
    # pnasnet.py. We are copying to minimize code pollution in slim.

    # TODO(shlens,skornblith): Determine the appropriate drop path schedule.
    # For now the schedule is the default (1.0->0.7 over 250,000 train steps).
    hparams = pnasnet.large_imagenet_config()
    if not self._is_training:
        hparams.set_hparam('drop_path_keep_prob', 1.0)

    # Calculate the total number of cells in the network.
    total_num_cells = hparams.num_cells + num_stem_cells

    normal_cell = pnasnet.PNasNetNormalCell(
        hparams.num_conv_filters, hparams.drop_path_keep_prob,
        total_num_cells, hparams.total_training_steps)
    with arg_scope([slim.dropout, nasnet_utils.drop_path],
                   is_training=self._is_training):
        with arg_scope([slim.batch_norm], is_training=self._train_batch_norm):
            with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d,
                            slim.batch_norm, slim.separable_conv2d,
                            nasnet_utils.factorized_reduction,
                            nasnet_utils.global_avg_pool,
                            nasnet_utils.get_channel_index,
                            nasnet_utils.get_channel_dim],
                           data_format=hparams.data_format):
                # This corresponds to the cell number just past 'Cell_7' used
                # by _extract_proposal_features().
                start_cell_num = 8
                true_cell_num = start_cell_num + num_stem_cells

                with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
                    net = _build_pnasnet_base(
                        hidden_previous,
                        hidden,
                        normal_cell=normal_cell,
                        hparams=hparams,
                        true_cell_num=true_cell_num,
                        start_cell_num=start_cell_num)

    proposal_classifier_features = net
    return proposal_classifier_features
def main():
    slim = tf.contrib.slim
    tf.reset_default_graph()

    image_size = pnasnet.build_pnasnet_large.default_image_size  # 331
    labels = imagenet.create_readable_names_for_imagenet_labels()
    sample_images = ['hy.jpg', 'mac.jpg', 'filename3.jpg', '72.jpg', 'ps.jpg']

    input_imgs = tf.placeholder(tf.float32, [None, image_size, image_size, 3])
    x1 = 2 * (input_imgs / 255.0) - 1.0

    arg_scope = pnasnet.pnasnet_large_arg_scope()
    with slim.arg_scope(arg_scope):
        logit, end_points = pnasnet.build_pnasnet_large(x1, num_classes=1001,
                                                        is_training=False)
        print(end_points)
        prob = end_points['Predictions']
        y = tf.argmax(prob, axis=1)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint_file)  # checkpoint path defined at module level

        def preimg(img):
            ch = 3
            print(img.mode)
            if img.mode == 'RGBA':
                ch = 4
            imgnp = np.asarray(img.resize((image_size, image_size)),
                               dtype=np.float32).reshape(image_size,
                                                         image_size, ch)
            return imgnp[:, :, :3]

        batchImg = [preimg(Image.open(imgfilename))
                    for imgfilename in sample_images]
        orgImg = [Image.open(imgfilename) for imgfilename in sample_images]
        yv, img_norm = sess.run([y, x1], feed_dict={input_imgs: batchImg})
        # print(yv, np.shape(yv))

        def showresult(yy, img_norm, img_org):
            plt.figure()
            p1 = plt.subplot(121)
            p2 = plt.subplot(122)
            p1.imshow(img_org)
            p1.axis('off')
            p1.set_title("original image")
            p2.imshow((img_norm * 255).astype(np.uint8))
            p2.axis('off')
            p2.set_title("input image")
            plt.show()
            # print(yy)
            # print(labels[yy])

        for yy, img1, img2 in zip(yv, batchImg, orgImg):
            showresult(yy, img1, img2)