Example #1
def test_base_fcn(net_name, img_fnames, weight_fname):
    "Quick test of the base networks to figure out what preprocessing is appropriate; this isn't well documented upstream."
    # Keep a handle on the raw placeholder: feeding the preprocessed tensor
    # downstream would silently skip preprocessing.
    raw_inputs = tf.placeholder(tf.float32, [None, None, None, 3])
    inputs = _vgg_preprocess(raw_inputs)

    if net_name == 'resnet_v1_152':
        with slim.arg_scope(resnet_v1.resnet_arg_scope()) as scope:
            base_net, _ = resnet_v1.resnet_v1_152(inputs,
                                                  is_training=False,
                                                  num_classes=1000)
    else:
        raise ValueError('net_name not recognized.')

    pred = tf.argmax(base_net, -1)
    saver = tf.train.Saver()
    sess = tf.InteractiveSession()
    # no global variable init needed; saver.restore supplies all values
    saver.restore(sess, weight_fname)
    for img_fname in img_fnames:
        img = cv2.imread(img_fname)  # note: OpenCV loads images in BGR channel order
        prediction = sess.run(pred,
                              feed_dict={raw_inputs: img[np.newaxis, :, :, :]})
        print('pred for %s:' % img_fname)
        print(imagenet_names[int(prediction)])
        print('=========')
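The _vgg_preprocess helper isn't defined on this page; a minimal sketch of what these examples appear to assume (mean-pixel subtraction with slim's standard VGG constants; the constant names are hypothetical) would be:

_R_MEAN, _G_MEAN, _B_MEAN = 123.68, 116.78, 103.94  # slim's standard VGG mean pixel (RGB order)

def _vgg_preprocess(inputs):
    # Mean-pixel subtraction in the VGG/ResNet-v1 style; slim's resnet_v1
    # checkpoints expect mean-subtracted RGB with no further scaling.
    return inputs - tf.constant([_R_MEAN, _G_MEAN, _B_MEAN], dtype=tf.float32)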
Example #2
def _load_tf_model(checkpoint_file):
    # Placeholder
    input_tensor = tf.placeholder(tf.float32,
                                  shape=(None, 224, 224, 3),
                                  name='input_image')

    # Load the model
    sess = tf.Session()
    arg_scope = resnet_v1.resnet_arg_scope()
    with tf.contrib.slim.arg_scope(arg_scope):
        logits, _ = resnet_v1.resnet_v1_152(input_tensor,
                                            num_classes=1000,
                                            is_training=False,
                                            reuse=tf.AUTO_REUSE)
    probabilities = tf.nn.softmax(logits)

    saver = tf.train.Saver()
    saver.restore(sess, checkpoint_file)

    def predict_for(image):
        # Only the probabilities are needed downstream; skip fetching raw logits.
        pred_proba = sess.run(probabilities,
                              feed_dict={input_tensor: image})
        return pred_proba

    return predict_for
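A usage sketch (the checkpoint path is hypothetical, and the zero batch is just a stand-in for a preprocessed float32 batch of shape (N, 224, 224, 3)):

predict = _load_tf_model('resnet_v1_152.ckpt')  # hypothetical checkpoint path
batch = np.zeros((1, 224, 224, 3), dtype=np.float32)  # stand-in for a real image batch
probs = predict(batch)
print(np.argmax(probs))  # index of the top ImageNet class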
Example #3
def _load_tf_model(checkpoint_file):
    # Placeholder
    input_tensor = tf.compat.v1.placeholder(tf.float32,
                                            shape=(None, 224, 224, 3),
                                            name='input_image')

    # Make the TensorFlow warnings go away
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    # Load the model
    sess = tf.compat.v1.Session()
    arg_scope = resnet_v1.resnet_arg_scope()
    with tf.contrib.slim.arg_scope(arg_scope):
        logits, _ = resnet_v1.resnet_v1_152(input_tensor,
                                            num_classes=1000,
                                            is_training=False,
                                            reuse=tf.AUTO_REUSE)
    probabilities = tf.nn.softmax(logits)

    saver = tf.compat.v1.train.Saver()
    try:
        saver.restore(sess, checkpoint_file)
    except ValueError:
        raise mlhub.utils.DataResourceNotFoundException(checkpoint_file)

    return sess, logits, probabilities, input_tensor
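Since this variant returns the raw session and tensors, inference looks like this (checkpoint path and input batch are hypothetical stand-ins):

sess, logits, probabilities, input_tensor = _load_tf_model('resnet_v1_152.ckpt')  # hypothetical path
batch = np.zeros((1, 224, 224, 3), dtype=np.float32)
probs = sess.run(probabilities, feed_dict={input_tensor: batch})
print(np.argmax(probs))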
Example #4
def _build_graph(self, inputs):
    orig_image = inputs[0]
    mean = tf.get_variable('resnet_v1_' + str(args.depth) + '/mean_rgb', shape=[3])
    with tp.symbolic_functions.guided_relu():
        with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training=False)):
            image = tf.expand_dims(orig_image - mean, 0)
            if args.depth == 50:
                logits, _ = resnet_v1.resnet_v1_50(image, 1000)
            elif args.depth == 101:
                logits, _ = resnet_v1.resnet_v1_101(image, 1000)
            else:
                logits, _ = resnet_v1.resnet_v1_152(image, 1000)
        tp.symbolic_functions.saliency_map(logits, orig_image, name="saliency")
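tp here is tensorpack; its saliency_map helper amounts to differentiating the top class score with respect to the input. A hedged sketch of the idea in plain TensorFlow (not tensorpack's exact implementation):

# Saliency as the gradient of the maximum logit w.r.t. the input image.
saliency = tf.gradients(tf.reduce_max(logits), orig_image)[0]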
Example #5
def _build_base_net(inputs, net_opts, is_training, reuse=None):
    "return a net from slim research nets. is_training must be an actual boolean, not a tensor, which is why we're abstracting this out."
    if net_opts['base_net'] == 'resnet_v1_152':
        with slim.arg_scope(resnet_v1.resnet_arg_scope()) as scope:
            inputs = _vgg_preprocess(inputs)
            base_net, _ = resnet_v1.resnet_v1_152(
                inputs,
                is_training=False
                if net_opts['is_batchnorm_fixed'] else is_training,
                global_pool=False,
                reuse=reuse)
            if DEBUG:
                print('resnet_out:')
                print(base_net.shape.as_list())

    elif net_opts['base_net'] == 'resnet_v2_152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope()) as scope:
            inputs = _vgg_preprocess(inputs)
            base_net, _ = resnet_v2.resnet_v2_152(
                inputs,
                is_training=False
                if net_opts['is_batchnorm_fixed'] else is_training,
                global_pool=False,
                reuse=reuse)
            if DEBUG:
                print('resnet_out:')
                print(base_net.shape.as_list())
    elif net_opts['base_net'] == 'inception_v3':
        # WARNING: is_training for inception controls not just batch norm but also
        # dropout, so it's a little awkward. We may need more functionality here later. TODO
        '''
        TODO add inception preprocessing: https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/preprocessing_factory.py

        # WARNING: untested. Not sure I fully understand slim scopes yet.
        base_scope = 'InceptionV3'
        with slim.arg_scope(inception.inception_v3_arg_scope()) as scope:
            with tf.variable_scope(scope, base_scope, [inputs, None], reuse=False) as scope:
                with slim.arg_scope([layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
                    base_net, _ = inception_v3_base(inputs, scope=scope)
        '''
        raise NotImplementedError('inception_v3 is not wired up yet; see the TODO above.')

    elif net_opts['base_net'] == 'nothing':
        # pass inputs straight through, for debugging purposes
        base_net = inputs
    else:
        raise ValueError("base_net name not recognized")

    return base_net
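A usage sketch (the net_opts dict below is hypothetical, built from the keys the function reads):

net_opts = {'base_net': 'resnet_v1_152', 'is_batchnorm_fixed': True}  # hypothetical options
images = tf.placeholder(tf.float32, [None, None, None, 3])
features = _build_base_net(images, net_opts, is_training=False)
# global_pool=False above, so this returns a spatial feature map, not logits.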
Example #6
def _get_endpoints(model_name, img_tensor):
    if model_name == "res50":
        with slim.arg_scope(resnet_v1.resnet_arg_scope()):
            _, end_points = resnet_v1.resnet_v1_50(img_tensor,
                                                   1000,
                                                   is_training=False)
        return end_points["predictions"]

    elif model_name == "res152":
        with slim.arg_scope(resnet_v1.resnet_arg_scope()):
            _, end_points = resnet_v1.resnet_v1_152(img_tensor,
                                                    1000,
                                                    is_training=False)
        return end_points["predictions"]

    elif model_name.startswith("mobilenet"):
        with tf.contrib.slim.arg_scope(
                mobilenet_v2.training_scope(is_training=False)):
            _, endpoints = mobilenet_v2.mobilenet(img_tensor)
        return endpoints["Predictions"]

    else:
        raise ValueError("model_name not recognized: %s" % model_name)
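A usage sketch (the 224x224 placeholder shape is an assumption; it matches what slim's ImageNet checkpoints expect):

img_tensor = tf.placeholder(tf.float32, [None, 224, 224, 3])
predictions = _get_endpoints("res152", img_tensor)  # the softmax "predictions" endpoint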
Example #7
def main(_):
	reader = init_reader(FLAGS.data_dir)
	batch_xs, batch_ys = reader.make_one_shot_iterator().get_next()
	# param batch_xs: shape [-1, 512, 512, 3] type tf.float32
	# param batch_ys: shape [-1] type tf.int32
	off_ws = [0, 0, 0, 0, 256, 256, 256, 256]
	off_hs = [0, 128, 256, 384, 0, 128, 256, 384]
	x_img_cuts = [tf.image.crop_to_bounding_box(batch_xs, hs, ws, 128, 256)
	              for hs, ws in zip(off_hs, off_ws)]
	batch_xs = tf.reshape(tf.concat(x_img_cuts, axis=0), [FLAGS.batch_size * 8, 128, 256, 3])
	batch_ys = tf.reshape(batch_ys, [FLAGS.batch_size * 8])

	with slim.arg_scope(resnet_v1.resnet_arg_scope()):
		probs, end_points = resnet_v1.resnet_v1_152(batch_xs, num_classes=6, is_training=False)
		probs = tf.reshape(probs, [-1, 6])
		prediction = tf.argmax(probs, axis=-1)
		loss, binary = init_loss(probs, batch_ys, end_points, losstype=FLAGS.losstype)
	
	config = tf.ConfigProto()
	config.gpu_options.allow_growth = True
	sess = tf.InteractiveSession(config=config)
	counter = get_counter(FLAGS.model_dir)

	learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, counter, 
	                                           100, 0.98, staircase=True)
	if 'SGD' in FLAGS.optimizer:
		optim = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,
		                                                                  global_step=tf.Variable(counter))
	elif 'Adam' in FLAGS.optimizer:
		optim = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=tf.Variable(counter))
	else:
		raise NotImplementedError
	
	# Initialize all variables
	# Load the pretrained model and initialize the global counter
	sess.run(tf.global_variables_initializer())
	counter = load(sess, FLAGS.model_basedir)

	confusion_matrix = Confusion(sess, prediction, binary, batch_ys)
	print(confusion_matrix)
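The offset lists above tile each 512x512 input into eight non-overlapping 128x256 patches (tf.image.crop_to_bounding_box takes offset_height, offset_width, target_height, target_width). A standalone sanity check of that tiling:

off_ws = [0, 0, 0, 0, 256, 256, 256, 256]
off_hs = [0, 128, 256, 384, 0, 128, 256, 384]
# 4 row offsets x 2 column offsets -> 8 non-overlapping 128x256 tiles covering 512x512
assert sorted(zip(off_hs, off_ws)) == [(h, w) for h in (0, 128, 256, 384) for w in (0, 256)]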
Example #8
def main(_):
	reader = init_reader(FLAGS.data_dir, batch_size=FLAGS.batch_size)
	batch_xs, batch_ys = reader.make_one_shot_iterator().get_next()
	# param batch_xs: shape [-1, 512, 512, 3] type tf.float32
	# param batch_ys: shape [-1] type tf.int32
	off_ws = [0, 0, 0, 0, 256, 256, 256, 256]
	off_hs = [0, 128, 256, 384, 0, 128, 256, 384]
	x_img_cuts = [tf.image.crop_to_bounding_box(batch_xs, hs, ws, 128, 256)
	              for hs, ws in zip(off_hs, off_ws)]
	batch_xs = tf.reshape(tf.concat(x_img_cuts, axis=0), [FLAGS.batch_size * 8, 128, 256, 3])
	batch_ys = tf.reshape(batch_ys, [FLAGS.batch_size * 8])

	if FLAGS.is_training:
		with slim.arg_scope(resnet_v1.resnet_arg_scope()):
			logits, end_points = resnet_v1.resnet_v1_152(batch_xs, num_classes=6,
			                                             is_training=True)
			logits = tf.reshape(logits, [-1, 6], name='logits_2d')
			prediction = tf.argmax(logits, axis=-1, output_type=tf.int32)
			# note: this measures plain batch accuracy, not mean average precision
			mAP = tf.reduce_mean(tf.cast(tf.equal(prediction, batch_ys),
			                             dtype=tf.float32))
			loss, _ = init_loss(logits, batch_ys, end_points=end_points, losstype=FLAGS.losstype)
			mAP_sum = tf.summary.scalar('mAP', mAP)
			loss_sum = tf.summary.scalar('loss', loss)
			summaries = tf.summary.merge([mAP_sum, loss_sum])
			
		config = tf.ConfigProto()
		config.gpu_options.allow_growth = True
		sess = tf.InteractiveSession(config=config)
		counter = get_counter(FLAGS.model_dir)

		# Exponential decay learning rate and optimizer configurations
		learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, counter, 
		                                           100, 0.98, staircase=True)
		if 'SGD' in FLAGS.optimizer:
			optim = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, 
			                                                                  global_step=tf.Variable(counter))
		elif 'Adam' in FLAGS.optimizer:
			optim = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=tf.Variable(counter))
		else:
			raise NotImplementedError
		sess.run(tf.global_variables_initializer())

		if FLAGS.pretrained:
			# Load the pretrained model given by TensorFlow official
			exclusions = ['resnet_v1_152/logits', 'predictions']
			resnet_except_logits = slim.get_variables_to_restore(exclude=exclusions)
			init_fn = slim.assign_from_checkpoint_fn(FLAGS.pretrain_dir, resnet_except_logits,
			                                         ignore_missing_vars=True)
			init_fn(sess)
			print('Model successfully loaded')
		else:
			# Load the model trained by ourselves
			counter = load(sess, FLAGS.model_basedir)

		# Ready to train
		train(sess, optim, loss, summaries, FLAGS.loops, counter=counter)
		print('Training finished')
	else:
		with slim.arg_scope(resnet_v1.resnet_arg_scope()):
			probs, end_points = resnet_v1.resnet_v1_152(batch_xs, num_classes=6,
			                                            is_training=False)
			prediction = tf.argmax(tf.reshape(probs, [-1, 6]), axis=-1, output_type=tf.int32)
			accuracy, update_acc = tf.metrics.accuracy(batch_ys, prediction)
			_, binary = init_loss(probs, batch_ys, end_points, losstype=FLAGS.losstype)

		config = tf.ConfigProto()
		config.gpu_options.allow_growth = True
		sess = tf.InteractiveSession(config=config)
		sess.run(tf.global_variables_initializer())
		sess.run(tf.local_variables_initializer())  # tf.metrics.accuracy uses local variables
		load(sess, FLAGS.model_basedir)

		# TODO: handle the binary classification case as well,
		# which is even more tricky to implement.
		print('The model accuracy is {}'.format(evaluate(sess, accuracy, update_acc)))
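The evaluate helper isn't shown on this page; a minimal sketch consistent with how it's called (run the streaming update op over some batches, then read the accumulated accuracy; the helper name matches the call above, but num_batches is an arbitrary assumption):

def evaluate(sess, accuracy, update_acc, num_batches=100):  # num_batches is a hypothetical default
    # tf.metrics.accuracy accumulates counts in local variables via its update op.
    for _ in range(num_batches):
        sess.run(update_acc)
    return sess.run(accuracy)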