def model_fn(features, labels, mode, params, config):
    """Estimator model_fn for the SSD-ResNet-50 detector.

    :param features: dict of input tensors; must contain 'image'.
    :param labels: ground-truth dict (used for loss in TRAIN and for
        metrics in EVAL; presumably contains 'category' — verify against
        the input_fn).
    :param mode: a tf.estimator.ModeKeys value.
    :param params: hyper-parameter dict with 'class_num', 'weight_decay',
        'is_training', 'alpha' and 'learning_rate'.
    :param config: RunConfig supplied by the Estimator (unused here).
    :return: a tf.estimator.EstimatorSpec for the given mode.
    """
    image = features['image']
    # Init network.
    ssdnet = ssd_resnet_50.init(params['class_num'],
                                params['weight_decay'],
                                params['is_training'])
    # Compute output.
    logits, locations, endpoints = ssdnet(image)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Compute SSD loss and add it to the global loss collection.
        ssd_resnet_50.ssdLoss(logits, locations, labels, params['alpha'])
        total_loss = tf.losses.get_total_loss()
        # Create train op.
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=params['learning_rate'])
        train_op = optimizer.minimize(
            total_loss,
            global_step=tf.train.get_or_create_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=total_loss,
                                          train_op=train_op)
    if mode == tf.estimator.ModeKeys.EVAL:
        # BUG FIX: EVAL previously returned a bare metrics dict; the
        # Estimator API requires an EstimatorSpec, and EVAL mode also
        # requires a loss tensor — compute it here as in TRAIN.
        ssd_resnet_50.ssdLoss(logits, locations, labels, params['alpha'])
        total_loss = tf.losses.get_total_loss()
        plogits = tf.unstack(logits, axis=0)
        probs = tf.nn.softmax(plogits, axis=1)
        pbboxes = tf.unstack(locations, axis=0)
        # Remove all background bboxes.
        pbboxes, probs = evalUtil.rmBackgroundBox(pbboxes, probs)
        # TODO Apply non maximum suppression.
        pbboxes_list = multiclass_non_max_suppression(
            pbboxes,
            probs,
        )
        eval_metrics = {}
        eval_metrics.update(
            evalUtil.get_evaluate_ops(probs, pbboxes_list, labels,
                                      categories=labels['category']))
        return tf.estimator.EstimatorSpec(mode, loss=total_loss,
                                          eval_metric_ops=eval_metrics)
    if mode == tf.estimator.ModeKeys.PREDICT:
        # BUG FIX: PREDICT must also return an EstimatorSpec, not a raw
        # (logits, locations) tuple.
        predictions = {'logits': logits, 'location': locations}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
def model_fn(features, labels, mode, params, config):
    """Estimator model_fn for the SSD-ResNet-50 detector.

    :param features: dict of input tensors; must contain 'image'.
    :param labels: ground-truth dict used for the loss (None in PREDICT).
    :param mode: a tf.estimator.ModeKeys value.
    :param params: hyper-parameter dict with 'class_num', 'weight_decay',
        'is_training', 'alpha' and 'learning_rate'.
    :param config: RunConfig supplied by the Estimator (unused here).
    :return: a tf.estimator.EstimatorSpec for the given mode.
    """
    image = features['image']
    # Init network.
    ssdnet = ssd_resnet_50.init(params['class_num'],
                                params['weight_decay'],
                                params['is_training'])
    # Compute output.
    logits, locations, endpoints = ssdnet(image)
    total_loss = None
    if mode != tf.estimator.ModeKeys.PREDICT:
        # BUG FIX: the loss was previously built unconditionally, but in
        # PREDICT mode `labels` is None and ssdLoss would fail before the
        # predict branch was ever reached. Build it only for TRAIN/EVAL.
        ssd_resnet_50.ssdLoss(logits, locations, labels, params['alpha'])
        total_loss = tf.losses.get_total_loss()
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Create train op.
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=params['learning_rate'])
        train_op = optimizer.minimize(
            total_loss,
            global_step=tf.train.get_or_create_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=total_loss,
                                          train_op=train_op)
    if mode == tf.estimator.ModeKeys.EVAL:
        # BUG FIX: EVAL was `pass` and fell through, returning None; the
        # Estimator API requires an EstimatorSpec with a loss.
        # TODO: add eval_metric_ops (mAP etc.) once the eval pipeline is done.
        return tf.estimator.EstimatorSpec(mode, loss=total_loss)
    if mode == tf.estimator.ModeKeys.PREDICT:
        # axis=4 assumes 5-D logits (e.g. [batch, h, w, anchors, classes])
        # — TODO confirm against ssdnet's output shape.
        prob_pred = tf.nn.softmax(logits, axis=4)
        predictions = {'prob': prob_pred, 'location': locations}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
def test_ssdLoss():
    """Smoke test for ssd_resnet_50.ssdLoss on a tiny hand-built batch.

    Builds one image with two anchor predictions (both scoring class 3)
    and a single ground-truth box, runs the loss op, and prints the result.
    """
    # Two anchors, five classes; both anchors favour class index 3.
    logits = tf.constant([[[0, 0, 0, 1, 0],
                           [0, 0, 0, 1, 0]]], dtype=tf.float32)
    # Predicted box parameters for the two anchors.
    loc = tf.constant([[[0.5, 0.5, 0.2, 0.1],
                        [0.5, 0.5, 0.1, 0.2]]], dtype=tf.float32)
    # A single ground-truth box of class 3.
    labels = {
        'bbox_num': tf.constant([1], dtype=tf.float32),
        'labels': tf.constant([[3]]),
        'bboxes': tf.constant([[[0.5, 0.5, 0.2, 0.2]]]),
    }
    alpha, batch_size = 1, 1
    loss_op = ssd_resnet_50.ssdLoss(logits, loc, labels, alpha, batch_size)
    with tf.Session() as sess:
        print(sess.run([loss_op]))
    return