    def callback(self, image_msg):

        # Convert the ROS image message to an OpenCV BGR frame.
        frame = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

        # JPEG-encode the frame; the raw byte string is fed to the
        # 'DecodeJpeg/contents:0' input of the Inception graph below.
        image_data = cv2.imencode('.jpg', frame)[1].tostring()

        # Decode a second JPEG copy back into a BGR image used only for
        # drawing the overlay that gets republished.
        image_data2 = cv2.imencode('.jpg', frame)[1]
        image_data3 = cv2.imdecode(image_data2, cv2.IMREAD_COLOR)

        # cv2.imshow("imgNet_tf", image_data3)
        # cv2.waitKey(1)

        # Overlay the previous classification result on the frame as a percentage.
        cv2.putText(
            image_data3,
            self.human_string + ": " + str(int(self.score * 100)) + "%",
            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 8, 255), 2)

        msg = self.bridge.cv2_to_imgmsg(image_data3, "bgr8")

        self._pub1.publish(msg)

        rospy.sleep(0.9)

        softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')

        predictions = self._session.run(softmax_tensor,
                                        {'DecodeJpeg/contents:0': image_data})

        predictions = np.squeeze(predictions)

        # Creates node ID --> English string lookup.
        node_lookup = classify_image.NodeLookup()

        # Indices of the top-k predictions, highest score first.
        top_k = predictions.argsort()[-self.use_top_k:][::-1]

        for node_id in top_k:
            self.human_string = node_lookup.id_to_string(node_id)
            self.score = predictions[node_id]

            if self.score > self.score_threshold:

                rospy.logdebug('%s (score = %.5f)' %
                               (self.human_string, self.score))

                self._pub.publish(self.human_string)

                #topic_diag_main("ros_tf_node_imgNet", "", human_string, (float(score)) * 100)
                data = {}
                data['class'] = self.human_string
                data['probability'] = (float(self.score)) * 100
                self.monitor.node_info(data, 'Result',
                                       'Detection result received.', 0)

                data3 = {}
                data3['Status'] = 1
                self.monitor.node_info(data3, 'Status',
                                       'Detection status received.', 0)
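
The callbacks above and in the examples that follow assume a node object that creates the CvBridge, the TensorFlow session, the publishers, and the thresholds elsewhere. Below is a minimal sketch of that setup, reusing the classify_image flag pattern from example 6; the class name, topic names, and parameter defaults are assumptions, and example 1's self.monitor helper is omitted:

import rospy
import numpy as np
import cv2
import tensorflow as tf
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from std_msgs.msg import String
import classify_image

# Point classify_image at its model directory before loading the graph
# (same pattern as example 6).
classify_image.FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('model_dir', '/tmp/imagenet',
                           """Path to classify_image_graph_def.pb""")


class RosTensorFlow(object):
    def __init__(self):
        # Download classify_image_graph_def.pb if needed and load it.
        classify_image.maybe_download_and_extract()
        classify_image.create_graph()
        self._session = tf.Session()

        self.bridge = CvBridge()        # example 1 uses self.bridge,
        self._cv_bridge = self.bridge   # the other examples use self._cv_bridge
        self._sub = rospy.Subscriber('image', Image, self.callback,
                                     queue_size=1)
        self._pub = rospy.Publisher('result', String, queue_size=1)
        self._pub1 = rospy.Publisher('result_image', Image, queue_size=1)

        # Parameters referenced in the callbacks.
        self.score_threshold = rospy.get_param('~score_threshold', 0.1)
        self.use_top_k = rospy.get_param('~use_top_k', 5)
        self.human_string = ''
        self.score = 0.0


if __name__ == '__main__':
    rospy.init_node('ros_tensorflow_classify')
    node = RosTensorFlow()
    rospy.spin()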
Example #2
    def callback(self, image_msg):
        cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
        softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
        predictions = self._session.run(softmax_tensor,
                                        {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)
        node_lookup = classify_image.NodeLookup()
        top_k = predictions.argsort()[-self.use_top_k:][::-1]
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            if score > self.score_threshold:
                rospy.loginfo('%s (score = %.5f)' % (human_string, score))
                self._pub.publish(human_string)
Example #3
    def callback(self, image_msg):
        cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        # copy from
        # classify_image.py
        image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
        # Creates graph from saved GraphDef.
        softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
        predictions = self._session.run(softmax_tensor,
                                        {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)
        # Creates node ID --> English string lookup.
        node_lookup = classify_image.NodeLookup()
        top_k = predictions.argsort()[-self.use_top_k:][::-1]
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            #print(score, human_string)
            if score > self.score_threshold:
                rospy.loginfo('%s (score = %.5f)' % (human_string, score))
                self._pub.publish(human_string)
Example #4
    def callback(self, image_msg):
        # Convert the ROS image data to OpenCV image format
        cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        # copy from
        # classify_image.py
        image_data = cv2.imencode('.jpg', cv_image)[1].tostring()

        # Create the softmax tensor from the saved GraphDef; 'softmax:0' holds
        # the tensor of 1000 labeled predictions
        softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')

        # Run the softmax tensor; 'DecodeJpeg/contents:0' is a tensor that
        # holds the JPEG-encoded image string
        predictions = self._session.run(
            softmax_tensor, {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)

        # Look up the matched objects' strings and their probabilities
        node_lookup = classify_image.NodeLookup()
        top_k = predictions.argsort()[-self.use_top_k:][::-1]
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            if score > self.score_threshold:                
                rospy.loginfo('%s (score = %.5f)' % (human_string, score))                
                self._pub.publish(human_string)
Example #5
    def run(self):
        sqs = session.resource('sqs', 'us-west-1')
        s3 = session.resource('s3')

        request_queue = sqs.get_queue_by_name(QueueName=SQS_REQUEST_QUEUE_NAME)
        response_queue = sqs.get_queue_by_name(
            QueueName=SQS_RESPONSE_QUEUE_NAME)
        terminate_queue = sqs.get_queue_by_name(
            QueueName=SQS_TERMINATE_QUEUE_NAME)
        bucket = s3.Bucket(S3_BUCKET_NAME)

        # Creates node ID --> English string lookup.
        node_lookup = classify_image.NodeLookup()

        request_count = 0
        with tf.Session() as sess:
            softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')

            check = False

            while True:

                if check:
                    # Drain the terminate queue; a non-expired terminate
                    # message means this worker should shut down.
                    check = False
                    request_count = 0
                    while True:
                        for message in terminate_queue.receive_messages(
                                MaxNumberOfMessages=1,
                                AttributeNames=['SentTimestamp']):
                            message.delete()
                            message_ts = int(
                                message.attributes['SentTimestamp'])
                            current_ts = int(time.time() * 1000)
                            print current_ts, message_ts
                            if (current_ts - message_ts
                                ) <= TERMINATE_REQUEST_EXPIRES_IN:
                                # Valid termination request
                                return
                            else:
                                # Expired request
                                break
                        else:
                            break

                # Pull one classification request, classify the referenced
                # image, and push the result to the response queue.
                processed = False
                for message in request_queue.receive_messages(
                        WaitTimeSeconds=20, MaxNumberOfMessages=1):
                    request = json.loads(message.body)
                    message.delete()

                    image_url = request['imageUrl']
                    image_data = self.download_image(image_url)
                    predictions = sess.run(
                        softmax_tensor, {'DecodeJpeg/contents:0': image_data})
                    predictions = np.squeeze(predictions)
                    node_id = predictions.argsort()[-1]
                    human_string = node_lookup.id_to_string(node_id)

                    print image_url, human_string

                    request['result'] = human_string
                    response_queue.send_message(
                        MessageBody=json.dumps(request))

                    if S3_UPLOAD_ENABLED:
                        image_name = os.path.basename(image_url)
                        while True:
                            try:
                                bucket.put_object(Key=image_name,
                                                  Body=human_string)
                                break
                            except Exception:
                                bucket = s3.Bucket(S3_BUCKET_NAME)
                                time.sleep(2)

                    processed = True
                    request_count += 1

                if not processed:
                    check = True
                elif request_count and (request_count %
                                        TERMINATE_CHECK_INTERVAL == 0):
                    check = True
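
A possible producer-side sketch for the worker above: it sends a request in the JSON format the worker expects and waits for the echoed result. The queue names must match the worker's constants, and the region and image URL are placeholders:

import json
import boto3

# Assumed to match the worker's configuration.
SQS_REQUEST_QUEUE_NAME = 'classify-requests'
SQS_RESPONSE_QUEUE_NAME = 'classify-responses'

session = boto3.Session(region_name='us-west-1')
sqs = session.resource('sqs', 'us-west-1')
request_queue = sqs.get_queue_by_name(QueueName=SQS_REQUEST_QUEUE_NAME)
response_queue = sqs.get_queue_by_name(QueueName=SQS_RESPONSE_QUEUE_NAME)

# The worker expects a JSON body with an 'imageUrl' field ...
request_queue.send_message(MessageBody=json.dumps(
    {'imageUrl': 'http://example.com/cat.jpg'}))

# ... and sends the same JSON back with a 'result' field added.
done = False
while not done:
    for message in response_queue.receive_messages(WaitTimeSeconds=20,
                                                   MaxNumberOfMessages=1):
        response = json.loads(message.body)
        message.delete()
        print(response['imageUrl'] + ': ' + response['result'])
        done = True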
Example #6
from flask import Flask, render_template, request
import classify_image
import tensorflow as tf

# setup flags
classify_image.FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('model_dir', '/tmp/imagenet',
                           """Path to classify_image_graph_def.pb""")

# prepare to use the trained model
classify_image.maybe_download_and_extract()
classify_image.create_graph()
node_lookup = classify_image.NodeLookup()
sess = tf.Session()
softmax_tensor = tf.squeeze(sess.graph.get_tensor_by_name('softmax:0'))

app = Flask(__name__)


@app.route('/recognize', methods=['POST'])
def recognize():
    f = request.files['image']
    predictions = sess.run(softmax_tensor,
                           feed_dict={'DecodeJpeg/contents:0': f.read()})
    results = []
    top_k = predictions.argsort()[-5:][::-1]
    for node_id in top_k:
        human_string = node_lookup.id_to_string(node_id)
        score = predictions[node_id]
        results.append({'label': human_string, 'score': score})
    return render_template('result.html', results=results)
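
A possible client for the /recognize endpoint above, assuming the app is being served locally (e.g. with flask run or an added app.run()); the host, port, and image path are placeholders:

import requests

with open('cat.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/recognize',
                         files={'image': f})
# The response body is the rendered result.html with the top-5 labels and scores.
print(resp.text)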