Code example #1
def show_webcam(address):
    # Start a threaded webcam stream so frames can be grabbed without blocking.
    cam = webcam.WebcamStream(address)
    cam.start_stream_threads()

    ssd = SSD()

    # Class-index-to-name mapping loaded from disk.
    global i2name
    i2name = pickle.load(open("i2name.p", "rb"))

    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)

    boxes_ = None
    confidences_ = None

    while True:
        # Grab the latest frame and resize it to the network's input size.
        sample = cam.image
        resized_img = skimage.transform.resize(sample,
                                               (image_size, image_size))

        # Single forward pass on a one-image batch.
        pred_labels_f, pred_locs_f = ssd.sess.run(
            [ssd.pred_labels, ssd.pred_locs],
            feed_dict={
                ssd.imgs_ph: [resized_img],
                ssd.bn: False
            })

        # Decode the raw predictions into boxes and confidences, passing the
        # previous frame's results back in.
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0],
                                                     pred_locs_f[0], boxes_,
                                                     confidences_)

        # Scale the boxes back to the original frame and display the detections.
        resize_boxes(resized_img, sample, boxes_)
        draw_outputs(np.asarray(sample) / 255.0, boxes_, confidences_, wait=10)
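A minimal call sketch (not part of the source): the stream address below is a placeholder, and the exact format it expects depends on what webcam.WebcamStream accepts.

# Hypothetical call; replace the placeholder address with whatever
# webcam.WebcamStream expects (e.g. a device index or a stream URL).
show_webcam("http://192.168.0.10:8080/video")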
Code example #2
    def single_image(self, sample, min_conf=0.01, nms=0.45):
        # Resize the input image to the network's input size and run one forward pass.
        resized_img = cv2.resize(sample, (image_size, image_size))
        pred_labels_f, pred_locs_f, step = self.sess.run(
            [self.pred_labels, self.pred_locs, self.global_step],
            feed_dict={self.imgs_ph: [resized_img], self.bn: False})
        # Decode the predictions, then scale the boxes back to the original image.
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0])
        resize_boxes(resized_img, sample, boxes_, scale=float(image_size))

        # Threshold by min_conf and apply non-maximum suppression.
        return postprocess_boxes(boxes_, confidences_, min_conf, nms)
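A minimal usage sketch for single_image (not from the source), assuming it is a method of the SSD detector class shown in the other examples; the image path is a placeholder, and the return value is whatever postprocess_boxes yields after the min_conf threshold and NMS are applied.

# Hypothetical usage; "frame.jpg" is a placeholder path and SSD() is assumed
# to be the detector class that defines single_image().
import cv2

ssd = SSD()
sample = cv2.imread("frame.jpg")  # BGR image as a NumPy array
detections = ssd.single_image(sample, min_conf=0.1, nms=0.45)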
Code example #3
def evaluate_images():
    tb = TB()

    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    # Loader for the SVT (Street View Text) train/test annotation files.
    test_loader = sLoader.SVT('./svt1/train.xml', './svt1/test.xml')

    while True:
        # Fetch a small test batch, run the network, and decode the predictions.
        imgs, anns = test_loader.nextBatch(3, 'test')
        pred_labels_f, pred_locs_f, step = tb.sess.run(
            [tb.pred_labels, tb.pred_locs, tb.global_step],
            feed_dict={tb.imgs_ph: imgs, tb.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0])
        # Display the detections for the first image of the batch.
        draw_outputs(imgs[0], boxes_, confidences_, wait=0)
Code example #4
        def match_boxes(batch_i):
            # Match this image's predicted boxes against its ground-truth annotations.
            #a = time.time()
            matches = box_matcher.match_boxes(pred_labels_f[batch_i], anns[batch_i])
            #print("a: %f" % (time.time() - a))
            #a = time.time()
            # Convert the matches into the positive/negative indicators and
            # target labels/locations used to fill the training feed.
            positives_f, negatives_f, true_labels_f, true_locs_f = prepare_feed(matches)

            batch_values[batch_i] = (positives_f, negatives_f, true_labels_f, true_locs_f)

            # Optionally visualise the first image of the batch.
            if batch_i == 0:
                boxes_, confidences_ = matcher.format_output(pred_labels_f[batch_i], pred_locs_f[batch_i])
                if FLAGS.display:
                    draw_outputs(imgs[batch_i], boxes_, confidences_)
                    draw_matches(imgs[batch_i], c.defaults, matches, anns[batch_i])
                    draw_matches2(imgs[batch_i], positives_f, negatives_f, true_labels_f, true_locs_f)
Code example #5
def evaluate_images():
    ssd = SSD()

    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    # COCO data loader; batches of one image, shuffled.
    loader = coco.Loader(False)
    test_batches = loader.create_batches(1, shuffle=True)
    global i2name
    i2name = loader.i2name

    while True:
        batch = test_batches.next()
        imgs, anns = loader.preprocess_batch(batch)
        # Forward pass on the preprocessed batch.
        pred_labels_f, pred_locs_f, step = ssd.sess.run(
            [ssd.pred_labels, ssd.pred_locs, ssd.global_step],
            feed_dict={
                ssd.imgs_ph: imgs,
                ssd.bn: False
            })
        # Decode and display the detections for the first (and only) image.
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0],
                                                     pred_locs_f[0])
        draw_outputs(imgs[0], boxes_, confidences_, wait=0)