Code Example #1
File: gunshot.py Project: fishblowbubbles/devastator
def listen(self, host=respeaker.HOST, port=respeaker.PORT):
    # Continuously pull audio blocks from the ReSpeaker stream and run
    # gunshot detection on the first channel of the sample array.
    while True:
        samples = get_data(host, port)
        is_gunshot = self.detect(samples[:, 0])
        if is_gunshot:
            direction = respeaker.api.direction  # direction-of-arrival estimate
            print("Gunshot(s)! Direction: {}".format(direction))
Code Example #2
def listen(self, rate=respeaker.RATE,
           host=respeaker.HOST, port=respeaker.PORT):
    # Continuously pull audio blocks and classify the speaker's emotion,
    # reporting the confidence and direction of arrival for each hit.
    while True:
        samples = get_data(host, port)
        emotion, confidence = self.detect(samples[:, 0], rate)
        if emotion:
            direction = respeaker.api.direction
            print("Emotion: {:10}\tConfidence: {:5.2}\tDirection: {:5}"
                  .format(emotion, confidence, direction))
Code Example #3
def livestream(detect, fps=realsense.FPS, **kwargs):
    # Polling delay for cv2.waitKey, in milliseconds.
    delay = int(100 / fps)
    while True:
        rgbd = get_data(realsense.HOST, realsense.PORT)  # fetch an RGB-D frame
        rgb, depth = split_rgbd(rgbd)                    # split color and depth channels
        rgb, _ = detect(rgb, depth, **kwargs)            # annotate the frame with detections
        cv2.imshow("livestream", rgb)
        if cv2.waitKey(delay) == ord("q"):               # quit on "q"
            break
    cv2.destroyAllWindows()
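A sketch of how this loop could be invoked with one of the detectors from Code Example #6. The detect callable must accept (rgb, depth, **kwargs) and return the annotated frame first, which matches YoloV3.detect as used there; where livestream itself is importable from is not shown in these examples:

import sys

sys.path.append("./devastator")

from vision.yolo import YoloV3

if __name__ == "__main__":
    # livestream as defined above must be in scope here.
    yolov3 = YoloV3()
    # YoloV3.detect(rgb, depth) returns (annotated_rgb, detections),
    # so the (rgb, _) unpacking inside livestream works unchanged.
    livestream(yolov3.detect)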
Code Example #4
def detect():
    # Grab a block of audio and the direction-of-arrival estimate,
    # mapped from [0, 360) into (-180, 180].
    samples = get_data(respeaker.HOST, respeaker.PORT)
    direction = respeaker.api.direction
    if direction > 180:
        direction -= 360

    # Run the audio classifiers on the first channel.
    emotion, confidence = sentiment.detect(samples[:, 0])
    is_gunshot = gunshot.detect(samples[:, 0])

    print("Emotion: {}, Confidence: {:5.2}".format(emotion, float(confidence)))
    print("Gunshot?: {}".format(is_gunshot))
    print("Direction: {}".format(direction))

    # Grab an RGB-D frame from the D435i and run the vision models.
    frames = d435i._get_frames()
    frames = d435i._frames_to_rgbd(frames)

    rgb, depth = split_rgbd(frames)
    rgb, markers = tracker.detect(rgb, depth)
    rgb, detections = yolov3.detect(rgb, depth)

    # Keep the marker matching the current waypoint on the route
    # (route and index come from the enclosing scope).
    marker = {}
    for m in markers:
        if m["id"] == route[index]:
            marker = m

    # Attach the audio results to any detection within +/-20 degrees
    # of the sound's direction of arrival.
    for detection in detections:
        h_angle = detection["h_angle"]
        if h_angle - 20 < direction < h_angle + 20:
            detection["emotion"] = emotion
            for e in detection["equip"]:
                if e["label"] in ("Rifle", "Handgun"):
                    e["gunshot"] = is_gunshot
                    e["direction"] = direction
        else:
            detection["emotion"] = "-"

    print("Marker: {}".format(marker))
    print("Detections: {}".format(detections))

    # Forward the fused results to the remote endpoints.
    if marker:
        marker["objectsDetected"] = detections
        connect_and_send(marker, host="192.168.1.136", port=8998)

    if detections:
        connect_and_send(detections, host="192.168.1.136", port=8888)

    return rgb
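connect_and_send is a project helper whose implementation does not appear in these examples. A minimal sketch of what such a helper could look like, assuming a one-shot pickle-over-TCP protocol; the wire format is an assumption, not taken from the project:

import pickle
import socket

def connect_and_send(data, host, port):
    # Open a TCP connection, send one pickled payload, and close.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((host, port))
        sock.sendall(pickle.dumps(data))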
Code Example #5
import argparse
import sys

sys.path.append("./devastator")

from robot import respeaker
from robot.helpers import get_data
from sound.sentiment import Sentiment

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--listen", action="store_true")
    args = parser.parse_args()

    sentiment = Sentiment()
    if args.listen:
        # Streaming mode: print a line for each classified audio block.
        sentiment.listen()
    else:
        # One-shot mode: classify a single block of samples and exit.
        samples = get_data(respeaker.HOST, respeaker.PORT)
        emotion, confidence = sentiment.detect(samples[:, 0])
        print("Emotion: {:10}\tConfidence: {:5.2}".format(emotion, confidence))
Code Example #6
import argparse
import sys

sys.path.append("./devastator")

import cv2

import robot.realsense as realsense
from robot.helpers import get_data
from vision.helpers import split_rgbd
from vision.tracker import Tracker
from vision.yolo import YoloV3

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fps", type=int, default=realsense.FPS)
    args = parser.parse_args()

    yolov3, tracker = YoloV3(), Tracker()
    delay = int(100 / args.fps)  # polling delay for cv2.waitKey, in ms

    while True:
        frames = get_data(realsense.HOST, realsense.PORT)
        rgb, depth = split_rgbd(frames)
        rgb, detections = yolov3.detect(rgb, depth)  # object detections drawn onto the frame
        rgb, markers = tracker.detect(rgb, depth)    # fiducial markers drawn onto the frame

        cv2.imshow("vision", rgb)
        if cv2.waitKey(delay) == ord("q"):
            break

    cv2.destroyAllWindows()
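get_data is the receiving counterpart used throughout these examples. Under the same pickle-over-TCP assumption as the connect_and_send sketch above (again, not the project's actual code), it could look like:

import pickle
import socket

def get_data(host, port):
    # Connect, read until the sender closes the stream, then unpickle.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((host, port))
        chunks = []
        while True:
            chunk = sock.recv(4096)
            if not chunk:
                break
            chunks.append(chunk)
    return pickle.loads(b"".join(chunks))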