Example 1
 def __init__(self):
     self.logger = Logger()
     self.logger.write("Robot: initializing")
     self.logger.display("Starting...")
     ports = usb_probe.probe()
     self.logger.write("Robot: found USB ports...")
     for port in ports:
         self.logger.write("       %s, %s" % (ports[port], port))
     self.speedometer = Speedometer(ports['speedometer'], 9600, self.logger)
     self.compasswitch = Compasswitch(ports['compasswitch'], 9600,
                                      self.logger)
     self.powersteering = PowerSteering(ports['chias'], 9600, self.logger,
                                        self.speedometer, self.compasswitch)
     self.gps = GPS(ports['gps'], 4800, self.logger)
     self.camera = Camera(9788, self.logger)
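Example 1 constructs each driver from a probed serial port, a baud rate, and a shared logger. The original Speedometer class is not shown; the stub below is only a hypothetical sketch, assuming pyserial, of the constructor that the call Speedometer(ports['speedometer'], 9600, self.logger) implies.

import serial  # pyserial; an assumption, the original driver code is not shown

class Speedometer:
    """Hypothetical minimal stub matching the Speedometer(port, baudrate, logger) call above."""
    def __init__(self, port, baudrate, logger):
        self.logger = logger
        self.logger.write("Speedometer: opening %s at %d baud" % (port, baudrate))
        # Open the serial connection to the speedometer hardware.
        self.conn = serial.Serial(port, baudrate, timeout=1)

    def read_raw(self):
        # Return one raw line from the device (the real protocol is unknown).
        return self.conn.readline()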
Example 2
    def __init__(self):
        """ Constructs the World """
        self.sky = SkyDome("Resources/Models/skydome.egg")
        self.sky.sky.setScale(Vec3(10, 10, 10))
        self.setupPhysX()
        self.setup3DAudio()

        self.car = Car(self.physxScene, self.audio3d, "defender.xml")
        self.car.setActiveAudioProfile('outside')

        self.setupLight()
        self.initTrack()
        taskMgr.add(self.simulate, 'PhysX Simulation')

        # To use a steering wheel, change inputHandler to SteeringControl
        self.inputHandler = KeyControl(self.car)
        #self.inputHandler = SteeringControl( self.car )

        self.cameraControl = CameraControl(self.car)
        self.cameraControl.enableTowerCamera()
        self.speedometer = Speedometer()
        render.setShaderAuto()
        self.accept('escape', sys.exit)
Example 3
def main():
    speedometer = Speedometer()
    client = Client()
    client.manage_client()
    client.send_requests(speedometer)
Example 4
  def __init__(self):
    self.cmdPub = rospy.Publisher('/clearpath/robots/default/cmd_vel', Twist)
    self.estPub = rospy.Publisher('state_est', StateEstimate)
    self.spd = Speedometer(self._velocityCb)
    rospy.Subscriber('indoor_pos', ips_msg, self._poseCb)

    self.running = False
    self.ts = 0.05 # Sample time [s]
    self.length = 0.265 # Robot length [m]

    # Velocity control state:

    # Proportional control constants
    self.kp = 143.9
    self.ki = 857.8 
    self.integrator = 0.0
    self.lastErr = 0.0  # (needed for trapezoidal integrator)

    # Reference and control signals:
    self.velRef = None  # [m/s]
    self.velCtrl = 0.0  # [-100,100]

    # Record values for analysis 
    self.stateRecord = []
    self.msmtRecord = []
    self.outputRecord = []

    # Proportional control constant for steering angle.
    self.kSteer = 0.5

    # Next waypoint
    self.xRef = None
    self.yRef = None
    self.hRef = None

    # Steering reference and control signals: 
    self.steerRef = None # [-0.4,0.4] [rad]
    self.steerCtrl = None # [-100,100]
    self.setSteeringAngle(0.0) # initialize sensibly

    # EKF State:
    # Latest measurements.  Set to None when consumed for EKF estimate. 
    self.lastPose = None
    self.lastVel = None

    # Process covariance:
    self.Q = np.matrix([[0.0020, 0, 0, 0],
                        [0, 0.15, 0, 0],
                        [0, 0, 0.07, 0],
                        [0, 0, 0, 0.07]])
    # Measurement covariance:
    self.R = np.matrix([[0.0020, 0, 0, 0],
                        [0, 0.0001, 0, 0],
                        [0, 0, 0.0001, 0],
                        [0, 0, 0, 0.0001]])

    # Zero initial state
    self.stateEst = np.matrix(np.zeros((4,1)))
    self.velOutDelay = [0, 0, 0, 0]
    # ... with low confidence
    self.P = np.matrix(np.ones((4,4)))

    # Velocity model params:
    Km = 0.003267973309
    am = 6.779750386209
    self.velNum = (Km*am*self.ts)/(am*self.ts + 2)
    self.velDen = (am*self.ts-2)/(am*self.ts+2)
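The velNum and velDen coefficients above match a Tustin (bilinear) discretization of a first-order velocity model; the sketch below re-derives them under that assumption (the continuous model G(s) = Km*am / (s + am) is an inference, not stated in the original).

# Hypothetical re-derivation, assuming the continuous model G(s) = Km*am / (s + am)
# and the Tustin substitution s ~ (2/ts) * (1 - z^-1) / (1 + z^-1), which gives
#   G(z) = Km*am*ts * (1 + z^-1) / ((am*ts + 2) + (am*ts - 2) * z^-1)
# and hence the difference equation v[k] = velNum*(u[k] + u[k-1]) - velDen*v[k-1].
ts = 0.05
Km = 0.003267973309
am = 6.779750386209

velNum = (Km * am * ts) / (am * ts + 2)  # same expression as self.velNum above
velDen = (am * ts - 2) / (am * ts + 2)   # same expression as self.velDen above

def vel_model_step(u_k, u_prev, v_prev):
    # One step of the discretized velocity model.
    return velNum * (u_k + u_prev) - velDen * v_prev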
Example 5
def detect_on_video(video_path,
                    out_path,
                    detector,
                    tracker,
                    coef_file='coef.json',
                    mask_file='mask.png',
                    to_mp4=False,
                    class_names=None):
    if class_names is None:
        class_names = [
            'car', 'minibus', 'trolleybus', 'tram', 'truck', 'bus',
            'middle_bus', 'ambulance', 'fire_truck', 'middle_truck', 'tractor',
            'uncategorized', 'van', 'person'
        ]

    # Create a reader for the source video
    istream = cv2.VideoCapture(video_path)

    # Read basic properties of the source video
    w = int(istream.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(istream.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(istream.get(cv2.CAP_PROP_FPS))
    frame_count = int(istream.get(cv2.CAP_PROP_FRAME_COUNT))

    # Create a writer for the output video (XVID for the to_mp4 case, VP8/WebM otherwise)
    if to_mp4:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
    else:
        fourcc = cv2.VideoWriter_fourcc(*'VP80')
    writer = cv2.VideoWriter(out_path, fourcc, fps, (w, h), True)

    # Create a Masker instance to isolate the roadway in each frame
    masker = Masker(mask_file, (w, h))

    # Create a tracker for following vehicles across frames
    # (note: this replaces the tracker passed in as an argument)
    tracker = Sort()
    speedometer = Speedometer(coef_file, fps=fps, size=(w, h))

    # Process the video frame by frame
    for frame_id in tqdm(range(frame_count)):
        # Read the next frame; it also serves as the output (annotated) frame
        ret, frame = istream.read()
        if not ret:
            break
        else:
            out_frame = frame

        # Isolate the roadway
        masked_frame = masker.apply(frame)

        # Detect objects in the frame
        outputs = detector(masked_frame)

        # Get the bounding box and class confidence for every detected object
        boxes = outputs['instances'].pred_boxes.tensor.to("cpu").numpy()
        scores = outputs['instances'].scores.to("cpu").numpy()
        classes = outputs['instances'].pred_classes.to("cpu").numpy()

        # Update the tracker to get a track_id (the object's id on previous frames) for every detection
        for_tracker = np.concatenate([boxes, scores[:, None]], axis=1)
        dets, associaties = tracker.update(for_tracker, make_associaties=True)

        speeds = speedometer.update(dets)

        for i in range(scores.shape[0]):
            box = boxes[i].astype(np.int16)
            track_id = associaties[i]

            if track_id == 0: continue

            speed = speedometer.ms_to_kmh(speeds[track_id])
            class_id = classes[i]
            class_label = class_names[class_id]

            draw_box(out_frame, box)

            draw_label(out_frame, track_id, box[:2], size=2, shadow=True)
            draw_label(out_frame,
                       round(speed, 2), (box[0], box[1] + 10),
                       color=(255, 100, 100),
                       shadow=True)
            draw_label(out_frame,
                       class_label, (box[0], box[1] + 25),
                       color=(100, 255, 100),
                       shadow=True)

        # Append the annotated frame to the output video
        writer.write(out_frame)

    # Finalize the output video
    writer.release()
    return True
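A hypothetical way to call detect_on_video is sketched below; the detectron2 DefaultPredictor and all file names are assumptions (the original clearly uses a model trained on its own 14-class list, so a COCO-pretrained placeholder would not line up with class_names).

# Hypothetical usage sketch; the model, weights, and file names are placeholders.
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
detector = DefaultPredictor(cfg)

detect_on_video(
    video_path="input.mp4",   # placeholder input file
    out_path="output.webm",   # VP8/WebM output unless to_mp4=True
    detector=detector,
    tracker=Sort(),           # currently re-created inside detect_on_video anyway
    coef_file="coef.json",
    mask_file="mask.png",
)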